Diffstat (limited to 'include')
37 files changed, 260 insertions, 137 deletions
diff --git a/include/asm-alpha/Kbuild b/include/asm-alpha/Kbuild
index e57fd57538b..2b06b3bad5f 100644
--- a/include/asm-alpha/Kbuild
+++ b/include/asm-alpha/Kbuild
@@ -1,5 +1,5 @@
 include include/asm-generic/Kbuild.asm
 
-unifdef-y += console.h fpu.h sysinfo.h
+unifdef-y += console.h fpu.h sysinfo.h compiler.h
 
 header-y += gentrap.h regdef.h pal.h reg.h
diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h
index 00c6f57ad9a..d2768cc3d7a 100644
--- a/include/asm-alpha/compiler.h
+++ b/include/asm-alpha/compiler.h
@@ -90,6 +90,7 @@
 __asm__("stw %1,%0" : "=m"(mem) : "r"(val))
 #endif
 
+#ifdef __KERNEL__
 /* Some idiots over in <linux/compiler.h> thought inline should imply
    always_inline.  This breaks stuff.  We'll include this file whenever
    we run into such problems. */
@@ -101,4 +102,6 @@
 #undef __always_inline
 #define __always_inline		inline __attribute__((always_inline))
 
+#endif /* __KERNEL__ */
+
 #endif /* __ALPHA_COMPILER_H */
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index 8c7cd50d4ea..d2bed3cb33f 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
@@ -1,6 +1,8 @@
 #ifndef _ALPHA_PAGE_H
 #define _ALPHA_PAGE_H
 
+#ifdef __KERNEL__
+
 #include <asm/pal.h>
 
 /* PAGE_SHIFT determines the page size */
@@ -8,8 +10,6 @@
 #define PAGE_SIZE	(1UL << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
-#ifdef __KERNEL__
-
 #ifndef __ASSEMBLY__
 
 #define STRICT_MM_TYPECHECKS
@@ -92,9 +92,9 @@ typedef unsigned long pgprot_t;
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
-#endif /* __KERNEL__ */
 #include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
 
+#endif /* __KERNEL__ */
+
 #endif /* _ALPHA_PAGE_H */
diff --git a/include/asm-arm/arch-s3c2410/regs-power.h b/include/asm-arm/arch-s3c2410/regs-power.h
new file mode 100644
index 00000000000..6c319ea2afa
--- /dev/null
+++ b/include/asm-arm/arch-s3c2410/regs-power.h
@@ -0,0 +1,34 @@
+/* linux/include/asm/arch-s3c2410/regs-power.h
+ *
+ * Copyright (c) 2003,2004,2005,2006 Simtec Electronics <linux@simtec.co.uk>
+ *		      http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * S3C24XX power control register definitions
+*/
+
+#ifndef __ASM_ARM_REGS_PWR
+#define __ASM_ARM_REGS_PWR __FILE__
+
+#define S3C24XX_PWRREG(x)			((x) + S3C24XX_VA_CLKPWR)
+
+#define S3C2412_PWRMODECON			S3C24XX_PWRREG(0x20)
+#define S3C2412_PWRCFG				S3C24XX_PWRREG(0x24)
+
+#define S3C2412_PWRCFG_BATF_IGNORE		(0<<0)
+#define S3C2412_PWRCFG_BATF_SLEEP		(3<<0)
+#define S3C2412_PWRCFG_BATF_MASK		(3<<0)
+
+#define S3C2412_PWRCFG_STANDBYWFI_IGNORE	(0<<6)
+#define S3C2412_PWRCFG_STANDBYWFI_IDLE		(1<<6)
+#define S3C2412_PWRCFG_STANDBYWFI_STOP		(2<<6)
+#define S3C2412_PWRCFG_STANDBYWFI_SLEEP		(3<<6)
+#define S3C2412_PWRCFG_STANDBYWFI_MASK		(3<<6)
+
+#define S3C2412_PWRCFG_RTC_MASKIRQ		(1<<8)
+#define S3C2412_PWRCFG_NAND_NORST		(1<<9)
+
+#endif /* __ASM_ARM_REGS_PWR */
diff --git a/include/asm-arm/elf.h b/include/asm-arm/elf.h
index 71061ca5c5d..ae7baa6c73f 100644
--- a/include/asm-arm/elf.h
+++ b/include/asm-arm/elf.h
@@ -8,7 +8,9 @@
 
 #include <asm/ptrace.h>
 #include <asm/user.h>
+#ifdef __KERNEL__
 #include <asm/procinfo.h>
+#endif
 
 typedef unsigned long elf_greg_t;
 typedef unsigned long elf_freg_t[3];
diff --git a/include/asm-arm/page.h b/include/asm-arm/page.h
index 63d12f0244c..b721270b998 100644
--- a/include/asm-arm/page.h
+++ b/include/asm-arm/page.h
@@ -193,8 +193,8 @@ extern pmd_t *top_pmd;
 #define ARCH_SLAB_MINALIGN 8
 #endif
 
-#endif /* __KERNEL__ */
-
 #include <asm-generic/page.h>
 
+#endif /* __KERNEL__ */
+
 #endif
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index 6b16dda1811..c00de6028fa 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -2,7 +2,7 @@ unifdef-y += a.out.h auxvec.h byteorder.h errno.h fcntl.h ioctl.h \
 	ioctls.h ipcbuf.h mman.h msgbuf.h param.h poll.h \
 	posix_types.h ptrace.h resource.h sembuf.h shmbuf.h shmparam.h \
 	sigcontext.h siginfo.h signal.h socket.h sockios.h stat.h \
-	statfs.h termbits.h termios.h timex.h types.h unistd.h user.h
+	statfs.h termbits.h termios.h types.h unistd.h user.h
 
 # These probably shouldn't be exported
 unifdef-y += elf.h page.h
diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild
index 335b2fa4e06..2308190321d 100644
--- a/include/asm-i386/Kbuild
+++ b/include/asm-i386/Kbuild
@@ -1,5 +1,5 @@
 include include/asm-generic/Kbuild.asm
 
-header-y += boot.h debugreg.h ldt.h setup.h ucontext.h
+header-y += boot.h debugreg.h ldt.h ucontext.h
 
-unifdef-y += mtrr.h vm86.h
+unifdef-y += mtrr.h setup.h vm86.h
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 1eac92cb5b1..db4344d9f73 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -7,10 +7,7 @@
 
 #include <asm/ptrace.h>
 #include <asm/user.h>
-#include <asm/processor.h>
-#include <asm/system.h>		/* for savesegment */
 #include <asm/auxvec.h>
-#include <asm/desc.h>
 
 #include <linux/utsname.h>
 
@@ -48,6 +45,12 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
 #define ELF_DATA	ELFDATA2LSB
 #define ELF_ARCH	EM_386
 
+#ifdef __KERNEL__
+
+#include <asm/processor.h>
+#include <asm/system.h>		/* for savesegment */
+#include <asm/desc.h>
+
 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
    contains a pointer to a function which might be registered using `atexit'.
    This provides a mean for the dynamic linker to call DT_FINI functions for
@@ -111,7 +114,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
 
 #define ELF_PLATFORM  (system_utsname.machine)
 
-#ifdef __KERNEL__
 #define SET_PERSONALITY(ex, ibcs2) do { } while (0)
 
 /*
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index f737e423029..2734909eff8 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -6,6 +6,7 @@
 #ifndef _i386_SETUP_H
 #define _i386_SETUP_H
 
+#ifdef __KERNEL__
 #include <linux/pfn.h>
 
 /*
@@ -13,6 +14,7 @@
  */
 #define MAXMEM_PFN	PFN_DOWN(MAXMEM)
 #define MAX_NONPAE_PFN	(1 << 20)
+#endif
 
 #define PARAM_SIZE 4096
 #define COMMAND_LINE_SIZE 256
diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h
index 3824a502351..c3e8adec591 100644
--- a/include/asm-i386/signal.h
+++ b/include/asm-i386/signal.h
@@ -2,7 +2,6 @@
 #define _ASMi386_SIGNAL_H
 
 #include <linux/types.h>
-#include <linux/linkage.h>
 #include <linux/time.h>
 #include <linux/compiler.h>
 
@@ -10,6 +9,9 @@
 struct siginfo;
 
 #ifdef __KERNEL__
+
+#include <linux/linkage.h>
+
 /* Most things should be clean enough to redefine this at will, if care
    is taken to make libc match.  */
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 49928eb33f8..098bcee94e3 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -11,9 +11,14 @@
 struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
 extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
 
+/*
+ * Saving eflags is important. It switches not only IOPL between tasks,
+ * it also protects other tasks from NT leaking through sysenter etc.
+ */
 #define switch_to(prev,next,last) do {					\
 	unsigned long esi,edi;						\
-	asm volatile("pushl %%ebp\n\t"					\
+	asm volatile("pushfl\n\t"		/* Save flags */	\
+		     "pushl %%ebp\n\t"					\
 		     "movl %%esp,%0\n\t"	/* save ESP */		\
 		     "movl %5,%%esp\n\t"	/* restore ESP */	\
 		     "movl $1f,%1\n\t"		/* save EIP */		\
@@ -21,6 +26,7 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 		     "jmp __switch_to\n"				\
 		     "1:\t"						\
 		     "popl %%ebp\n\t"					\
+		     "popfl"						\
 		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
 		      "=a" (last),"=S" (esi),"=D" (edi)			\
 		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index d983b74e4d9..fc1c8ddae14 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -324,6 +324,8 @@
 #define __NR_vmsplice		316
 #define __NR_move_pages		317
 
+#ifdef __KERNEL__
+
 #define NR_syscalls 318
 
 /*
@@ -423,8 +425,6 @@ __asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \
 __syscall_return(type,__res); \
 }
 
-#ifdef __KERNEL__
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild
index 85d6f8005eb..f1cb00f39c2 100644
--- a/include/asm-ia64/Kbuild
+++ b/include/asm-ia64/Kbuild
@@ -4,4 +4,4 @@ header-y += break.h fpu.h fpswa.h gcc_intrin.h ia64regs.h \
 	intel_intrin.h intrinsics.h perfmon_default_smpl.h \
 	ptrace_offsets.h rse.h setup.h ucontext.h
 
-unifdef-y += perfmon.h
+unifdef-y += perfmon.h ustack.h
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index f5a949ec6e1..947cb72b520 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -7,6 +7,7 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
+# ifdef __KERNEL__
 #include <asm/intrinsics.h>
 #include <asm/types.h>
 
@@ -64,7 +65,6 @@
 # define __pa(x)		((x) - PAGE_OFFSET)
 # define __va(x)		((x) + PAGE_OFFSET)
 #else /* !__ASSEMBLY */
-# ifdef __KERNEL__
 # define STRICT_MM_TYPECHECKS
 
 extern void clear_page (void *page);
@@ -174,7 +174,6 @@ get_order (unsigned long size)
 	return order;
 }
 
-# endif /* __KERNEL__ */
 #endif /* !__ASSEMBLY__ */
 
 #ifdef STRICT_MM_TYPECHECKS
@@ -228,4 +227,5 @@ get_order (unsigned long size)
 	 (((current->personality & READ_IMPLIES_EXEC) != 0)	\
 	  ? VM_EXEC : 0))
 
+# endif /* __KERNEL__ */
 #endif /* _ASM_IA64_PAGE_H */
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index 415abb23b21..1414316efd4 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -56,6 +56,8 @@
 #include <asm/fpu.h>
 
+
+#ifdef __KERNEL__
 #ifndef ASM_OFFSETS_C
 #include <asm/asm-offsets.h>
 #endif
@@ -79,10 +81,9 @@
 
 #define KERNEL_STACK_SIZE		IA64_STK_OFFSET
 
-#ifndef __ASSEMBLY__
+#endif /* __KERNEL__ */
 
-#include <asm/current.h>
-#include <asm/page.h>
+#ifndef __ASSEMBLY__
 
 /*
  * This struct defines the way the registers are saved on system
@@ -229,6 +230,9 @@ struct switch_stack {
 
 #ifdef __KERNEL__
 
+#include <asm/current.h>
+#include <asm/page.h>
+
 #define __ARCH_SYS_PTRACE	1
 
 /*
diff --git a/include/asm-ia64/ustack.h b/include/asm-ia64/ustack.h
index da55c91246e..a349467913e 100644
--- a/include/asm-ia64/ustack.h
+++ b/include/asm-ia64/ustack.h
@@ -5,12 +5,15 @@
  * Constants for the user stack size
  */
 
+#ifdef __KERNEL__
 #include <asm/page.h>
 
 /* The absolute hard limit for stack size is 1/2 of the mappable space in the region */
 #define MAX_USER_STACK_SIZE	(RGN_MAP_LIMIT/2)
-/* Make a default stack size of 2GB */
-#define DEFAULT_USER_STACK_SIZE	(1UL << 31)
 #define STACK_TOP		(0x6000000000000000UL + RGN_MAP_LIMIT)
+#endif
+
+/* Make a default stack size of 2GiB */
+#define DEFAULT_USER_STACK_SIZE	(1UL << 31)
 
 #endif /* _ASM_IA64_USTACK_H */
diff --git a/include/asm-m68k/page.h b/include/asm-m68k/page.h
index db017f838c2..fcc165ddd09 100644
--- a/include/asm-m68k/page.h
+++ b/include/asm-m68k/page.h
@@ -2,6 +2,8 @@
 #define _M68K_PAGE_H
 
+#ifdef __KERNEL__
+
 /* PAGE_SHIFT determines the page size */
 #ifndef CONFIG_SUN3
 #define PAGE_SHIFT	(12)
@@ -15,8 +17,6 @@
 #endif
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
-#ifdef __KERNEL__
-
 #include <asm/setup.h>
 
 #if PAGE_SHIFT < 13
@@ -175,8 +175,8 @@ static inline void *__va(unsigned long x)
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
-#endif /* __KERNEL__ */
-
 #include <asm-generic/page.h>
 
+#endif /* __KERNEL__ */
+
 #endif /* _M68K_PAGE_H */
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index 6ed1151a05a..219d359861f 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -14,8 +14,6 @@
 
 #include <spaces.h>
 
-#endif
-
 /*
  * PAGE_SHIFT determines the page size
  */
@@ -34,8 +32,6 @@
 #define PAGE_SIZE	(1UL << PAGE_SHIFT)
 #define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
 
-
-#ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
 extern void clear_page(void * page);
@@ -168,8 +164,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + UNCAC_BASE)
 #define CAC_ADDR(addr)		((addr) - UNCAC_BASE + PAGE_OFFSET)
 
-#endif /* defined (__KERNEL__) */
-
 #ifdef CONFIG_LIMITED_DMA
 #define WANT_PAGE_VIRTUAL
 #endif
@@ -177,4 +171,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
 
+#endif /* defined (__KERNEL__) */
+
 #endif /* _ASM_PAGE_H */
diff --git a/include/asm-parisc/page.h b/include/asm-parisc/page.h
index 0695bc958d5..57d6d82756d 100644
--- a/include/asm-parisc/page.h
+++ b/include/asm-parisc/page.h
@@ -1,22 +1,14 @@
 #ifndef _PARISC_PAGE_H
 #define _PARISC_PAGE_H
 
-#if !defined(__KERNEL__)
-/* this is for userspace applications (4k page size) */
-# define PAGE_SHIFT	12	/* 4k */
-# define PAGE_SIZE	(1UL << PAGE_SHIFT)
-# define PAGE_MASK	(~(PAGE_SIZE-1))
-#endif
-
-
 #ifdef __KERNEL__
 #if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-# define PAGE_SHIFT	12	/* 4k */
+# define PAGE_SHIFT	12
 #elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
-# define PAGE_SHIFT	14	/* 16k */
+# define PAGE_SHIFT	14
 #elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
-# define PAGE_SHIFT	16	/* 64k */
+# define PAGE_SHIFT	16
 #else
 # error "unknown default kernel page size"
 #endif
@@ -188,9 +180,9 @@ extern int npmem_ranges;
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
-#endif /* __KERNEL__ */
-
 #include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
 
+#endif /* __KERNEL__ */
+
 #endif /* _PARISC_PAGE_H */
diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h
index 4df3e80118f..6a784396660 100644
--- a/include/asm-powerpc/eeh.h
+++ b/include/asm-powerpc/eeh.h
@@ -205,6 +205,7 @@ static inline void eeh_memset_io(volatile void __iomem *addr, int c,
 	lc |= lc << 8;
 	lc |= lc << 16;
 
+	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && !EEH_CHECK_ALIGN(p, 4)) {
 		*((volatile u8 *)p) = c;
 		p++;
@@ -229,6 +230,7 @@ static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *sr
 	void *destsave = dest;
 	unsigned long nsave = n;
 
+	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
 		*((u8 *)dest) = *((volatile u8 *)vsrc);
 		__asm__ __volatile__ ("eieio" : : : "memory");
@@ -266,6 +268,7 @@ static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
 {
 	void *vdest = (void __force *) dest;
 
+	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
 		*((volatile u8 *)vdest) = *((u8 *)src);
 		src++;
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index f1b3c00bc1c..936422e5489 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -84,7 +84,33 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 static inline int
 futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
-	return -ENOSYS;
+	int prev;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	__asm__ __volatile__ (
+	LWSYNC_ON_SMP
+"1:	lwarx	%0,0,%2		# futex_atomic_cmpxchg_inatomic\n\
+	cmpw	0,%0,%3\n\
+	bne-	3f\n"
+	PPC405_ERR77(0,%2)
+"2:	stwcx.	%4,0,%2\n\
+	bne-	1b\n"
+	ISYNC_ON_SMP
+"3:	.section .fixup,\"ax\"\n\
+4:	li	%0,%5\n\
+	b	3b\n\
+	.previous\n\
+	.section __ex_table,\"a\"\n\
+	.align 3\n\
+	" PPC_LONG "1b,4b,2b,4b\n\
+	.previous" \
+	: "=&r" (prev), "+m" (*uaddr)
+	: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
+	: "cc", "memory");
+
+	return prev;
 }
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 36c4c34bf56..212428db0d8 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -19,6 +19,7 @@ extern int check_legacy_ioport(unsigned long base_port);
 #include <linux/compiler.h>
 #include <asm/page.h>
 #include <asm/byteorder.h>
+#include <asm/paca.h>
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/iseries/iseries_io.h>
 #endif
@@ -162,7 +163,11 @@ extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, int ns);
 extern void _insl_ns(volatile u32 __iomem *port, void *buf, int nl);
 extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, int nl);
 
-#define mmiowb()
+static inline void mmiowb(void)
+{
+	__asm__ __volatile__ ("sync" : : : "memory");
+	get_paca()->io_sync = 0;
+}
 
 /*
  * output pause versions need a delay at least for the
@@ -278,22 +283,23 @@ static inline int in_8(const volatile unsigned char __iomem *addr)
 {
 	int ret;
 
-	__asm__ __volatile__("lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
			     : "=r" (ret) : "m" (*addr));
 	return ret;
 }
 
 static inline void out_8(volatile unsigned char __iomem *addr, int val)
 {
-	__asm__ __volatile__("stb%U0%X0 %1,%0; sync"
+	__asm__ __volatile__("sync; stb%U0%X0 %1,%0"
			     : "=m" (*addr) : "r" (val));
+	get_paca()->io_sync = 1;
 }
 
 static inline int in_le16(const volatile unsigned short __iomem *addr)
 {
 	int ret;
 
-	__asm__ __volatile__("lhbrx %0,0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lhbrx %0,0,%1; twi 0,%0,0; isync"
			     : "=r" (ret) : "r" (addr), "m" (*addr));
 	return ret;
 }
@@ -302,28 +308,30 @@ static inline int in_be16(const volatile unsigned short __iomem *addr)
 {
 	int ret;
 
-	__asm__ __volatile__("lhz%U1%X1 %0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lhz%U1%X1 %0,%1; twi 0,%0,0; isync"
			     : "=r" (ret) : "m" (*addr));
 	return ret;
 }
 
 static inline void out_le16(volatile unsigned short __iomem *addr, int val)
 {
-	__asm__ __volatile__("sthbrx %1,0,%2; sync"
+	__asm__ __volatile__("sync; sthbrx %1,0,%2"
			     : "=m" (*addr) : "r" (val), "r" (addr));
+	get_paca()->io_sync = 1;
 }
 
 static inline void out_be16(volatile unsigned short __iomem *addr, int val)
 {
-	__asm__ __volatile__("sth%U0%X0 %1,%0; sync"
+	__asm__ __volatile__("sync; sth%U0%X0 %1,%0"
			     : "=m" (*addr) : "r" (val));
+	get_paca()->io_sync = 1;
 }
 
 static inline unsigned in_le32(const volatile unsigned __iomem *addr)
 {
 	unsigned ret;
 
-	__asm__ __volatile__("lwbrx %0,0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lwbrx %0,0,%1; twi 0,%0,0; isync"
			     : "=r" (ret) : "r" (addr), "m" (*addr));
 	return ret;
 }
@@ -332,21 +340,23 @@ static inline unsigned in_be32(const volatile unsigned __iomem *addr)
 {
 	unsigned ret;
 
-	__asm__ __volatile__("lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
			     : "=r" (ret) : "m" (*addr));
 	return ret;
 }
 
 static inline void out_le32(volatile unsigned __iomem *addr, int val)
 {
-	__asm__ __volatile__("stwbrx %1,0,%2; sync" : "=m" (*addr)
+	__asm__ __volatile__("sync; stwbrx %1,0,%2" : "=m" (*addr)
			     : "r" (val), "r" (addr));
+	get_paca()->io_sync = 1;
 }
 
 static inline void out_be32(volatile unsigned __iomem *addr, int val)
 {
-	__asm__ __volatile__("stw%U0%X0 %1,%0; sync"
+	__asm__ __volatile__("sync; stw%U0%X0 %1,%0"
			     : "=m" (*addr) : "r" (val));
+	get_paca()->io_sync = 1;
 }
 
 static inline unsigned long in_le64(const volatile unsigned long __iomem *addr)
@@ -354,6 +364,7 @@ static inline unsigned long in_le64(const volatile unsigned long __iomem *addr)
 	unsigned long tmp, ret;
 
 	__asm__ __volatile__(
+		"sync\n"
 		"ld %1,0(%2)\n"
 		"twi 0,%1,0\n"
 		"isync\n"
@@ -372,7 +383,7 @@ static inline unsigned long in_be64(const volatile unsigned long __iomem *addr)
 {
 	unsigned long ret;
 
-	__asm__ __volatile__("ld%U1%X1 %0,%1; twi 0,%0,0; isync"
+	__asm__ __volatile__("sync; ld%U1%X1 %0,%1; twi 0,%0,0; isync"
			     : "=r" (ret) : "m" (*addr));
 	return ret;
 }
@@ -389,14 +400,16 @@ static inline void out_le64(volatile unsigned long __iomem *addr, unsigned long
 		"rldicl %1,%1,32,0\n"
 		"rlwimi %0,%1,8,8,31\n"
 		"rlwimi %0,%1,24,16,23\n"
-		"std %0,0(%3)\n"
-		"sync"
+		"sync\n"
+		"std %0,0(%3)"
 		: "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr));
+	get_paca()->io_sync = 1;
 }
 
 static inline void out_be64(volatile unsigned long __iomem *addr, unsigned long val)
 {
-	__asm__ __volatile__("std%U0%X0 %1,%0; sync" : "=m" (*addr) : "r" (val));
+	__asm__ __volatile__("sync; std%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
+	get_paca()->io_sync = 1;
 }
 
 #ifndef CONFIG_PPC_ISERIES
diff --git a/include/asm-powerpc/kdump.h b/include/asm-powerpc/kdump.h
index dc1574c945f..10e8eb1e6f4 100644
--- a/include/asm-powerpc/kdump.h
+++ b/include/asm-powerpc/kdump.h
@@ -7,7 +7,7 @@
 /* How many bytes to reserve at zero for kdump. The reserve limit should
  * be greater or equal to the trampoline's end address.
  * Reserve to the end of the FWNMI area, see head_64.S */
-#define KDUMP_RESERVE_LIMIT	0x8000
+#define KDUMP_RESERVE_LIMIT	0x10000 /* 64K */
 
 #ifdef CONFIG_CRASH_DUMP
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index 2d4585f0620..3d5d590bc4b 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -93,6 +93,7 @@ struct paca_struct {
 	u64 saved_r1;			/* r1 save for RTAS calls */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
 	u8 proc_enabled;		/* irq soft-enable flag */
+	u8 io_sync;			/* writel() needs spin_unlock sync */
 
 	/* Stuff for accurate time accounting */
 	u64 user_time;			/* accumulated usermode TB ticks */
diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h
index 895cb6d3a42..c31e4382a77 100644
--- a/include/asm-powerpc/spinlock.h
+++ b/include/asm-powerpc/spinlock.h
@@ -36,6 +36,19 @@
 #define LOCK_TOKEN	1
 #endif
 
+#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
+#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
+#define SYNC_IO		do {						\
+				if (unlikely(get_paca()->io_sync)) {	\
+					mb();				\
+					get_paca()->io_sync = 0;	\
+				}					\
+			} while (0)
+#else
+#define CLEAR_IO_SYNC
+#define SYNC_IO
+#endif
+
 /*
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
@@ -61,6 +74,7 @@ static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
 
 static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
 {
+	CLEAR_IO_SYNC;
 	return __spin_trylock(lock) == 0;
 }
 
@@ -91,6 +105,7 @@ extern void __rw_yield(raw_rwlock_t *lock);
 
 static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
 {
+	CLEAR_IO_SYNC;
 	while (1) {
 		if (likely(__spin_trylock(lock) == 0))
 			break;
@@ -107,6 +122,7 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long
 {
 	unsigned long flag