-rw-r--r--  arch/arm/Makefile                        |  9
-rw-r--r--  arch/arm/boot/Makefile                   | 16
-rw-r--r--  arch/arm/boot/install.sh                 | 14
-rw-r--r--  arch/arm/include/asm/jump_label.h        |  2
-rw-r--r--  arch/mips/include/asm/jump_label.h       |  2
-rw-r--r--  arch/powerpc/include/asm/jump_label.h    |  2
-rw-r--r--  arch/powerpc/kernel/irq.c                |  5
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  |  2
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c         | 18
-rw-r--r--  arch/s390/include/asm/jump_label.h       |  2
-rw-r--r--  arch/s390/kernel/crash_dump.c            | 42
-rw-r--r--  arch/s390/kernel/entry.S                 |  1
-rw-r--r--  arch/s390/kernel/entry64.S               |  1
-rw-r--r--  arch/s390/kernel/kprobes.c               |  6
-rw-r--r--  arch/sparc/include/asm/jump_label.h      |  2
-rw-r--r--  arch/tile/include/asm/atomic.h           |  5
-rw-r--r--  arch/tile/include/asm/atomic_32.h        | 27
-rw-r--r--  arch/tile/include/asm/cmpxchg.h          | 28
-rw-r--r--  arch/tile/include/asm/percpu.h           | 34
-rw-r--r--  arch/tile/kernel/hardwall.c              |  6
-rw-r--r--  arch/tile/kernel/intvec_32.S             |  3
-rw-r--r--  arch/tile/kernel/intvec_64.S             |  3
-rw-r--r--  arch/tile/kernel/stack.c                 | 12
-rw-r--r--  arch/tile/lib/atomic_32.c                |  8
-rw-r--r--  arch/x86/include/asm/cpufeature.h        |  6
-rw-r--r--  arch/x86/include/asm/jump_label.h        |  2
-rw-r--r--  arch/x86/include/asm/mutex_64.h          |  4
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c         | 11
-rw-r--r--  arch/x86/kvm/vmx.c                       | 24
-rw-r--r--  drivers/char/random.c                    | 11
-rw-r--r--  drivers/hid/Kconfig                      |  1
-rw-r--r--  drivers/hid/hid-core.c                   |  1
-rw-r--r--  drivers/hid/hid-holtek-mouse.c           |  4
-rw-r--r--  drivers/hid/hid-ids.h                    |  1
-rw-r--r--  drivers/hid/hid-roccat-kone.c            |  2
-rw-r--r--  drivers/hid/hid-roccat-koneplus.c        |  4
-rw-r--r--  drivers/hid/hid-roccat-kovaplus.c        |  4
-rw-r--r--  drivers/hid/hid-roccat-pyra.c            |  4
-rw-r--r--  drivers/hid/hid-wiimote-modules.c        | 40
-rw-r--r--  drivers/hid/hid-wiimote.h                |  4
-rw-r--r--  drivers/hid/hidraw.c                     | 21
-rw-r--r--  drivers/hid/uhid.c                       |  3
-rw-r--r--  drivers/hwmon/applesmc.c                 | 13
-rw-r--r--  drivers/md/bcache/request.c              |  3
-rw-r--r--  drivers/mtd/devices/m25p80.c             | 17
-rw-r--r--  drivers/mtd/nand/nand_base.c             |  8
-rw-r--r--  drivers/s390/char/sclp_cmd.c             |  8
-rw-r--r--  drivers/s390/char/tty3270.c              |  2
-rw-r--r--  drivers/spi/spi-atmel.c                  |  3
-rw-r--r--  drivers/spi/spi-clps711x.c               |  3
-rw-r--r--  drivers/spi/spi-fsl-dspi.c               | 10
-rw-r--r--  drivers/spi/spi-mpc512x-psc.c            |  4
-rw-r--r--  drivers/spi/spi-pxa2xx.c                 | 11
-rw-r--r--  drivers/spi/spi-s3c64xx.c                |  4
-rw-r--r--  drivers/spi/spi-sh-hspi.c                |  4
-rw-r--r--  drivers/watchdog/hpwdt.c                 |  6
-rw-r--r--  include/linux/compiler-gcc4.h            | 15
-rw-r--r--  include/linux/miscdevice.h               |  1
-rw-r--r--  include/linux/perf_event.h               | 24
-rw-r--r--  include/linux/random.h                   |  1
-rw-r--r--  include/linux/timex.h                    | 14
-rw-r--r--  init/main.c                              |  2
-rw-r--r--  kernel/events/core.c                     |  6
-rw-r--r--  lib/kobject.c                            |  2
-rw-r--r--  tools/perf/Makefile                      |  1
-rw-r--r--  tools/perf/builtin-stat.c                |  1
-rw-r--r--  tools/perf/config/feature-tests.mak      |  2
-rw-r--r--  tools/perf/util/dwarf-aux.c              | 25
-rw-r--r--  tools/perf/util/dwarf-aux.h              |  6
-rw-r--r--  tools/perf/util/header.c                 | 12
-rw-r--r--  tools/perf/util/probe-finder.c           | 49
-rw-r--r--  tools/perf/util/session.c                |  4
72 files changed, 456 insertions(+), 202 deletions(-)
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index a37a50f575a..db50b626be9 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -296,10 +296,15 @@ archprepare:
# Convert bzImage to zImage
bzImage: zImage
-zImage Image xipImage bootpImage uImage: vmlinux
+BOOT_TARGETS = zImage Image xipImage bootpImage uImage
+INSTALL_TARGETS = zinstall uinstall install
+
+PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
+$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
-zinstall uinstall install: vmlinux
+$(INSTALL_TARGETS):
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
%.dtb: | scripts
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 84aa2caf07e..ec2f8065f95 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -95,24 +95,24 @@ initrd:
@test "$(INITRD)" != "" || \
(echo You must specify INITRD; exit -1)
-install: $(obj)/Image
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+install:
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/Image System.map "$(INSTALL_PATH)"
-zinstall: $(obj)/zImage
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+zinstall:
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/zImage System.map "$(INSTALL_PATH)"
-uinstall: $(obj)/uImage
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+uinstall:
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/uImage System.map "$(INSTALL_PATH)"
zi:
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/zImage System.map "$(INSTALL_PATH)"
i:
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/Image System.map "$(INSTALL_PATH)"
subdir- := bootp compressed dts
diff --git a/arch/arm/boot/install.sh b/arch/arm/boot/install.sh
index 06ea7d42ce8..2a45092a40e 100644
--- a/arch/arm/boot/install.sh
+++ b/arch/arm/boot/install.sh
@@ -20,6 +20,20 @@
# $4 - default install path (blank if root directory)
#
+verify () {
+ if [ ! -f "$1" ]; then
+ echo "" 1>&2
+ echo " *** Missing file: $1" 1>&2
+ echo ' *** You need to run "make" before "make install".' 1>&2
+ echo "" 1>&2
+ exit 1
+ fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
# User may have a custom install script
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index bfc198c7591..863c892b4aa 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -16,7 +16,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:\n\t"
+ asm_volatile_goto("1:\n\t"
JUMP_LABEL_NOP "\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 4d6d77ed9b9..e194f957ca8 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -22,7 +22,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:\tnop\n\t"
+ asm_volatile_goto("1:\tnop\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index ae098c438f0..f016bb699b5 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -19,7 +19,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:\n\t"
+ asm_volatile_goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 57d286a78f8..c7cb8c232d2 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -495,14 +495,15 @@ void __do_irq(struct pt_regs *regs)
void do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- struct thread_info *curtp, *irqtp;
+ struct thread_info *curtp, *irqtp, *sirqtp;
/* Switch to the irq stack to handle this */
curtp = current_thread_info();
irqtp = hardirq_ctx[raw_smp_processor_id()];
+ sirqtp = softirq_ctx[raw_smp_processor_id()];
/* Already there ? */
- if (unlikely(curtp == irqtp)) {
+ if (unlikely(curtp == irqtp || curtp == sirqtp)) {
__do_irq(regs);
set_irq_regs(old_regs);
return;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 294b7af28cd..c71103b8a74 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1066,7 +1066,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
ld r7, HSTATE_DSCR(r13)
- std r8, VCPU_DSCR(r7)
+ std r8, VCPU_DSCR(r9)
mtspr SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d729df..c65593abae8 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -332,6 +332,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
unsigned long hva;
int pfnmap = 0;
int tsize = BOOK3E_PAGESZ_4K;
+ int ret = 0;
+ unsigned long mmu_seq;
+ struct kvm *kvm = vcpu_e500->vcpu.kvm;
+
+ /* used to check for invalidations in progress */
+ mmu_seq = kvm->mmu_notifier_seq;
+ smp_rmb();
/*
* Translate guest physical to true physical, acquiring
@@ -449,6 +456,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
}
+ spin_lock(&kvm->mmu_lock);
+ if (mmu_notifier_retry(kvm, mmu_seq)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -457,10 +470,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
/* Clear i-cache for new pages */
kvmppc_mmu_flush_icache(pfn);
+out:
+ spin_unlock(&kvm->mmu_lock);
+
/* Drop refcount on page, so that mmu notifiers can clear it */
kvm_release_pfn_clean(pfn);
- return 0;
+ return ret;
}
/* XXX only map the one-one case, for now use TLB0 */
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 6c32190dc73..346b1c85ffb 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -15,7 +15,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("0: brcl 0,0\n"
+ asm_volatile_goto("0: brcl 0,0\n"
".pushsection __jump_table, \"aw\"\n"
ASM_ALIGN "\n"
ASM_PTR " 0b, %l[label], %0\n"
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index c84f33d51f7..7dd21720e5b 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -40,28 +40,26 @@ static inline void *load_real_addr(void *addr)
}
/*
- * Copy up to one page to vmalloc or real memory
+ * Copy real to virtual or real memory
*/
-static ssize_t copy_page_real(void *buf, void *src, size_t csize)
+static int copy_from_realmem(void *dest, void *src, size_t count)
{
- size_t size;
+ unsigned long size;
+ int rc;
- if (is_vmalloc_addr(buf)) {
- BUG_ON(csize >= PAGE_SIZE);
- /* If buf is not page aligned, copy first part */
- size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
- if (size) {
- if (memcpy_real(load_real_addr(buf), src, size))
- return -EFAULT;
- buf += size;
- src += size;
- }
- /* Copy second part */
- size = csize - size;
- return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
- } else {
- return memcpy_real(buf, src, csize);
- }
+ if (!count)
+ return 0;
+ if (!is_vmalloc_or_module_addr(dest))
+ return memcpy_real(dest, src, count);
+ do {
+ size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
+ if (memcpy_real(load_real_addr(dest), src, size))
+ return -EFAULT;
+ count -= size;
+ dest += size;
+ src += size;
+ } while (count);
+ return 0;
}
/*
@@ -114,7 +112,7 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
rc = copy_to_user_real((void __force __user *) buf,
(void *) src, csize);
else
- rc = copy_page_real(buf, (void *) src, csize);
+ rc = copy_from_realmem(buf, (void *) src, csize);
return (rc == 0) ? rc : csize;
}
@@ -210,7 +208,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
if (OLDMEM_BASE) {
if ((unsigned long) src < OLDMEM_SIZE) {
copied = min(count, OLDMEM_SIZE - (unsigned long) src);
- rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
+ rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
if (rc)
return rc;
}
@@ -223,7 +221,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
return rc;
}
}
- return memcpy_real(dest + copied, src + copied, count - copied);
+ return copy_from_realmem(dest + copied, src + copied, count - copied);
}
/*
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index cc30d1fb000..0dc2b6d0a1e 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -266,6 +266,7 @@ sysc_sigpending:
tm __TI_flags+3(%r12),_TIF_SYSCALL
jno sysc_return
lm %r2,%r7,__PT_R2(%r11) # load svc arguments
+ l %r10,__TI_sysc_table(%r12) # 31 bit system call table
xr %r8,%r8 # svc 0 returns -ENOSYS
clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
jnl sysc_nr_ok # invalid svc number -> do svc 0
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2b2188b97c6..e5b43c97a83 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -297,6 +297,7 @@ sysc_sigpending:
tm __TI_flags+7(%r12),_TIF_SYSCALL
jno sysc_return
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
lghi %r8,0 # svc 0 returns -ENOSYS
llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
cghi %r1,NR_syscalls
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 0ce9fb24503..d86e64eddb4 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -67,6 +67,11 @@ static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
case 0xac: /* stnsm */
case 0xad: /* stosm */
return -EINVAL;
+ case 0xc6:
+ switch (insn[0] & 0x0f) {
+ case 0x00: /* exrl */
+ return -EINVAL;
+ }
}
switch (insn[0]) {
case 0x0101: /* pr */
@@ -180,7 +185,6 @@ static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
break;
case 0xc6:
switch (insn[0] & 0x0f) {
- case 0x00: /* exrl */
case 0x02: /* pfdrl */
case 0x04: /* cghrl */
case 0x05: /* chrl */
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 5080d16a832..ec2e2e2aba7 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -9,7 +9,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:\n\t"
+ asm_volatile_goto("1:\n\t"
"nop\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index d385eaadece..70979846076 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
*
* Atomically sets @v to @i and returns old @v
*/
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
{
return xchg64(&v->counter, n);
}
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
* Atomically checks if @v holds @o and replaces it with @n if so.
* Returns the old value at @v.
*/
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
+ long long n)
{
return cmpxchg64(&v->counter, o, n);
}
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 0d0395b1b15..1ad4a1f7d42 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
/* A 64bit atomic type */
typedef struct {
- u64 __aligned(8) counter;
+ long long counter;
} atomic64_t;
#define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
*
* Atomically reads the value of @v.
*/
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
{
/*
* Requires an atomic op to read both 32-bit parts consistently.
* Casting away const is safe since the atomic support routines
* do not write to memory if the value has not been modified.
*/
- return _atomic64_xchg_add((u64 *)&v->counter, 0);
+ return _atomic64_xchg_add((long long *)&v->counter, 0);
}
/**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
*
* Atomically adds @i to @v.
*/
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
{
_atomic64_xchg_add(&v->counter, i);
}
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
*
* Atomically adds @i to @v and returns @i + @v
*/
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
smp_mb(); /* barrier for proper semantics */
return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+ long long u)
{
smp_mb(); /* barrier for proper semantics */
return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
* atomic64_set() can't be just a raw store, since it would be lost if it
* fell between the load and store of one of the other atomic ops.
*/
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
{
_atomic64_xchg(&v->counter, n);
}
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
- int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+ long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+ long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+ int *lock, long long o, long long n);
/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 4001d5eab4b..0ccda3c425b 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
int _atomic_xchg_add(int *v, int i);
int _atomic_xchg_add_unless(int *v, int a, int u);
int _atomic_cmpxchg(int *ptr, int o, int n);
-u64 _atomic64_xchg(u64 *v, u64 n);
-u64 _atomic64_xchg_add(u64 *v, u64 i);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+long long _atomic64_xchg(long long *v, long long n);
+long long _atomic64_xchg_add(long long *v, long long i);
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
+long long _atomic64_cmpxchg(long long *v, long long o, long long n);
#define xchg(ptr, n) \
({ \
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
if (sizeof(*(ptr)) != 4) \
__cmpxchg_called_with_bad_pointer(); \
smp_mb(); \
- (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+ (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
+ (int)n); \
})
#define xchg64(ptr, n) \
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
if (sizeof(*(ptr)) != 8) \
__xchg_called_with_bad_pointer(); \
smp_mb(); \
- (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
+ (typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
+ (long long)(n)); \
})
#define cmpxchg64(ptr, o, n) \
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
if (sizeof(*(ptr)) != 8) \
__cmpxchg_called_with_bad_pointer(); \
smp_mb(); \
- (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
+ (typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
(long long)o, (long long)n); \