Diffstat (limited to 'arch')
-rw-r--r--  arch/arc/kernel/ptrace.c                 |  2
-rw-r--r--  arch/arm/Makefile                        |  9
-rw-r--r--  arch/arm/boot/Makefile                   | 16
-rw-r--r--  arch/arm/boot/install.sh                 | 14
-rw-r--r--  arch/mips/kernel/octeon_switch.S         |  2
-rw-r--r--  arch/mips/kernel/r2300_switch.S          |  2
-rw-r--r--  arch/mips/kernel/r4k_switch.S            |  2
-rw-r--r--  arch/powerpc/kernel/irq.c                |  5
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  |  2
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c         | 18
-rw-r--r--  arch/s390/kernel/crash_dump.c            | 42
-rw-r--r--  arch/s390/kernel/entry.S                 |  1
-rw-r--r--  arch/s390/kernel/entry64.S               |  1
-rw-r--r--  arch/s390/kernel/kprobes.c               |  6
-rw-r--r--  arch/tile/include/asm/atomic.h           |  5
-rw-r--r--  arch/tile/include/asm/atomic_32.h        | 27
-rw-r--r--  arch/tile/include/asm/cmpxchg.h          | 28
-rw-r--r--  arch/tile/include/asm/percpu.h           | 34
-rw-r--r--  arch/tile/kernel/hardwall.c              |  6
-rw-r--r--  arch/tile/kernel/intvec_32.S             |  3
-rw-r--r--  arch/tile/kernel/intvec_64.S             |  3
-rw-r--r--  arch/tile/kernel/stack.c                 | 12
-rw-r--r--  arch/tile/lib/atomic_32.c                |  8
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c         | 11
-rw-r--r--  arch/x86/kvm/vmx.c                       | 24
-rw-r--r--  arch/x86/pci/mmconfig-shared.c           |  7
26 files changed, 186 insertions, 104 deletions
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index 333238564b6..5d76706139d 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -102,7 +102,7 @@ static int genregs_set(struct task_struct *target,
REG_IGNORE_ONE(pad2);
REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
REG_IGNORE_ONE(efa); /* efa update invalid */
- REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
+ REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */
return ret;
}
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index a37a50f575a..db50b626be9 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -296,10 +296,15 @@ archprepare:
# Convert bzImage to zImage
bzImage: zImage
-zImage Image xipImage bootpImage uImage: vmlinux
+BOOT_TARGETS = zImage Image xipImage bootpImage uImage
+INSTALL_TARGETS = zinstall uinstall install
+
+PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
+$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
-zinstall uinstall install: vmlinux
+$(INSTALL_TARGETS):
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
%.dtb: | scripts
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 84aa2caf07e..ec2f8065f95 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -95,24 +95,24 @@ initrd:
@test "$(INITRD)" != "" || \
(echo You must specify INITRD; exit -1)
-install: $(obj)/Image
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+install:
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/Image System.map "$(INSTALL_PATH)"
-zinstall: $(obj)/zImage
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+zinstall:
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/zImage System.map "$(INSTALL_PATH)"
-uinstall: $(obj)/uImage
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+uinstall:
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/uImage System.map "$(INSTALL_PATH)"
zi:
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/zImage System.map "$(INSTALL_PATH)"
i:
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/Image System.map "$(INSTALL_PATH)"
subdir- := bootp compressed dts
diff --git a/arch/arm/boot/install.sh b/arch/arm/boot/install.sh
index 06ea7d42ce8..2a45092a40e 100644
--- a/arch/arm/boot/install.sh
+++ b/arch/arm/boot/install.sh
@@ -20,6 +20,20 @@
# $4 - default install path (blank if root directory)
#
+verify () {
+ if [ ! -f "$1" ]; then
+ echo "" 1>&2
+ echo " *** Missing file: $1" 1>&2
+ echo ' *** You need to run "make" before "make install".' 1>&2
+ echo "" 1>&2
+ exit 1
+ fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
# User may have a custom install script
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 4204d76af85..029e002a4ea 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -73,7 +73,7 @@
3:
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
- PTR_L t8, __stack_chk_guard
+ PTR_LA t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
LONG_S t9, 0(t8)
#endif
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 38af83f84c4..20b7b040e76 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -67,7 +67,7 @@ LEAF(resume)
1:
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
- PTR_L t8, __stack_chk_guard
+ PTR_LA t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
LONG_S t9, 0(t8)
#endif
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 921238a6bd2..078de5eaca8 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -69,7 +69,7 @@
1:
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
- PTR_L t8, __stack_chk_guard
+ PTR_LA t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
LONG_S t9, 0(t8)
#endif
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 57d286a78f8..c7cb8c232d2 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -495,14 +495,15 @@ void __do_irq(struct pt_regs *regs)
void do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- struct thread_info *curtp, *irqtp;
+ struct thread_info *curtp, *irqtp, *sirqtp;
/* Switch to the irq stack to handle this */
curtp = current_thread_info();
irqtp = hardirq_ctx[raw_smp_processor_id()];
+ sirqtp = softirq_ctx[raw_smp_processor_id()];
/* Already there ? */
- if (unlikely(curtp == irqtp)) {
+ if (unlikely(curtp == irqtp || curtp == sirqtp)) {
__do_irq(regs);
set_irq_regs(old_regs);
return;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 294b7af28cd..c71103b8a74 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1066,7 +1066,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
ld r7, HSTATE_DSCR(r13)
- std r8, VCPU_DSCR(r7)
+ std r8, VCPU_DSCR(r9)
mtspr SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d729df..c65593abae8 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -332,6 +332,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
unsigned long hva;
int pfnmap = 0;
int tsize = BOOK3E_PAGESZ_4K;
+ int ret = 0;
+ unsigned long mmu_seq;
+ struct kvm *kvm = vcpu_e500->vcpu.kvm;
+
+ /* used to check for invalidations in progress */
+ mmu_seq = kvm->mmu_notifier_seq;
+ smp_rmb();
/*
* Translate guest physical to true physical, acquiring
@@ -449,6 +456,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
}
+ spin_lock(&kvm->mmu_lock);
+ if (mmu_notifier_retry(kvm, mmu_seq)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -457,10 +470,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
/* Clear i-cache for new pages */
kvmppc_mmu_flush_icache(pfn);
+out:
+ spin_unlock(&kvm->mmu_lock);
+
/* Drop refcount on page, so that mmu notifiers can clear it */
kvm_release_pfn_clean(pfn);
- return 0;
+ return ret;
}
/* XXX only map the one-one case, for now use TLB0 */
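The e500_mmu_host.c change above follows the usual KVM ordering for racing against MMU-notifier invalidations: sample kvm->mmu_notifier_seq before the lockless gfn-to-pfn work, re-check it under kvm->mmu_lock before installing the shadow TLB entry, and back out with -EAGAIN if it moved. A minimal userspace analogue of just that ordering is sketched below; every name in it is invented for the demo, and only the sample/re-check/bail-out shape mirrors the hunk.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Invalidations would bump this counter (kvm->mmu_notifier_seq in the hunk). */
static unsigned long invalidate_seq;
static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

static int map_one_entry(void)
{
	/* Sample the counter before doing the lockless translation work. */
	unsigned long seq = __atomic_load_n(&invalidate_seq, __ATOMIC_ACQUIRE);

	/* ... lockless address translation would happen here ... */

	pthread_mutex_lock(&map_lock);
	if (__atomic_load_n(&invalidate_seq, __ATOMIC_RELAXED) != seq) {
		/* An invalidation ran in between: discard the work and let
		 * the caller retry, as the hunk does by returning -EAGAIN. */
		pthread_mutex_unlock(&map_lock);
		return -EAGAIN;
	}
	/* ... publish the new entry while still holding the lock ... */
	pthread_mutex_unlock(&map_lock);
	return 0;
}

int main(void)
{
	printf("map_one_entry() -> %d\n", map_one_entry());
	return 0;
}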
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index c84f33d51f7..7dd21720e5b 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -40,28 +40,26 @@ static inline void *load_real_addr(void *addr)
}
/*
- * Copy up to one page to vmalloc or real memory
+ * Copy real to virtual or real memory
*/
-static ssize_t copy_page_real(void *buf, void *src, size_t csize)
+static int copy_from_realmem(void *dest, void *src, size_t count)
{
- size_t size;
+ unsigned long size;
+ int rc;
- if (is_vmalloc_addr(buf)) {
- BUG_ON(csize >= PAGE_SIZE);
- /* If buf is not page aligned, copy first part */
- size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
- if (size) {
- if (memcpy_real(load_real_addr(buf), src, size))
- return -EFAULT;
- buf += size;
- src += size;
- }
- /* Copy second part */
- size = csize - size;
- return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
- } else {
- return memcpy_real(buf, src, csize);
- }
+ if (!count)
+ return 0;
+ if (!is_vmalloc_or_module_addr(dest))
+ return memcpy_real(dest, src, count);
+ do {
+ size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
+ if (memcpy_real(load_real_addr(dest), src, size))
+ return -EFAULT;
+ count -= size;
+ dest += size;
+ src += size;
+ } while (count);
+ return 0;
}
/*
@@ -114,7 +112,7 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
rc = copy_to_user_real((void __force __user *) buf,
(void *) src, csize);
else
- rc = copy_page_real(buf, (void *) src, csize);
+ rc = copy_from_realmem(buf, (void *) src, csize);
return (rc == 0) ? rc : csize;
}
@@ -210,7 +208,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
if (OLDMEM_BASE) {
if ((unsigned long) src < OLDMEM_SIZE) {
copied = min(count, OLDMEM_SIZE - (unsigned long) src);
- rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
+ rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
if (rc)
return rc;
}
@@ -223,7 +221,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
return rc;
}
}
- return memcpy_real(dest + copied, src + copied, count - copied);
+ return copy_from_realmem(dest + copied, src + copied, count - copied);
}
/*
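The rewritten copy_from_realmem() above copies in page-sized chunks so that each memcpy_real() call stays within one destination page; an unaligned destination simply gets a short first chunk. The standalone demo below reproduces only that chunk-size arithmetic with made-up numbers (the 4 KiB PAGE_SIZE and the addresses are assumptions of the demo, not part of the patch).

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long dest = 0x12345f00;	/* hypothetical, not page aligned */
	unsigned long count = 10000;

	while (count) {
		/* Same expression as the hunk: bytes left in the current page. */
		unsigned long size = PAGE_SIZE - (dest & ~PAGE_MASK);

		if (size > count)
			size = count;
		printf("copy %4lu bytes at %#lx\n", size, dest);
		count -= size;
		dest += size;
	}
	return 0;
}

Run as is, it prints a 256-byte head chunk, two full 4096-byte pages and a 1552-byte tail.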
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index cc30d1fb000..0dc2b6d0a1e 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -266,6 +266,7 @@ sysc_sigpending:
tm __TI_flags+3(%r12),_TIF_SYSCALL
jno sysc_return
lm %r2,%r7,__PT_R2(%r11) # load svc arguments
+ l %r10,__TI_sysc_table(%r12) # 31 bit system call table
xr %r8,%r8 # svc 0 returns -ENOSYS
clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
jnl sysc_nr_ok # invalid svc number -> do svc 0
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2b2188b97c6..e5b43c97a83 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -297,6 +297,7 @@ sysc_sigpending:
tm __TI_flags+7(%r12),_TIF_SYSCALL
jno sysc_return
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
lghi %r8,0 # svc 0 returns -ENOSYS
llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
cghi %r1,NR_syscalls
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 0ce9fb24503..d86e64eddb4 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -67,6 +67,11 @@ static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
case 0xac: /* stnsm */
case 0xad: /* stosm */
return -EINVAL;
+ case 0xc6:
+ switch (insn[0] & 0x0f) {
+ case 0x00: /* exrl */
+ return -EINVAL;
+ }
}
switch (insn[0]) {
case 0x0101: /* pr */
@@ -180,7 +185,6 @@ static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
break;
case 0xc6:
switch (insn[0] & 0x0f) {
- case 0x00: /* exrl */
case 0x02: /* pfdrl */
case 0x04: /* cghrl */
case 0x05: /* chrl */
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index d385eaadece..70979846076 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
*
* Atomically sets @v to @i and returns old @v
*/
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
{
return xchg64(&v->counter, n);
}
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
* Atomically checks if @v holds @o and replaces it with @n if so.
* Returns the old value at @v.
*/
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
+ long long n)
{
return cmpxchg64(&v->counter, o, n);
}
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 0d0395b1b15..1ad4a1f7d42 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
/* A 64bit atomic type */
typedef struct {
- u64 __aligned(8) counter;
+ long long counter;
} atomic64_t;
#define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
*
* Atomically reads the value of @v.
*/
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
{
/*
* Requires an atomic op to read both 32-bit parts consistently.
* Casting away const is safe since the atomic support routines
* do not write to memory if the value has not been modified.
*/
- return _atomic64_xchg_add((u64 *)&v->counter, 0);
+ return _atomic64_xchg_add((long long *)&v->counter, 0);
}
/**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
*
* Atomically adds @i to @v.
*/
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
{
_atomic64_xchg_add(&v->counter, i);
}
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
*
* Atomically adds @i to @v and returns @i + @v
*/
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
smp_mb(); /* barrier for proper semantics */
return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+ long long u)
{
smp_mb(); /* barrier for proper semantics */
return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
* atomic64_set() can't be just a raw store, since it would be lost if it
* fell between the load and store of one of the other atomic ops.
*/
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
{
_atomic64_xchg(&v->counter, n);
}
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
- int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+ long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+ long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+ int *lock, long long o, long long n);
/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 4001d5eab4b..0ccda3c425b 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
int _atomic_xchg_add(int *v, int i);
int _atomic_xchg_add_unless(int *v, int a, int u);
int _atomic_cmpxchg(int *ptr, int o, int n);
-u64 _atomic64_xchg(u64 *v, u64 n);
-u64 _atomic64_xchg_add(u64 *v, u64 i);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+long long _atomic64_xchg(long long *v, long long n);
+long long _atomic64_xchg_add(long long *v, long long i);
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
+long long _atomic64_cmpxchg(long long *v, long long o, long long n);
#define xchg(ptr, n) \
({ \
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
if (sizeof(*(ptr)) != 4) \
__cmpxchg_called_with_bad_pointer(); \
smp_mb(); \
- (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+ (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
+ (int)n); \
})
#define xchg64(ptr, n) \
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
if (sizeof(*(ptr)) != 8) \
__xchg_called_with_bad_pointer(); \
smp_mb(); \
- (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
+ (typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
+ (long long)(n)); \
})
#define cmpxchg64(ptr, o, n) \
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
if (sizeof(*(ptr)) != 8) \
__cmpxchg_called_with_bad_pointer(); \
smp_mb(); \
- (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
+ (typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
+ (long long)o, (long long)n); \
})
#else
@@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
switch (sizeof(*(ptr))) { \
case 4: \
__x = (typeof(__x))(unsigned long) \
- __insn_exch4((ptr), (u32)(unsigned long)(n)); \
+ __insn_exch4((ptr), \
+ (u32)(unsigned long)(n)); \
break; \
case 8: \
- __x = (typeof(__x)) \
+ __x = (typeof(__x)) \
__insn_exch((ptr), (unsigned long)(n)); \
break; \
default: \
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
switch (sizeof(*(ptr))) { \
case 4: \
__x = (typeof(__x))(unsigned long) \
- __insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
+ __insn_cmpexch4((ptr), \
+ (u32)(unsigned long)(n)); \
break; \
case 8: \
- __x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
+ __x = (typeof(__x))__insn_cmpexch((ptr), \
+ (long long)(n)); \
break; \
default: \
__cmpxchg_called_with_bad_pointer(); \
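The tile cmpxchg.h macros above dispatch on sizeof(*(ptr)) inside a statement expression and cast the result back to the pointed-to type; the patch only switches the 64-bit leg from u64 to long long and re-wraps the lines. A compilable userspace imitation of that dispatch idiom follows (swap32, swap64 and xchg_demo are stand-ins invented here, not the kernel primitives):

#include <stdio.h>

static int swap32(int *p, int n)
{
	int old = *p;
	*p = n;
	return old;
}

static long long swap64(long long *p, long long n)
{
	long long old = *p;
	*p = n;
	return old;
}

/* Same shape as the kernel macro: pick a helper by operand size, then
 * cast the result back to typeof(*(ptr)). */
#define xchg_demo(ptr, n)						\
({									\
	typeof(*(ptr)) __x;						\
	switch (sizeof(*(ptr))) {					\
	case 4:								\
		__x = (typeof(__x))swap32((int *)(ptr), (int)(n));	\
		break;							\
	case 8:								\
		__x = (typeof(__x))swap64((long long *)(ptr),		\
					  (long long)(n));		\
		break;							\
	}								\
	__x;								\
})

int main(void)
{
	int a = 1;
	long long b = 2;

	printf("old: %d %lld\n", (int)xchg_demo(&a, 5), (long long)xchg_demo(&b, 7LL));
	printf("new: %d %lld\n", a, b);
	return 0;
}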
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
index 63294f5a8ef..4f7ae39fa20 100644
--- a/arch/tile/include/asm/percpu.h
+++ b/arch/tile/include/asm/percpu.h
@@ -15,9 +15,37 @@
#ifndef _ASM_TILE_PERCPU_H
#define _ASM_TILE_PERCPU_H
-register unsigned long __my_cpu_offset __asm__("tp");
-#define __my_cpu_offset __my_cpu_offset
-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
+register unsigned long my_cpu_offset_reg asm("tp");
+
+#ifdef CONFIG_PREEMPT
+/*
+ * For full preemption, we can't just use the register variable
+ * directly, since we need barrier() to hazard against it, causing the
+ * compiler to reload anything computed from a previous "tp" value.
+ * But we also don't want to use volatile asm, since we'd like the
+ * compiler to be able to cache the value across multiple percpu reads.
+ * So we use a fake stack read as a hazard against barrier().
+ * The 'U' constraint is like 'm' but disallows postincrement.
+ */
+static inline unsigned long __my_cpu_offset(void)
+{
+ unsigned long tp;
+ register unsigned long *sp asm("sp");
+ asm("move %0, tp" : "=r" (tp) : "U" (*sp));
+ return tp;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+/*
+ * We don't need to hazard against barrier() since "tp" doesn't ever
+ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
+ * changes at function call points, at which we are already re-reading
+ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
+ */
+#define __my_cpu_offset my_cpu_offset_reg
+#endif
+
+#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
#include <asm-generic/percpu.h>
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index df27a1fd94a..531f4c36535 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = {
0,
"udn",
LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
- __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
+ __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
NULL
},
#ifndef __tilepro__
@@ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = {
1, /* disabled pending hypervisor support */
"idn",
LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
- __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
+ __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
NULL
},
{ /* access to user-space IPI */
@@ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = {
0,
"ipi",
LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
- __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
+ __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
NULL
},
#endif
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 088d5c141e6..2cbe6d5dd6b 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -815,6 +815,9 @@ STD_ENTRY(interrupt_return)
}
bzt r28, 1f
bnz r29, 1f
+ /* Disable interrupts explicitly for preemption. */
+ IRQ_DISABLE(r20,r21)
+ TRACE_IRQS_OFF
jal preempt_schedule_irq
FEEDBACK_REENTER(interrupt_return)
1:
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index ec755d3f373..b8fc497f243 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -841,6 +841,9 @@ STD_ENTRY(interrupt_return)
}
beqzt r28, 1f
bnez r29, 1f
+ /* Disable interrupts explicitly for preemption. */
+ IRQ_DISABLE(r20,r21)
+ TRACE_IRQS_OFF
jal preempt_schedule_irq
FEEDBACK_REENTER(interrupt_return)
1:
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 362284af3af..c93977a6211 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -23,6 +23,7 @@
#include <linux/mmzone.h>
#include <linux/dcache.h>
#include <linux/fs.h>
+#include <linux/string.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/ucontext.h>
@@ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt,
}
if (vma->vm_file) {
- char *s;
p = d_path(&vma->vm_file->f_path, buf, bufsize);
if (IS_ERR(p))
p = "?";
- s = strrchr(p, '/');
- if (s)
- p = s+1;
+ name = kbasename(p);
} else {
- p = "anon";
+ name = "anon";
}
/* Generate a string description of the vma info. */
- namelen = strlen(p);
+ namelen = strlen(name);
remaining = (bufsize - 1) - namelen;
- memmove(buf, p, namelen);
+ memmove(buf, name, namelen);
snprintf(buf + namelen, remaining, "[%lx+%lx] ",
vma->vm_start, vma->vm_end - vma->vm_start);
}
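In the stack.c hunk above, the open-coded strrchr()+1 dance is replaced by kbasename() from <linux/string.h> (hence the added include), which returns the component after the last '/' or the whole string if there is none. A small userspace stand-in shows the behaviour being relied on (kbasename_demo is the demo's own helper, not the kernel function):

#include <stdio.h>
#include <string.h>

static const char *kbasename_demo(const char *path)
{
	const char *tail = strrchr(path, '/');
	return tail ? tail + 1 : path;
}

int main(void)
{
	printf("%s\n", kbasename_demo("/usr/lib/libfoo.so.1"));	/* libfoo.so.1 */
	printf("%s\n", kbasename_demo("anon"));			/* anon */
	return 0;
}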
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 759efa337be..c89b211fd9e 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
EXPORT_SYMBOL(_atomic_xor);
-u64 _atomic64_xchg(u64 *v, u64 n)
+long long _atomic64_xchg(long long *v, long long n)
{
return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);
-u64 _atomic64_xchg_add(u64 *v, u64 i)
+long long _atomic64_xchg_add(long long *v, long long i)
{
return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
/*
* Note: argument order is switched here since it is easier
@@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
+long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 897783b3302..9d8449158cf 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1888,10 +1888,7 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
userpg->pmc_width = x86_pmu.cntval_bits;
- if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
- return;
-
- if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ if (!sched_clock_stable)
return;
userpg->cap_user_time = 1;
@@ -1899,10 +1896,8 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
userpg->time_shift = CYC2NS_SCALE_FACTOR;
userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
- if (sched_clock_stable && !check_tsc_disabled()) {
- userpg->cap_user_time_zero = 1;
- userpg->time_zero = this_cpu_read(cyc2ns_offset);
- }
+ userpg->cap_user_time_zero = 1;
+ userpg->time_zero = this_cpu_read(cyc2ns_offset);
}
/*
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3b8e7459dd4..2b2fce1b200 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3255,25 +3255,29 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
if (!test_bit(VCPU_EXREG_PDPTR,
(unsigned long *)&vcpu->arch.regs_dirty))
return;
if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
- vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
- vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
- vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
- vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
+ vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
+ vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
+ vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
+ vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
}
}
static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
{
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
- vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
- vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
- vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
- vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
+ mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
+ mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
+ mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
+ mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
}
__set_bit(VCPU_EXREG_PDPTR,
@@ -7777,10 +7781,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
- __clear_bit(VCPU_EXREG_PDPTR,
- (unsigned long *)&vcpu->arch.regs_avail);
- __clear_bit(VCPU_EXREG_PDPTR,
- (unsigned long *)&vcpu->arch.regs_dirty);
}
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 5596c7bdd32..082e8812971 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -700,7 +700,7 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
return -ENODEV;
- if (start > end || !addr)
+ if (start > end)
return -EINVAL;
mutex_lock(&pci_mmcfg_lock);
@@ -716,6 +716,11 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
return -EEXIST;
}
+ if (!addr) {
+ mutex_unlock(&pci_mmcfg_lock);
+ return -EINVAL;
+ }
+
rc = -EBUSY;
cfg = pci_mmconfig_alloc(seg, start, end, addr);
if (cfg == NULL) {