Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/Kbuild | 34
-rw-r--r--  include/asm-generic/Kbuild.asm | 46
-rw-r--r--  include/asm-generic/atomic.h | 71
-rw-r--r--  include/asm-generic/audit_change_attr.h | 8
-rw-r--r--  include/asm-generic/audit_dir_write.h | 14
-rw-r--r--  include/asm-generic/audit_read.h | 5
-rw-r--r--  include/asm-generic/audit_write.h | 8
-rw-r--r--  include/asm-generic/auxvec.h | 8
-rw-r--r--  include/asm-generic/barrier.h | 89
-rw-r--r--  include/asm-generic/bitops.h | 12
-rw-r--r--  include/asm-generic/bitops/atomic.h | 4
-rw-r--r--  include/asm-generic/bitops/builtin-__ffs.h | 15
-rw-r--r--  include/asm-generic/bitops/builtin-__fls.h | 15
-rw-r--r--  include/asm-generic/bitops/builtin-ffs.h | 17
-rw-r--r--  include/asm-generic/bitops/builtin-fls.h | 16
-rw-r--r--  include/asm-generic/bitops/const_hweight.h | 17
-rw-r--r--  include/asm-generic/bitops/count_zeros.h | 57
-rw-r--r--  include/asm-generic/bitops/ext2-atomic-setbit.h | 11
-rw-r--r--  include/asm-generic/bitops/ext2-atomic.h | 8
-rw-r--r--  include/asm-generic/bitops/ext2-non-atomic.h | 20
-rw-r--r--  include/asm-generic/bitops/find.h | 16
-rw-r--r--  include/asm-generic/bitops/le.h | 106
-rw-r--r--  include/asm-generic/bitops/lock.h | 2
-rw-r--r--  include/asm-generic/bitops/minix-le.h | 17
-rw-r--r--  include/asm-generic/bitops/minix.h | 15
-rw-r--r--  include/asm-generic/bitsperlong.h | 17
-rw-r--r--  include/asm-generic/bug.h | 133
-rw-r--r--  include/asm-generic/cacheflush.h | 5
-rw-r--r--  include/asm-generic/checksum.h | 8
-rw-r--r--  include/asm-generic/clkdev.h | 28
-rw-r--r--  include/asm-generic/cmpxchg-local.h | 11
-rw-r--r--  include/asm-generic/cmpxchg.h | 98
-rw-r--r--  include/asm-generic/cputime.h | 67
-rw-r--r--  include/asm-generic/cputime_jiffies.h | 74
-rw-r--r--  include/asm-generic/cputime_nsecs.h | 117
-rw-r--r--  include/asm-generic/delay.h | 37
-rw-r--r--  include/asm-generic/dma-coherent.h | 18
-rw-r--r--  include/asm-generic/dma-mapping-broken.h | 16
-rw-r--r--  include/asm-generic/dma-mapping-common.h | 70
-rw-r--r--  include/asm-generic/early_ioremap.h | 42
-rw-r--r--  include/asm-generic/errno-base.h | 39
-rw-r--r--  include/asm-generic/errno.h | 111
-rw-r--r--  include/asm-generic/exec.h | 19
-rw-r--r--  include/asm-generic/fcntl.h | 196
-rw-r--r--  include/asm-generic/fixmap.h | 100
-rw-r--r--  include/asm-generic/ftrace.h | 16
-rw-r--r--  include/asm-generic/futex.h | 7
-rw-r--r--  include/asm-generic/getorder.h | 53
-rw-r--r--  include/asm-generic/gpio.h | 288
-rw-r--r--  include/asm-generic/hash.h | 9
-rw-r--r--  include/asm-generic/hugetlb.h | 40
-rw-r--r--  include/asm-generic/int-l64.h | 73
-rw-r--r--  include/asm-generic/int-ll64.h | 31
-rw-r--r--  include/asm-generic/io-64-nonatomic-hi-lo.h | 28
-rw-r--r--  include/asm-generic/io-64-nonatomic-lo-hi.h | 28
-rw-r--r--  include/asm-generic/io.h | 102
-rw-r--r--  include/asm-generic/ioctl.h | 96
-rw-r--r--  include/asm-generic/ioctls.h | 112
-rw-r--r--  include/asm-generic/iomap.h | 13
-rw-r--r--  include/asm-generic/ipcbuf.h | 34
-rw-r--r--  include/asm-generic/irq_regs.h | 8
-rw-r--r--  include/asm-generic/kmap_types.h | 34
-rw-r--r--  include/asm-generic/kvm_para.h | 26
-rw-r--r--  include/asm-generic/local.h | 2
-rw-r--r--  include/asm-generic/local64.h | 2
-rw-r--r--  include/asm-generic/mcs_spinlock.h | 13
-rw-r--r--  include/asm-generic/memory_model.h | 6
-rw-r--r--  include/asm-generic/mman-common.h | 51
-rw-r--r--  include/asm-generic/mman.h | 19
-rw-r--r--  include/asm-generic/mmu.h | 6
-rw-r--r--  include/asm-generic/module.h | 40
-rw-r--r--  include/asm-generic/msgbuf.h | 47
-rw-r--r--  include/asm-generic/mutex-dec.h | 10
-rw-r--r--  include/asm-generic/mutex-null.h | 2
-rw-r--r--  include/asm-generic/mutex-xchg.h | 19
-rw-r--r--  include/asm-generic/page.h | 14
-rw-r--r--  include/asm-generic/param.h | 20
-rw-r--r--  include/asm-generic/parport.h | 4
-rw-r--r--  include/asm-generic/pci-bridge.h | 74
-rw-r--r--  include/asm-generic/pci.h | 24
-rw-r--r--  include/asm-generic/pci_iomap.h | 35
-rw-r--r--  include/asm-generic/percpu.h | 13
-rw-r--r--  include/asm-generic/pgtable.h | 665
-rw-r--r--  include/asm-generic/poll.h | 37
-rw-r--r--  include/asm-generic/posix_types.h | 165
-rw-r--r--  include/asm-generic/preempt.h | 92
-rw-r--r--  include/asm-generic/ptrace.h | 74
-rw-r--r--  include/asm-generic/qrwlock.h | 166
-rw-r--r--  include/asm-generic/qrwlock_types.h | 21
-rw-r--r--  include/asm-generic/resource.h | 70
-rw-r--r--  include/asm-generic/rwsem.h | 132
-rw-r--r--  include/asm-generic/sections.h | 22
-rw-r--r--  include/asm-generic/sembuf.h | 38
-rw-r--r--  include/asm-generic/setup.h | 6
-rw-r--r--  include/asm-generic/shmbuf.h | 59
-rw-r--r--  include/asm-generic/shmparam.h | 6
-rw-r--r--  include/asm-generic/siginfo.h | 271
-rw-r--r--  include/asm-generic/signal-defs.h | 28
-rw-r--r--  include/asm-generic/signal.h | 119
-rw-r--r--  include/asm-generic/simd.h | 14
-rw-r--r--  include/asm-generic/sizes.h | 2
-rw-r--r--  include/asm-generic/socket.h | 67
-rw-r--r--  include/asm-generic/sockios.h | 13
-rw-r--r--  include/asm-generic/stat.h | 72
-rw-r--r--  include/asm-generic/statfs.h | 81
-rw-r--r--  include/asm-generic/swab.h | 18
-rw-r--r--  include/asm-generic/switch_to.h | 30
-rw-r--r--  include/asm-generic/syscall.h | 12
-rw-r--r--  include/asm-generic/syscalls.h | 34
-rw-r--r--  include/asm-generic/system.h | 143
-rw-r--r--  include/asm-generic/termbits.h | 199
-rw-r--r--  include/asm-generic/termios.h | 49
-rw-r--r--  include/asm-generic/tlb.h | 176
-rw-r--r--  include/asm-generic/tlbflush.h | 2
-rw-r--r--  include/asm-generic/trace_clock.h | 16
-rw-r--r--  include/asm-generic/types.h | 42
-rw-r--r--  include/asm-generic/uaccess.h | 39
-rw-r--r--  include/asm-generic/ucontext.h | 12
-rw-r--r--  include/asm-generic/unaligned.h | 21
-rw-r--r--  include/asm-generic/unistd.h | 878
-rw-r--r--  include/asm-generic/user.h | 4
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 299
-rw-r--r--  include/asm-generic/vtime.h | 1
-rw-r--r--  include/asm-generic/word-at-a-time.h | 56
-rw-r--r--  include/asm-generic/xor.h | 6
125 files changed, 3277 insertions(+), 4216 deletions(-)
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
deleted file mode 100644
index 53f91b1ae53..00000000000
--- a/include/asm-generic/Kbuild
+++ /dev/null
@@ -1,34 +0,0 @@
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += errno-base.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += int-l64.h
-header-y += int-ll64.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += mman-common.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += shmparam.h
-header-y += siginfo.h
-header-y += signal-defs.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += ucontext.h
-header-y += unistd.h
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index c5d2e5dd871..d2ee86b4c09 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -1,45 +1 @@
-ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \
- $(srctree)/include/asm-$(SRCARCH)/kvm.h),)
-header-y += kvm.h
-endif
-
-ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm_para.h \
- $(srctree)/include/asm-$(SRCARCH)/kvm_para.h),)
-header-y += kvm_para.h
-endif
-
-ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \
- $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
-header-y += a.out.h
-endif
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
+include include/uapi/asm-generic/Kbuild.asm
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index e994197f84b..9c79e760345 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -1,5 +1,7 @@
/*
- * Generic C implementation of atomic counter operations
+ * Generic C implementation of atomic counter operations. Usable on
+ * UP systems only. Do not include in machine independent code.
+ *
* Originally implemented for MN10300.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
@@ -13,8 +15,15 @@
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
#ifdef CONFIG_SMP
-#error not SMP safe
+/* Force people to define core atomics */
+# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
+ !defined(atomic_clear_mask) || !defined(atomic_set_mask)
+# error "SMP requires a little arch-specific magic"
+# endif
#endif
/*
@@ -32,7 +41,9 @@
*
* Atomically reads the value of @v.
*/
+#ifndef atomic_read
#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#endif
/**
* atomic_set - set atomic variable
@@ -44,7 +55,6 @@
#define atomic_set(v, i) (((v)->counter) = (i))
#include <linux/irqflags.h>
-#include <asm/system.h>
/**
* atomic_add_return - add integer to atomic variable
@@ -53,6 +63,7 @@
*
* Atomically adds @i to @v and returns the result
*/
+#ifndef atomic_add_return
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long flags;
@@ -66,6 +77,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
return temp;
}
+#endif
/**
* atomic_sub_return - subtract integer from atomic variable
@@ -74,6 +86,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
*
* Atomically subtracts @i from @v and returns the result
*/
+#ifndef atomic_sub_return
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long flags;
@@ -87,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
return temp;
}
+#endif
static inline int atomic_add_negative(int i, atomic_t *v)
{
@@ -117,46 +131,57 @@ static inline void atomic_dec(atomic_t *v)
#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr))))
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
c = old;
- return c != u;
+ return c;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+/**
+ * atomic_clear_mask - Atomically clear bits in atomic variable
+ * @mask: Mask of the bits to be cleared
+ * @v: pointer of type atomic_t
+ *
+ * Atomically clears the bits set in @mask from @v
+ */
+#ifndef atomic_clear_mask
+static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
unsigned long flags;
mask = ~mask;
raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
- *addr &= mask;
+ v->counter &= mask;
raw_local_irq_restore(flags);
}
+#endif
-/* Assume that atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
+/**
+ * atomic_set_mask - Atomically set bits in atomic variable
+ * @mask: Mask of the bits to be set
+ * @v: pointer of type atomic_t
+ *
+ * Atomically sets the bits set in @mask in @v
+ */
+#ifndef atomic_set_mask
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long flags;
-#include <asm-generic/atomic-long.h>
+ raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
+ v->counter |= mask;
+ raw_local_irq_restore(flags);
+}
+#endif
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_ATOMIC_H */
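
The reworked header wraps each core operation in #ifndef guards, so an architecture can supply its own atomic_add_return()/atomic_sub_return() (and on SMP it must, per the #error above) while keeping the IRQ-disabling fallbacks for everything else. Note also the calling-convention change: __atomic_add_unless() now returns the old counter value rather than a success flag. A minimal sketch of how a caller layer turns that back into a boolean; the my_ helpers are illustrative only, not part of the patch:

    #include <linux/atomic.h>

    /* Non-zero iff @a was added, i.e. *v was not already @u. */
    static inline int my_atomic_add_unless(atomic_t *v, int a, int u)
    {
            return __atomic_add_unless(v, a, u) != u;
    }

    /* The common "take a reference unless the object is dead" idiom. */
    static inline int my_atomic_inc_not_zero(atomic_t *v)
    {
            return my_atomic_add_unless(v, 1, 0);
    }
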
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
index bcbab3e4a3b..a1865537339 100644
--- a/include/asm-generic/audit_change_attr.h
+++ b/include/asm-generic/audit_change_attr.h
@@ -1,10 +1,14 @@
+#ifdef __NR_chmod
__NR_chmod,
+#endif
__NR_fchmod,
#ifdef __NR_chown
__NR_chown,
-__NR_fchown,
__NR_lchown,
#endif
+#ifdef __NR_fchown
+__NR_fchown,
+#endif
__NR_setxattr,
__NR_lsetxattr,
__NR_fsetxattr,
@@ -20,7 +24,9 @@ __NR_chown32,
__NR_fchown32,
__NR_lchown32,
#endif
+#ifdef __NR_link
__NR_link,
+#endif
#ifdef __NR_linkat
__NR_linkat,
#endif
diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h
index 6621bd82cbe..7b61db4fe72 100644
--- a/include/asm-generic/audit_dir_write.h
+++ b/include/asm-generic/audit_dir_write.h
@@ -1,13 +1,27 @@
+#ifdef __NR_rename
__NR_rename,
+#endif
+#ifdef __NR_mkdir
__NR_mkdir,
+#endif
+#ifdef __NR_rmdir
__NR_rmdir,
+#endif
#ifdef __NR_creat
__NR_creat,
#endif
+#ifdef __NR_link
__NR_link,
+#endif
+#ifdef __NR_unlink
__NR_unlink,
+#endif
+#ifdef __NR_symlink
__NR_symlink,
+#endif
+#ifdef __NR_mknod
__NR_mknod,
+#endif
#ifdef __NR_mkdirat
__NR_mkdirat,
__NR_mknodat,
diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h
index 0e87464d984..3b249cb857d 100644
--- a/include/asm-generic/audit_read.h
+++ b/include/asm-generic/audit_read.h
@@ -1,4 +1,6 @@
+#ifdef __NR_readlink
__NR_readlink,
+#endif
__NR_quotactl,
__NR_listxattr,
__NR_llistxattr,
@@ -6,3 +8,6 @@ __NR_flistxattr,
__NR_getxattr,
__NR_lgetxattr,
__NR_fgetxattr,
+#ifdef __NR_readlinkat
+__NR_readlinkat,
+#endif
diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h
index c5f1c2c920e..274575d7129 100644
--- a/include/asm-generic/audit_write.h
+++ b/include/asm-generic/audit_write.h
@@ -4,10 +4,18 @@ __NR_acct,
__NR_swapon,
#endif
__NR_quotactl,
+#ifdef __NR_truncate
__NR_truncate,
+#endif
#ifdef __NR_truncate64
__NR_truncate64,
#endif
+#ifdef __NR_ftruncate
+__NR_ftruncate,
+#endif
+#ifdef __NR_ftruncate64
+__NR_ftruncate64,
+#endif
#ifdef __NR_bind
__NR_bind, /* bind can affect fs object only in one way... */
#endif
diff --git a/include/asm-generic/auxvec.h b/include/asm-generic/auxvec.h
deleted file mode 100644
index b99573b0ad1..00000000000
--- a/include/asm-generic/auxvec.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __ASM_GENERIC_AUXVEC_H
-#define __ASM_GENERIC_AUXVEC_H
-/*
- * Not all architectures need their own auxvec.h, the most
- * common definitions are already in linux/auxvec.h.
- */
-
-#endif /* __ASM_GENERIC_AUXVEC_H */
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
new file mode 100644
index 00000000000..1402fa85538
--- /dev/null
+++ b/include/asm-generic/barrier.h
@@ -0,0 +1,89 @@
+/*
+ * Generic barrier definitions, originally based on MN10300 definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but it serves more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_BARRIER_H
+#define __ASM_GENERIC_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+
+#ifndef nop
+#define nop() asm volatile ("nop")
+#endif
+
+/*
+ * Force strict CPU ordering. And yes, this is required on UP too when we're
+ * talking to devices.
+ *
+ * Fall back to compiler barriers if nothing better is provided.
+ */
+
+#ifndef mb
+#define mb() barrier()
+#endif
+
+#ifndef rmb
+#define rmb() mb()
+#endif
+
+#ifndef wmb
+#define wmb() mb()
+#endif
+
+#ifndef read_barrier_depends
+#define read_barrier_depends() do { } while (0)
+#endif
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_read_barrier_depends() read_barrier_depends()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#endif
+
+#ifndef set_mb
+#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+#endif
+
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() smp_mb()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic() smp_mb()
+#endif
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_GENERIC_BARRIER_H */
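
The smp_store_release()/smp_load_acquire() pair is the notable addition here: the generic versions are built from a full smp_mb() plus an ACCESS_ONCE() store or load. A minimal sketch of the one-slot publish/consume pattern they are meant for, assuming a kernel build; the variable names are illustrative:

    #include <linux/compiler.h>
    #include <asm/barrier.h>

    static int payload;
    static int ready;                       /* 0 = empty, 1 = payload valid */

    /* Producer: every store before the release is visible to a consumer
     * that observes ready == 1. */
    static void publish(int val)
    {
            payload = val;
            smp_store_release(&ready, 1);
    }

    /* Consumer: the acquire orders the flag load before the payload load. */
    static int consume(int *val)
    {
            if (!smp_load_acquire(&ready))
                    return 0;
            *val = payload;
            return 1;
    }
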
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index a54f4421a24..dcdcacf2fd2 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -11,14 +11,7 @@
#include <linux/irqflags.h>
#include <linux/compiler.h>
-
-/*
- * clear_bit may not imply a memory barrier
- */
-#ifndef smp_mb__before_clear_bit
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-#endif
+#include <asm/barrier.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
@@ -38,8 +31,7 @@
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
-#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
-#include <asm-generic/bitops/minix.h>
#endif /* __ASM_GENERIC_BITOPS_H */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index ecc44a8e2b4..49673510b48 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -2,7 +2,7 @@
#define _ASM_GENERIC_BITOPS_ATOMIC_H_
#include <asm/types.h>
-#include <asm/system.h>
+#include <linux/irqflags.h>
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
@@ -80,7 +80,7 @@ static inline void set_bit(int nr, volatile unsigned long *addr)
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
static inline void clear_bit(int nr, volatile unsigned long *addr)
diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h
new file mode 100644
index 00000000000..90041e3a41f
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-__ffs.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ return __builtin_ctzl(word);
+}
+
+#endif
diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h
new file mode 100644
index 00000000000..0248f386635
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-__fls.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __fls(unsigned long word)
+{
+ return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
+}
+
+#endif
diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h
new file mode 100644
index 00000000000..064825829e1
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-ffs.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static __always_inline int ffs(int x)
+{
+ return __builtin_ffs(x);
+}
+
+#endif
diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h
new file mode 100644
index 00000000000..eda652d0ac7
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-fls.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_
+
+/**
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static __always_inline int fls(int x)
+{
+ return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
+}
+
+#endif
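
The four builtin-backed helpers above use two conventions: __ffs()/__fls() return 0-based bit indices and are undefined for 0, while ffs()/fls() return 1-based positions and return 0 for 0. A small stand-alone C sketch of the same expressions, checkable in user space with the GCC/Clang builtins the headers wrap:

    #include <assert.h>

    int main(void)
    {
            unsigned long x = 0x90UL;       /* bits 4 and 7 set */

            assert(__builtin_ctzl(x) == 4);                          /* __ffs(0x90) == 4 */
            assert(sizeof(x) * 8 - 1 - __builtin_clzl(x) == 7);      /* __fls(0x90) == 7 */
            assert(__builtin_ffs(0x90) == 5);                        /* ffs(0x90)  == 5 */
            assert((int)sizeof(int) * 8 - __builtin_clz(0x90) == 8); /* fls(0x90)  == 8 */
            return 0;
    }
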
diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h
index fa2a50b7ee6..0a7e0662347 100644
--- a/include/asm-generic/bitops/const_hweight.h
+++ b/include/asm-generic/bitops/const_hweight.h
@@ -5,14 +5,15 @@
* Compile time versions of __arch_hweightN()
*/
#define __const_hweight8(w) \
- ( (!!((w) & (1ULL << 0))) + \
- (!!((w) & (1ULL << 1))) + \
- (!!((w) & (1ULL << 2))) + \
- (!!((w) & (1ULL << 3))) + \
- (!!((w) & (1ULL << 4))) + \
- (!!((w) & (1ULL << 5))) + \
- (!!((w) & (1ULL << 6))) + \
- (!!((w) & (1ULL << 7))) )
+ ((unsigned int) \
+ ((!!((w) & (1ULL << 0))) + \
+ (!!((w) & (1ULL << 1))) + \
+ (!!((w) & (1ULL << 2))) + \
+ (!!((w) & (1ULL << 3))) + \
+ (!!((w) & (1ULL << 4))) + \
+ (!!((w) & (1ULL << 5))) + \
+ (!!((w) & (1ULL << 6))) + \
+ (!!((w) & (1ULL << 7)))))
#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 ))
#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
diff --git a/include/asm-generic/bitops/count_zeros.h b/include/asm-generic/bitops/count_zeros.h
new file mode 100644
index 00000000000..97520d21fe6
--- /dev/null
+++ b/include/asm-generic/bitops/count_zeros.h
@@ -0,0 +1,57 @@
+/* Count leading and trailing zeros functions
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_GENERIC_BITOPS_COUNT_ZEROS_H_
+#define _ASM_GENERIC_BITOPS_COUNT_ZEROS_H_
+
+#include <asm/bitops.h>
+
+/**
+ * count_leading_zeros - Count the number of zeros from the MSB back
+ * @x: The value
+ *
+ * Count the number of leading zeros from the MSB going towards the LSB in @x.
+ *
+ * If the MSB of @x is set, the result is 0.
+ * If only the LSB of @x is set, then the result is BITS_PER_LONG-1.
+ * If @x is 0 then the result is COUNT_LEADING_ZEROS_0.
+ */
+static inline int count_leading_zeros(unsigned long x)
+{
+ if (sizeof(x) == 4)
+ return BITS_PER_LONG - fls(x);
+ else
+ return BITS_PER_LONG - fls64(x);
+}
+
+#define COUNT_LEADING_ZEROS_0 BITS_PER_LONG
+
+/**
+ * count_trailing_zeros - Count the number of zeros from the LSB forwards
+ * @x: The value
+ *
+ * Count the number of trailing zeros from the LSB going towards the MSB in @x.
+ *
+ * If the LSB of @x is set, the result is 0.
+ * If only the MSB of @x is set, then the result is BITS_PER_LONG-1.
+ * If @x is 0 then the result is COUNT_TRAILING_ZEROS_0.
+ */
+static inline int count_trailing_zeros(unsigned long x)
+{
+#define COUNT_TRAILING_ZEROS_0 (-1)
+
+ if (sizeof(x) == 4)
+ return ffs(x);
+ else
+ return (x != 0) ? __ffs(x) : COUNT_TRAILING_ZEROS_0;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_COUNT_ZEROS_H_ */
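
count_leading_zeros()/count_trailing_zeros() report zero counts relative to BITS_PER_LONG rather than bit positions. A minimal sketch of a typical use, assuming the header is included directly by its caller; significant_bits() is an illustrative name:

    #include <asm-generic/bitops/count_zeros.h>

    /* Number of significant bits in @x (0 for x == 0). */
    static inline unsigned int significant_bits(unsigned long x)
    {
            return x ? BITS_PER_LONG - count_leading_zeros(x) : 0;
    }
    /* e.g. significant_bits(0x90) == 8, significant_bits(1) == 1 */
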
diff --git a/include/asm-generic/bitops/ext2-atomic-setbit.h b/include/asm-generic/bitops/ext2-atomic-setbit.h
new file mode 100644
index 00000000000..5a0997857b3
--- /dev/null
+++ b/include/asm-generic/bitops/ext2-atomic-setbit.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_
+#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_
+
+/*
+ * Atomic bitops based version of ext2 atomic bitops
+ */
+
+#define ext2_set_bit_atomic(l, nr, addr) test_and_set_bit_le(nr, addr)
+#define ext2_clear_bit_atomic(l, nr, addr) test_and_clear_bit_le(nr, addr)
+
+#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ */
diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h
index ab1c875efb7..87f0f109d7f 100644
--- a/include/asm-generic/bitops/ext2-atomic.h
+++ b/include/asm-generic/bitops/ext2-atomic.h
@@ -1,11 +1,15 @@
#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
+/*
+ * Spinlock based version of ext2 atomic bitops
+ */
+
#define ext2_set_bit_atomic(lock, nr, addr) \
({ \
int ret; \
spin_lock(lock); \
- ret = ext2_set_bit((nr), (unsigned long *)(addr)); \
+ ret = __test_and_set_bit_le(nr, addr); \
spin_unlock(lock); \
ret; \
})
@@ -14,7 +18,7 @@
({ \
int ret; \
spin_lock(lock); \
- ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \
+ ret = __test_and_clear_bit_le(nr, addr); \
spin_unlock(lock); \
ret; \
})
diff --git a/include/asm-generic/bitops/ext2-non-atomic.h b/include/asm-generic/bitops/ext2-non-atomic.h
deleted file mode 100644
index 63cf822431a..00000000000
--- a/include/asm-generic/bitops/ext2-non-atomic.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
-#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
-
-#include <asm-generic/bitops/le.h>
-
-#define ext2_set_bit(nr,addr) \
- generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
-#define ext2_clear_bit(nr,addr) \
- generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
-
-#define ext2_test_bit(nr,addr) \
- generic_test_le_bit((nr),(unsigned long *)(addr))
-#define ext2_find_first_zero_bit(addr, size) \
- generic_find_first_zero_le_bit((unsigned long *)(addr), (size))
-#define ext2_find_next_zero_bit(addr, size, off) \
- generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
-#define ext2_find_next_bit(addr, size, off) \
- generic_find_next_le_bit((unsigned long *)(addr), (size), (off))
-
-#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 110fa700f85..998d4d544f1 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -1,32 +1,43 @@
#ifndef _ASM_GENERIC_BITOPS_FIND_H_
#define _ASM_GENERIC_BITOPS_FIND_H_
+#ifndef find_next_bit
/**
* find_next_bit - find the next set bit in a memory region
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
*/
extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
size, unsigned long offset);
+#endif
+#ifndef find_next_zero_bit
/**
* find_next_zero_bit - find the next cleared bit in a memory region
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
+ *
+ * Returns the bit number of the next zero bit
+ * If no bits are zero, returns @size.
*/
extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
long size, unsigned long offset);
+#endif
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
/**
* find_first_bit - find the first set bit in a memory region
* @addr: The address to start the search at
- * @size: The maximum size to search
+ * @size: The maximum number of bits to search
*
* Returns the bit number of the first set bit.
+ * If no bits are set, returns @size.
*/
extern unsigned long find_first_bit(const unsigned long *addr,
unsigned long size);
@@ -34,9 +45,10 @@ extern unsigned long find_first_bit(const unsigned long *addr,
/**
* find_first_zero_bit - find the first cleared bit in a memory region
* @addr: The address to start the search at
- * @size: The maximum size to search
+ * @size: The maximum number of bits to search
*
* Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
*/
extern unsigned long find_first_zero_bit(const unsigned long *addr,
unsigned long size);
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 80e3bf13b2b..61731543c00 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -4,54 +4,94 @@
#include <asm/types.h>
#include <asm/byteorder.h>
-#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-
#if defined(__LITTLE_ENDIAN)
-#define generic_test_le_bit(nr, addr) test_bit(nr, addr)
-#define generic___set_le_bit(nr, addr) __set_bit(nr, addr)
-#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr)
+#define BITOP_LE_SWIZZLE 0
-#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr)
-#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr)
+static inline unsigned long find_next_zero_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_zero_bit(addr, size, offset);
+}
-#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr)
-#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
+static inline unsigned long find_next_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_bit(addr, size, offset);
+}
-#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
-#define generic_find_next_le_bit(addr, size, offset) \
- find_next_bit(addr, size, offset)
+static inline unsigned long find_first_zero_bit_le(const void *addr,
+ unsigned long size)
+{
+ return find_first_zero_bit(addr, size);
+}
#elif defined(__BIG_ENDIAN)
-#define generic_test_le_bit(nr, addr) \
- test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define generic___set_le_bit(nr, addr) \
- __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define generic___clear_le_bit(nr, addr) \
- __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-
-#define generic_test_and_set_le_bit(nr, addr) \
- test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define generic_test_and_clear_le_bit(nr, addr) \
- test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-
-#define generic___test_and_set_le_bit(nr, addr) \
- __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define generic___test_and_clear_le_bit(nr, addr) \
- __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
+#ifndef find_next_zero_bit_le
+extern unsigned long find_next_zero_bit_le(const void *addr,
unsigned long size, unsigned long offset);
-extern unsigned long generic_find_next_le_bit(const unsigned long *addr,
+#endif
+
+#ifndef find_next_bit_le
+extern unsigned long find_next_bit_le(const void *addr,
unsigned long size, unsigned long offset);
+#endif
+
+#ifndef find_first_zero_bit_le
+#define find_first_zero_bit_le(addr, size) \
+ find_next_zero_bit_le((addr), (size), 0)
+#endif
#else
#error "Please fix <asm/byteorder.h>"
#endif
-#define generic_find_first_zero_le_bit(addr, size) \
- generic_find_next_zero_le_bit((addr), (size), 0)
+static inline int test_bit_le(int nr, const void *addr)
+{
+ return test_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void set_bit_le(int nr, void *addr)
+{
+ set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void clear_bit_le(int nr, void *addr)
+{
+ clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void __set_bit_le(int nr, void *addr)
+{
+ __set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void __clear_bit_le(int nr, void *addr)
+{
+ __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int test_and_set_bit_le(int nr, void *addr)
+{
+ return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int test_and_clear_bit_le(int nr, void *addr)
+{
+ return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int __test_and_set_bit_le(int nr, void *addr)
+{
+ return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int __test_and_clear_bit_le(int nr, void *addr)
+{
+ return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
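
The rewrite replaces the generic_*_le_bit() macro family with typed *_le() helpers: on little-endian hosts BITOP_LE_SWIZZLE is 0 and they collapse to the native bitops, while on big-endian hosts the bit number is swizzled within each long so that a little-endian on-disk bitmap layout (as ext2 uses) is addressed consistently. A minimal sketch of a caller, with illustrative names and locking left to the caller:

    #include <linux/bitops.h>

    /* Claim the first free slot in a little-endian on-disk bitmap. */
    static int alloc_slot(unsigned long *bitmap, unsigned int nbits)
    {
            unsigned int bit = find_first_zero_bit_le(bitmap, nbits);

            if (bit >= nbits)
                    return -1;                  /* bitmap full */
            __set_bit_le(bit, bitmap);          /* non-atomic; caller holds the lock */
            return bit;
    }
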
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 308a9e22c80..c30266e9480 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -20,7 +20,7 @@
*/
#define clear_bit_unlock(nr, addr) \
do { \
- smp_mb__before_clear_bit(); \
+ smp_mb__before_atomic(); \
clear_bit(nr, addr); \
} while (0)
diff --git a/include/asm-generic/bitops/minix-le.h b/include/asm-generic/bitops/minix-le.h
deleted file mode 100644
index 4a981c1bb1a..00000000000
--- a/include/asm-generic/bitops/minix-le.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_MINIX_LE_H_
-#define _ASM_GENERIC_BITOPS_MINIX_LE_H_
-
-#include <asm-generic/bitops/le.h>
-
-#define minix_test_and_set_bit(nr,addr) \
- generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
-#define minix_set_bit(nr,addr) \
- generic___set_le_bit((nr),(unsigned long *)(addr))
-#define minix_test_and_clear_bit(nr,addr) \
- generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
-#define minix_test_bit(nr,addr) \
- generic_test_le_bit((nr),(unsigned long *)(addr))
-#define minix_find_first_zero_bit(addr,size) \
- generic_find_first_zero_le_bit((unsigned long *)(addr),(size))
-
-#endif /* _ASM_GENERIC_BITOPS_MINIX_LE_H_ */
diff --git a/include/asm-generic/bitops/minix.h b/include/asm-generic/bitops/minix.h
deleted file mode 100644
index 91f42e87aa5..00000000000
--- a/include/asm-generic/bitops/minix.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_MINIX_H_
-#define _ASM_GENERIC_BITOPS_MINIX_H_
-
-#define minix_test_and_set_bit(nr,addr) \
- __test_and_set_bit((nr),(unsigned long *)(addr))
-#define minix_set_bit(nr,addr) \
- __set_bit((nr),(unsigned long *)(addr))
-#define minix_test_and_clear_bit(nr,addr) \
- __test_and_clear_bit((nr),(unsigned long *)(addr))
-#define minix_test_bit(nr,addr) \
- test_bit((nr),(unsigned long *)(addr))
-#define minix_find_first_zero_bit(addr,size) \
- find_first_zero_bit((unsigned long *)(addr),(size))
-
-#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */
diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h
index 4ae54e07de8..d1d70aa1902 100644
--- a/include/asm-generic/bitsperlong.h
+++ b/include/asm-generic/bitsperlong.h
@@ -1,18 +1,8 @@
#ifndef __ASM_GENERIC_BITS_PER_LONG
#define __ASM_GENERIC_BITS_PER_LONG
-/*
- * There seems to be no way of detecting this automatically from user
- * space, so 64 bit architectures should override this in their
- * bitsperlong.h. In particular, an architecture that supports
- * both 32 and 64 bit user space must not rely on CONFIG_64BIT
- * to decide it, but rather check a compiler provided macro.
- */
-#ifndef __BITS_PER_LONG
-#define __BITS_PER_LONG 32
-#endif
+#include <uapi/asm-generic/bitsperlong.h>
-#ifdef __KERNEL__
#ifdef CONFIG_64BIT
#define BITS_PER_LONG 64
@@ -28,5 +18,8 @@
#error Inconsistent word size. Check asm/bitsperlong.h
#endif
-#endif /* __KERNEL__ */
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif
+
#endif /* __ASM_GENERIC_BITS_PER_LONG */
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index c2c9ba032d4..630dd237223 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -3,10 +3,18 @@
#include <linux/compiler.h>
+#ifdef CONFIG_GENERIC_BUG
+#define BUGFLAG_WARNING (1 << 0)
+#define BUGFLAG_TAINT(taint) (BUGFLAG_WARNING | ((taint) << 8))
+#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
+#endif
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+
#ifdef CONFIG_BUG
#ifdef CONFIG_GENERIC_BUG
-#ifndef __ASSEMBLY__
struct bug_entry {
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
unsigned long bug_addr;
@@ -23,12 +31,6 @@ struct bug_entry {
#endif
unsigned short flags;
};
-#endif /* __ASSEMBLY__ */
-
-#define BUGFLAG_WARNING (1 << 0)
-#define BUGFLAG_TAINT(taint) (BUGFLAG_WARNING | ((taint) << 8))
-#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
-
#endif /* CONFIG_GENERIC_BUG */
/*
@@ -50,7 +52,7 @@ struct bug_entry {
#endif
#ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
#endif
/*
@@ -60,15 +62,14 @@ struct bug_entry {
* to provide better diagnostics.
*/
#ifndef __WARN_TAINT
-#ifndef __ASSEMBLY__
-extern void warn_slowpath_fmt(const char *file, const int line,
- const char *fmt, ...) __attribute__((format(printf, 3, 4)));
-extern void warn_slowpath_fmt_taint(const char *file, const int line,
- unsigned taint, const char *fmt, ...)
- __attribute__((format(printf, 4, 5)));
+extern __printf(3, 4)
+void warn_slowpath_fmt(const char *file, const int line,
+ const char *fmt, ...);
+extern __printf(4, 5)
+void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
+ const char *fmt, ...);
extern void warn_slowpath_null(const char *file, const int line);
#define WANT_WARN_ON_SLOWPATH
-#endif
#define __WARN() warn_slowpath_null(__FILE__, __LINE__)
#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
#define __WARN_printf_taint(taint, arg...) \
@@ -105,35 +106,8 @@ extern void warn_slowpath_null(const char *file, const int line);
unlikely(__ret_warn_on); \
})
-#else /* !CONFIG_BUG */
-#ifndef HAVE_ARCH_BUG
-#define BUG() do {} while(0)
-#endif
-
-#ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (condition) ; } while(0)
-#endif
-
-#ifndef HAVE_ARCH_WARN_ON
-#define WARN_ON(condition) ({ \
- int __ret_warn_on = !!(condition); \
- unlikely(__ret_warn_on); \
-})
-#endif
-
-#ifndef WARN
-#define WARN(condition, format...) ({ \
- int __ret_warn_on = !!(condition); \
- unlikely(__ret_warn_on); \
-})
-#endif
-
-#define WARN_TAINT(condition, taint, format...) WARN_ON(condition)
-
-#endif
-
#define WARN_ON_ONCE(condition) ({ \
- static bool __warned; \
+ static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
@@ -143,7 +117,7 @@ extern void warn_slowpath_null(const char *file, const int line);
})
#define WARN_ONCE(condition, format...) ({ \
- static bool __warned; \
+ static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
@@ -153,7 +127,7 @@ extern void warn_slowpath_null(const char *file, const int line);
})
#define WARN_TAINT_ONCE(condition, taint, format...) ({ \
- static bool __warned; \
+ static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
@@ -162,13 +136,76 @@ extern void warn_slowpath_null(const char *file, const int line);
unlikely(__ret_warn_once); \
})
-#define WARN_ON_RATELIMIT(condition, state) \
- WARN_ON((condition) && __ratelimit(state))
+#else /* !CONFIG_BUG */
+#ifndef HAVE_ARCH_BUG
+#define BUG() do {} while (1)
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (condition) ; } while (0)
+#endif
+
+#ifndef HAVE_ARCH_WARN_ON
+#define WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ no_printk(format); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#define WARN_ON_ONCE(condition) WARN_ON(condition)
+#define WARN_ONCE(condition, format...) WARN(condition, format)
+#define WARN_TAINT(condition, taint, format...) WARN(condition, format)
+#define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format)
+
+#endif
+/*
+ * WARN_ON_SMP() is for cases that the warning is either
+ * meaningless for !SMP or may even cause failures.
+ * This is usually used for cases that we have
+ * WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked()
+ * returns 0 for uniprocessor settings.
+ * It can also be used with values that are only defined
+ * on SMP:
+ *
+ * struct foo {
+ * [...]
+ * #ifdef CONFIG_SMP
+ * int bar;
+ * #endif
+ * };
+ *
+ * void func(struct foo *zoot)
+ * {
+ * WARN_ON_SMP(!zoot->bar);
+ *
+ * For CONFIG_SMP, WARN_ON_SMP() should act the same as WARN_ON(),
+ * and should be a nop and return false for uniprocessor.
+ *
+ * if (WARN_ON_SMP(x)) returns true only when CONFIG_SMP is set
+ * and x is true.
+ */
#ifdef CONFIG_SMP
# define WARN_ON_SMP(x) WARN_ON(x)
#else
-# define WARN_ON_SMP(x) do { } while (0)
+/*
+ * Use of ({0;}) because WARN_ON_SMP(x) may be used either as
+ * a stand alone line statement or as a condition in an if ()
+ * statement.
+ * A simple "0" would cause gcc to give a "statement has no effect"
+ * warning.
+ */
+# define WARN_ON_SMP(x) ({0;})
#endif
+#endif /* __ASSEMBLY__ */
+
#endif
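
With the !CONFIG_BUG stubs now passing the format string to no_printk() and still evaluating to the condition, the WARN* macros keep their "usable inside an if ()" shape in both configurations. A minimal sketch of the usual warn-once-and-bail error path; the function name and limit are illustrative:

    #include <linux/bug.h>
    #include <linux/errno.h>

    static int set_speed(int rpm)
    {
            if (WARN_ON_ONCE(rpm < 0))
                    return -EINVAL;
            if (WARN_ONCE(rpm > 10000, "clamping rpm %d\n", rpm))
                    rpm = 10000;
            return rpm;
    }
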
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 57b5c3c82e8..87bc536ccde 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -24,7 +24,10 @@
#define flush_cache_vunmap(start, end) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+ do { \
+ memcpy(dst, src, len); \
+ flush_icache_user_range(vma, page, vaddr, len); \
+ } while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index 4647c762d97..59811df58c5 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -33,15 +33,20 @@ extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *csum_err);
+#ifndef csum_partial_copy_nocheck
#define csum_partial_copy_nocheck(src, dst, len, sum) \
csum_partial_copy((src), (dst), (len), (sum))
+#endif
+#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+#endif
+#ifndef csum_fold
/*
* Fold a partial checksum
*/
@@ -52,6 +57,7 @@ static inline __sum16 csum_fold(__wsum csum)
sum = (sum & 0xffff) + (sum >> 16);
return (__force __sum16)~sum;
}
+#endif
#ifndef csum_tcpudp_nofold
/*
@@ -63,12 +69,14 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum);
#endif
+#ifndef csum_tcpudp_magic
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
+#endif
/*
* this routine is used for miscellaneous IP-like checksums, mainly
diff --git a/include/asm-generic/clkdev.h b/include/asm-generic/clkdev.h
new file mode 100644
index 00000000000..90a32a61dd2
--- /dev/null
+++ b/include/asm-generic/clkdev.h
@@ -0,0 +1,28 @@
+/*
+ * include/asm-generic/clkdev.h
+ *
+ * Based on the ARM clkdev.h:
+ * Copyright (C) 2008 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Helper for the clk API to assist looking up a struct clk.
+ */
+#ifndef __ASM_CLKDEV_H
+#define __ASM_CLKDEV_H
+
+#include <linux/slab.h>
+
+struct clk;
+
+static inline int __clk_get(struct clk *clk) { return 1; }
+static inline void __clk_put(struct clk *clk) { }
+
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+ return kzalloc(size, GFP_KERNEL);
+}
+
+#endif
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index 2533fddd34a..70bef78912b 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -4,7 +4,8 @@
#include <linux/types.h>
#include <linux/irqflags.h>
-extern unsigned long wrong_size_cmpxchg(volatile void *ptr);
+extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
+ __noreturn;
/*
* Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
@@ -21,7 +22,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
if (size == 8 && sizeof(unsigned long) != 8)
wrong_size_cmpxchg(ptr);
- local_irq_save(flags);
+ raw_local_irq_save(flags);
switch (size) {
case 1: prev = *(u8 *)ptr;
if (prev == old)
@@ -42,7 +43,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
default:
wrong_size_cmpxchg(ptr);
}
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
return prev;
}
@@ -55,11 +56,11 @@ static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
u64 prev;
unsigned long flags;
- local_irq_save(flags);
+ raw_local_irq_save(flags);
prev = *(u64 *)ptr;
if (prev == old)
*(u64 *)ptr = new;
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
return prev;
}
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 213ac6e8fe3..811fb1e9b06 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -1,22 +1,108 @@
+/*
+ * Generic UP xchg and cmpxchg using interrupt disablement. Does not
+ * support SMP.
+ */
+
#ifndef __ASM_GENERIC_CMPXCHG_H
#define __ASM_GENERIC_CMPXCHG_H
-/*
- * Generic cmpxchg
- *
- * Uses the local cmpxchg. Does not support SMP.
- */
#ifdef CONFIG_SMP
#error "Cannot use generic cmpxchg on SMP"
#endif
+#include <linux/types.h>
+#include <linux/irqflags.h>
+
+#ifndef xchg
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalidly-sized xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline
+unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+ unsigned long ret, flags;
+
+ switch (size) {
+ case 1:
+#ifdef __xchg_u8
+ return __xchg_u8(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u8 *)ptr;
+ *(volatile u8 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u8 */
+
+ case 2:
+#ifdef __xchg_u16
+ return __xchg_u16(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u16 *)ptr;
+ *(volatile u16 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u16 */
+
+ case 4:
+#ifdef __xchg_u32
+ return __xchg_u32(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u32 *)ptr;
+ *(volatile u32 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u32 */
+
+#ifdef CONFIG_64BIT
+ case 8:
+#ifdef __xchg_u64
+ return __xchg_u64(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u64 *)ptr;
+ *(volatile u64 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u64 */
+#endif /* CONFIG_64BIT */
+
+ default:
+ __xchg_called_with_bad_pointer();
+ return x;
+ }
+}
+
+#define xchg(ptr, x) \
+ ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+
+#endif /* xchg */
+
/*
* Atomic compare and exchange.
*
* Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
* a cmpxchg primitive faster than repeated local irq save/restore exists.
*/
+#include <asm-generic/cmpxchg-local.h>
+
+#ifndef cmpxchg_local
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#endif
+
+#ifndef cmpxchg64_local
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
-#endif
+#endif /* __ASM_GENERIC_CMPXCHG_H */
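
The generic xchg() now handles 1-, 2-, 4- (and on 64-bit, 8-) byte operands by briefly disabling interrupts, and cmpxchg()/cmpxchg64() are routed through the *_local generic helpers, so the classic compare-and-swap retry loop works unmodified on these UP-only configurations. A minimal sketch with an illustrative helper name:

    #include <linux/compiler.h>
    #include <asm/cmpxchg.h>

    /* Saturating increment built on the generic cmpxchg(). */
    static inline unsigned int bump_capped(unsigned int *ctr, unsigned int cap)
    {
            unsigned int old, new;

            do {
                    old = ACCESS_ONCE(*ctr);
                    if (old >= cap)
                            return old;         /* already saturated */
                    new = old + 1;
            } while (cmpxchg(ctr, old, new) != old);

            return new;
    }
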
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 2bcc5c7c22a..51969436b8b 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -4,67 +4,12 @@
#include <linux/time.h>
#include <linux/jiffies.h>
-typedef unsigned long cputime_t;
-
-#define cputime_zero (0UL)
-#define cputime_one_jiffy jiffies_to_cputime(1)
-#define cputime_max ((~0UL >> 1) - 1)
-#define cputime_add(__a, __b) ((__a) + (__b))
-#define cputime_sub(__a, __b) ((__a) - (__b))
-#define cputime_div(__a, __n) ((__a) / (__n))
-#define cputime_halve(__a) ((__a) >> 1)
-#define cputime_eq(__a, __b) ((__a) == (__b))
-#define cputime_gt(__a, __b) ((__a) > (__b))
-#define cputime_ge(__a, __b) ((__a) >= (__b))
-#define cputime_lt(__a, __b) ((__a) < (__b))
-#define cputime_le(__a, __b) ((__a) <= (__b))
-#define cputime_to_jiffies(__ct) (__ct)
-#define cputime_to_scaled(__ct) (__ct)
-#define jiffies_to_cputime(__hz) (__hz)
-
-typedef u64 cputime64_t;
-
-#define cputime64_zero (0ULL)
-#define cputime64_add(__a, __b) ((__a) + (__b))
-#define cputime64_sub(__a, __b) ((__a) - (__b))
-#define cputime64_to_jiffies64(__ct) (__ct)
-#define jiffies64_to_cputime64(__jif) (__jif)
-#define cputime_to_cputime64(__ct) ((u64) __ct)
-
-
-/*
- * Convert cputime to microseconds and back.
- */
-#define cputime_to_usecs(__ct) jiffies_to_usecs(__ct);
-#define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs);
-
-/*
- * Convert cputime to seconds and back.
- */
-#define cputime_to_secs(jif) ((jif) / HZ)
-#define secs_to_cputime(sec) ((sec) * HZ)
-
-/*
- * Convert cputime to timespec and back.
- */
-#define timespec_to_cputime(__val) timespec_to_jiffies(__val)
-#define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val)
-
-/*
- * Convert cputime to timeval and back.
- */
-#define timeval_to_cputime(__val) timeval_to_jiffies(__val)
-#define cputime_to_timeval(__ct,__val) jiffies_to_timeval(__ct,__val)
-
-/*
- * Convert cputime to clock and back.
- */
-#define cputime_to_clock_t(__ct) jiffies_to_clock_t(__ct)
-#define clock_t_to_cputime(__x) clock_t_to_jiffies(__x)
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+# include <asm-generic/cputime_jiffies.h>
+#endif
-/*
- * Convert cputime64 to clock.
- */
-#define cputime64_to_clock_t(__ct) jiffies_64_to_clock_t(__ct)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+# include <asm-generic/cputime_nsecs.h>
+#endif
#endif
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
new file mode 100644
index 00000000000..d5cb78f5398
--- /dev/null
+++ b/include/asm-generic/cputime_jiffies.h
@@ -0,0 +1,74 @@
+#ifndef _ASM_GENERIC_CPUTIME_JIFFIES_H
+#define _ASM_GENERIC_CPUTIME_JIFFIES_H
+
+typedef unsigned long __nocast cputime_t;
+
+#define cputime_one_jiffy jiffies_to_cputime(1)
+#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
+#define cputime_to_scaled(__ct) (__ct)
+#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz)
+
+typedef u64 __nocast cputime64_t;
+
+#define cputime64_to_jiffies64(__ct) (__force u64)(__ct)
+#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif)
+
+
+/*
+ * Convert nanoseconds <-> cputime
+ */
+#define cputime_to_nsecs(__ct) \
+ jiffies_to_nsecs(cputime_to_jiffies(__ct))
+#define nsecs_to_cputime64(__nsec) \
+ jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec))
+#define nsecs_to_cputime(__nsec) \
+ jiffies_to_cputime(nsecs_to_jiffies(__nsec))
+
+
+/*
+ * Convert cputime to microseconds and back.
+ */
+#define cputime_to_usecs(__ct) \
+ jiffies_to_usecs(cputime_to_jiffies(__ct))
+#define usecs_to_cputime(__usec) \
+ jiffies_to_cputime(usecs_to_jiffies(__usec))
+#define usecs_to_cputime64(__usec) \
+ jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
+
+/*
+ * Convert cputime to seconds and back.
+ */
+#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ)
+#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ)
+
+/*
+ * Convert cputime to timespec and back.
+ */
+#define timespec_to_cputime(__val) \
+ jiffies_to_cputime(timespec_to_jiffies(__val))
+#define cputime_to_timespec(__ct,__val) \
+ jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
+
+/*
+ * Convert cputime to timeval and back.
+ */
+#define timeval_to_cputime(__val) \
+ jiffies_to_cputime(timeval_to_jiffies(__val))
+#define cputime_to_timeval(__ct,__val) \
+ jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
+
+/*
+ * Convert cputime to clock and back.
+ */
+#define cputime_to_clock_t(__ct) \
+ jiffies_to_clock_t(cputime_to_jiffies(__ct))
+#define clock_t_to_cputime(__x) \
+ jiffies_to_cputime(clock_t_to_jiffies(__x))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct) \
+ jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
+
+#endif
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
new file mode 100644
index 00000000000..4e817606c54
--- /dev/null
+++ b/include/asm-generic/cputime_nsecs.h
@@ -0,0 +1,117 @@
+/*
+ * Definitions for measuring cputime in nsecs resolution.
+ *
+ * Based on <arch/ia64/include/asm/cputime.h>
+ *
+ * Copyright (C) 2007 FUJITSU LIMITED
+ * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _ASM_GENERIC_CPUTIME_NSECS_H
+#define _ASM_GENERIC_CPUTIME_NSECS_H
+
+#include <linux/math64.h>
+
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
+
+#define cputime_one_jiffy jiffies_to_cputime(1)
+
+#define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor)
+#define cputime_div_rem(__ct, divisor, remainder) \
+ div_u64_rem((__force u64)__ct, divisor, remainder);
+
+/*
+ * Convert cputime <-> jiffies (HZ)
+ */
+#define cputime_to_jiffies(__ct) \
+ cputime_div(__ct, NSEC_PER_SEC / HZ)
+#define cputime_to_scaled(__ct) (__ct)
+#define jiffies_to_cputime(__jif) \
+ (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct) \
+ cputime_div(__ct, NSEC_PER_SEC / HZ)
+#define jiffies64_to_cputime64(__jif) \
+ (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
+
+
+/*
+ * Convert cputime <-> nanoseconds
+ */
+#define cputime_to_nsecs(__ct) \
+ (__force u64)(__ct)
+#define nsecs_to_cputime(__nsecs) \
+ (__force cputime_t)(__nsecs)
+
+
+/*
+ * Convert cputime <-> microseconds
+ */
+#define cputime_to_usecs(__ct) \
+ cputime_div(__ct, NSEC_PER_USEC)
+#define usecs_to_cputime(__usecs) \
+ (__force cputime_t)((__usecs) * NSEC_PER_USEC)
+#define usecs_to_cputime64(__usecs) \
+ (__force cputime64_t)((__usecs) * NSEC_PER_USEC)
+
+/*
+ * Convert cputime <-> seconds
+ */
+#define cputime_to_secs(__ct) \
+ cputime_div(__ct, NSEC_PER_SEC)
+#define secs_to_cputime(__secs) \
+ (__force cputime_t)((__secs) * NSEC_PER_SEC)
+
+/*
+ * Convert cputime <-> timespec (nsec)
+ */
+static inline cputime_t timespec_to_cputime(const struct timespec *val)
+{
+ u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+ return (__force cputime_t) ret;
+}
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+{
+ u32 rem;
+
+ val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem);
+ val->tv_nsec = rem;
+}
+
+/*
+ * Convert cputime <-> timeval (msec)
+ */
+static inline cputime_t timeval_to_cputime(const struct timeval *val)
+{
+ u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+ return (__force cputime_t) ret;
+}
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
+{
+ u32 rem;
+
+ val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem);
+ val->tv_usec = rem / NSEC_PER_USEC;
+}
+
+/*
+ * Convert cputime <-> clock (USER_HZ)
+ */
+#define cputime_to_clock_t(__ct) \
+ cputime_div(__ct, (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x) \
+ (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct) \
+ cputime_to_clock_t((__force cputime_t)__ct)
+
+#endif
diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h
index 4586fec75dd..0f79054ce7c 100644
--- a/include/asm-generic/delay.h
+++ b/include/asm-generic/delay.h
@@ -1,9 +1,44 @@
#ifndef __ASM_GENERIC_DELAY_H
#define __ASM_GENERIC_DELAY_H
+/* Undefined functions to get compile-time errors */
+extern void __bad_udelay(void);
+extern void __bad_ndelay(void);
+
extern void __udelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
+extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);
-#define udelay(n) __udelay(n)
+/*
+ * The weird n/20000 thing suppresses a "comparison is always false due to
+ * limited range of data type" warning with non-const 8-bit arguments.
+ */
+
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
+#define udelay(n) \
+ ({ \
+ if (__builtin_constant_p(n)) { \
+ if ((n) / 20000 >= 1) \
+ __bad_udelay(); \
+ else \
+ __const_udelay((n) * 0x10c7ul); \
+ } else { \
+ __udelay(n); \
+ } \
+ })
+
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
+#define ndelay(n) \
+ ({ \
+ if (__builtin_constant_p(n)) { \
+ if ((n) / 20000 >= 1) \
+ __bad_ndelay(); \
+ else \
+ __const_udelay((n) * 5ul); \
+ } else { \
+ __ndelay(n); \
+ } \
+ })
#endif /* __ASM_GENERIC_DELAY_H */
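
The magic multipliers above are just ceil(2^32 / 10^6) and ceil(2^32 / 10^9), so that __const_udelay() can scale by a 32.32 fixed-point factor. A standalone check of the two constants (no kernel API involved):

/* Verify the fixed-point constants used by the udelay()/ndelay() macros. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t two32 = 1ULL << 32;

	printf("usec multiplier: %#llx\n",	/* expect 0x10c7 */
	       (unsigned long long)((two32 + 1000000 - 1) / 1000000));
	printf("nsec multiplier: %llu\n",	/* expect 5 */
	       (unsigned long long)((two32 + 1000000000 - 1) / 1000000000));
	return 0;
}
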
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
index 85a3ffaa024..0297e587579 100644
--- a/include/asm-generic/dma-coherent.h
+++ b/include/asm-generic/dma-coherent.h
@@ -3,30 +3,30 @@
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
- * These two functions are only for dma allocator.
+ * These three functions are only for the dma allocator.
* Don't use them in device drivers.
*/
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret);
/*
* Standard interface
*/
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
- dma_addr_t device_addr, size_t size, int flags);
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size, int flags);
-extern void
-dma_release_declared_memory(struct device *dev);
+void dma_release_declared_memory(struct device *dev);
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size);
+void *dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr, size_t size);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif
#endif
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index ccf7b4f34a3..6c32af918c2 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -16,6 +16,22 @@ extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle);
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ /* attrs is not supported and ignored */
+ return dma_alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ /* attrs is not supported and ignored */
+ dma_free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 0c80bb38773..de8bf89940f 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -2,6 +2,7 @@
#define _ASM_GENERIC_DMA_MAPPING_H
#include <linux/kmemcheck.h>
+#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
@@ -123,7 +124,12 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
size_t size,
enum dma_data_direction dir)
{
- dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_cpu)
+ ops->sync_single_for_cpu(dev, addr + offset, size, dir);
+ debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -132,7 +138,12 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
size_t size,
enum dma_data_direction dir)
{
- dma_sync_single_for_device(dev, addr + offset, size, dir);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_device)
+ ops->sync_single_for_device(dev, addr + offset, size, dir);
+ debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
static inline void
@@ -165,4 +176,59 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops->mmap)
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops->get_sgtable)
+ return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+ attrs);
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+
#endif
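
A hedged sketch of how a driver's mmap file operation might use dma_mmap_coherent() as documented above. The device pointer, buffer and size (my_dev, my_cpu_addr, my_dma_handle, MY_BUF_SIZE) are illustrative names assumed to have been set up earlier with dma_alloc_coherent(); they are not part of this header.

/* Illustrative only: expose a previously allocated coherent buffer to
 * user space via dma_mmap_coherent(), per the kerneldoc above.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

extern struct device *my_dev;		/* assumed: filled in at probe time */
extern void *my_cpu_addr;		/* assumed: from dma_alloc_coherent() */
extern dma_addr_t my_dma_handle;	/* assumed: from dma_alloc_coherent() */
#define MY_BUF_SIZE PAGE_SIZE

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t len = vma->vm_end - vma->vm_start;

	if (len > MY_BUF_SIZE)
		return -EINVAL;

	/* The coherent buffer must outlive the user mapping (see above). */
	return dma_mmap_coherent(my_dev, vma, my_cpu_addr, my_dma_handle, len);
}
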
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
new file mode 100644
index 00000000000..a5de55c04fb
--- /dev/null
+++ b/include/asm-generic/early_ioremap.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_EARLY_IOREMAP_H_
+#define _ASM_EARLY_IOREMAP_H_
+
+#include <linux/types.h>
+
+/*
+ * early_ioremap() and early_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ */
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+ unsigned long size);
+extern void *early_memremap(resource_size_t phys_addr,
+ unsigned long size);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
+extern void early_memunmap(void *addr, unsigned long size);
+
+/*
+ * Weak function called by early_ioremap_reset(). It does nothing, but
+ * architectures may provide their own version to do any needed cleanups.
+ */
+extern void early_ioremap_shutdown(void);
+
+#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
+/* Arch-specific initialization */
+extern void early_ioremap_init(void);
+
+/* Generic initialization called by architecture code */
+extern void early_ioremap_setup(void);
+
+/*
+ * Called as last step in paging_init() so library can act
+ * accordingly for subsequent map/unmap requests.
+ */
+extern void early_ioremap_reset(void);
+
+#else
+static inline void early_ioremap_init(void) { }
+static inline void early_ioremap_setup(void) { }
+static inline void early_ioremap_reset(void) { }
+#endif
+
+#endif /* _ASM_EARLY_IOREMAP_H_ */
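
A hedged sketch of the intended call pattern: map a physical range while the normal ioremap() machinery is not yet up, read from it, and drop the temporary mapping again. The address and length are placeholders, and the include is assumed to make the declarations above visible.

/* Illustrative early-boot use of early_ioremap()/early_iounmap(). */
#include <linux/init.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>	/* assumed arch wrapper for the header above */

static void __init peek_firmware_table(void)
{
	void __iomem *p;
	u32 signature;

	p = early_ioremap(0x000f0000, 0x100);	/* placeholder range */
	if (!p)
		return;

	signature = readl(p);
	(void)signature;

	/* Unlike ioremap(), this mapping is temporary and must be undone. */
	early_iounmap(p, 0x100);
}
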
diff --git a/include/asm-generic/errno-base.h b/include/asm-generic/errno-base.h
deleted file mode 100644
index 65115978510..00000000000
--- a/include/asm-generic/errno-base.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef _ASM_GENERIC_ERRNO_BASE_H
-#define _ASM_GENERIC_ERRNO_BASE_H
-
-#define EPERM 1 /* Operation not permitted */
-#define ENOENT 2 /* No such file or directory */
-#define ESRCH 3 /* No such process */
-#define EINTR 4 /* Interrupted system call */
-#define EIO 5 /* I/O error */
-#define ENXIO 6 /* No such device or address */
-#define E2BIG 7 /* Argument list too long */
-#define ENOEXEC 8 /* Exec format error */
-#define EBADF 9 /* Bad file number */
-#define ECHILD 10 /* No child processes */
-#define EAGAIN 11 /* Try again */
-#define ENOMEM 12 /* Out of memory */
-#define EACCES 13 /* Permission denied */
-#define EFAULT 14 /* Bad address */
-#define ENOTBLK 15 /* Block device required */
-#define EBUSY 16 /* Device or resource busy */
-#define EEXIST 17 /* File exists */
-#define EXDEV 18 /* Cross-device link */
-#define ENODEV 19 /* No such device */
-#define ENOTDIR 20 /* Not a directory */
-#define EISDIR 21 /* Is a directory */
-#define EINVAL 22 /* Invalid argument */
-#define ENFILE 23 /* File table overflow */
-#define EMFILE 24 /* Too many open files */
-#define ENOTTY 25 /* Not a typewriter */
-#define ETXTBSY 26 /* Text file busy */
-#define EFBIG 27 /* File too large */
-#define ENOSPC 28 /* No space left on device */
-#define ESPIPE 29 /* Illegal seek */
-#define EROFS 30 /* Read-only file system */
-#define EMLINK 31 /* Too many links */
-#define EPIPE 32 /* Broken pipe */
-#define EDOM 33 /* Math argument out of domain of func */
-#define ERANGE 34 /* Math result not representable */
-
-#endif
diff --git a/include/asm-generic/errno.h b/include/asm-generic/errno.h
deleted file mode 100644
index 28cc03bf19e..00000000000
--- a/include/asm-generic/errno.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef _ASM_GENERIC_ERRNO_H
-#define _ASM_GENERIC_ERRNO_H
-
-#include <asm-generic/errno-base.h>
-
-#define EDEADLK 35 /* Resource deadlock would occur */
-#define ENAMETOOLONG 36 /* File name too long */
-#define ENOLCK 37 /* No record locks available */
-#define ENOSYS 38 /* Function not implemented */
-#define ENOTEMPTY 39 /* Directory not empty */
-#define ELOOP 40 /* Too many symbolic links encountered */
-#define EWOULDBLOCK EAGAIN /* Operation would block */
-#define ENOMSG 42 /* No message of desired type */
-#define EIDRM 43 /* Identifier removed */
-#define ECHRNG 44 /* Channel number out of range */
-#define EL2NSYNC 45 /* Level 2 not synchronized */
-#define EL3HLT 46 /* Level 3 halted */
-#define EL3RST 47 /* Level 3 reset */
-#define ELNRNG 48 /* Link number out of range */
-#define EUNATCH 49 /* Protocol driver not attached */
-#define ENOCSI 50 /* No CSI structure available */
-#define EL2HLT 51 /* Level 2 halted */
-#define EBADE 52 /* Invalid exchange */
-#define EBADR 53 /* Invalid request descriptor */
-#define EXFULL 54 /* Exchange full */
-#define ENOANO 55 /* No anode */
-#define EBADRQC 56 /* Invalid request code */
-#define EBADSLT 57 /* Invalid slot */
-
-#define EDEADLOCK EDEADLK
-
-#define EBFONT 59 /* Bad font file format */
-#define ENOSTR 60 /* Device not a stream */
-#define ENODATA 61 /* No data available */
-#define ETIME 62 /* Timer expired */
-#define ENOSR 63 /* Out of streams resources */
-#define ENONET 64 /* Machine is not on the network */
-#define ENOPKG 65 /* Package not installed */
-#define EREMOTE 66 /* Object is remote */
-#define ENOLINK 67 /* Link has been severed */
-#define EADV 68 /* Advertise error */
-#define ESRMNT 69 /* Srmount error */
-#define ECOMM 70 /* Communication error on send */
-#define EPROTO 71 /* Protocol error */
-#define EMULTIHOP 72 /* Multihop attempted */
-#define EDOTDOT 73 /* RFS specific error */
-#define EBADMSG 74 /* Not a data message */
-#define EOVERFLOW 75 /* Value too large for defined data type */
-#define ENOTUNIQ 76 /* Name not unique on network */
-#define EBADFD 77 /* File descriptor in bad state */
-#define EREMCHG 78 /* Remote address changed */
-#define ELIBACC 79 /* Can not access a needed shared library */
-#define ELIBBAD 80 /* Accessing a corrupted shared library */
-#define ELIBSCN 81 /* .lib section in a.out corrupted */
-#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
-#define ELIBEXEC 83 /* Cannot exec a shared library directly */
-#define EILSEQ 84 /* Illegal byte sequence */
-#define ERESTART 85 /* Interrupted system call should be restarted */
-#define ESTRPIPE 86 /* Streams pipe error */
-#define EUSERS 87 /* Too many users */
-#define ENOTSOCK 88 /* Socket operation on non-socket */
-#define EDESTADDRREQ 89 /* Destination address required */
-#define EMSGSIZE 90 /* Message too long */
-#define EPROTOTYPE 91 /* Protocol wrong type for socket */
-#define ENOPROTOOPT 92 /* Protocol not available */
-#define EPROTONOSUPPORT 93 /* Protocol not supported */
-#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
-#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
-#define EPFNOSUPPORT 96 /* Protocol family not supported */
-#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
-#define EADDRINUSE 98 /* Address already in use */
-#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
-#define ENETDOWN 100 /* Network is down */
-#define ENETUNREACH 101 /* Network is unreachable */
-#define ENETRESET 102 /* Network dropped connection because of reset */
-#define ECONNABORTED 103 /* Software caused connection abort */
-#define ECONNRESET 104 /* Connection reset by peer */
-#define ENOBUFS 105 /* No buffer space available */
-#define EISCONN 106 /* Transport endpoint is already connected */
-#define ENOTCONN 107 /* Transport endpoint is not connected */
-#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
-#define ETOOMANYREFS 109 /* Too many references: cannot splice */
-#define ETIMEDOUT 110 /* Connection timed out */
-#define ECONNREFUSED 111 /* Connection refused */
-#define EHOSTDOWN 112 /* Host is down */
-#define EHOSTUNREACH 113 /* No route to host */
-#define EALREADY 114 /* Operation already in progress */
-#define EINPROGRESS 115 /* Operation now in progress */
-#define ESTALE 116 /* Stale NFS file handle */
-#define EUCLEAN 117 /* Structure needs cleaning */
-#define ENOTNAM 118 /* Not a XENIX named type file */
-#define ENAVAIL 119 /* No XENIX semaphores available */
-#define EISNAM 120 /* Is a named type file */
-#define EREMOTEIO 121 /* Remote I/O error */
-#define EDQUOT 122 /* Quota exceeded */
-
-#define ENOMEDIUM 123 /* No medium found */
-#define EMEDIUMTYPE 124 /* Wrong medium type */
-#define ECANCELED 125 /* Operation Canceled */
-#define ENOKEY 126 /* Required key not available */
-#define EKEYEXPIRED 127 /* Key has expired */
-#define EKEYREVOKED 128 /* Key has been revoked */
-#define EKEYREJECTED 129 /* Key was rejected by service */
-
-/* for robust mutexes */
-#define EOWNERDEAD 130 /* Owner died */
-#define ENOTRECOVERABLE 131 /* State not recoverable */
-
-#define ERFKILL 132 /* Operation not possible due to RF-kill */
-
-#endif
diff --git a/include/asm-generic/exec.h b/include/asm-generic/exec.h
new file mode 100644
index 00000000000..567766b0074
--- /dev/null
+++ b/include/asm-generic/exec.h
@@ -0,0 +1,19 @@
+/* Generic process execution definitions, based on MN10300 definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but they serve more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_EXEC_H
+#define __ASM_GENERIC_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* __ASM_GENERIC_EXEC_H */
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
deleted file mode 100644
index 0fc16e3f0bf..00000000000
--- a/include/asm-generic/fcntl.h
+++ /dev/null
@@ -1,196 +0,0 @@
-#ifndef _ASM_GENERIC_FCNTL_H
-#define _ASM_GENERIC_FCNTL_H
-
-#include <linux/types.h>
-
-/*
- * FMODE_EXEC is 0x20
- * FMODE_NONOTIFY is 0x1000000
- * These cannot be used by userspace O_* until internal and external open
- * flags are split.
- * -Eric Paris
- */
-
-/*
- * When introducing new O_* bits, please check its uniqueness in fcntl_init().
- */
-
-#define O_ACCMODE 00000003
-#define O_RDONLY 00000000
-#define O_WRONLY 00000001
-#define O_RDWR 00000002
-#ifndef O_CREAT
-#define O_CREAT 00000100 /* not fcntl */
-#endif
-#ifndef O_EXCL
-#define O_EXCL 00000200 /* not fcntl */
-#endif
-#ifndef O_NOCTTY
-#define O_NOCTTY 00000400 /* not fcntl */
-#endif
-#ifndef O_TRUNC
-#define O_TRUNC 00001000 /* not fcntl */
-#endif
-#ifndef O_APPEND
-#define O_APPEND 00002000
-#endif
-#ifndef O_NONBLOCK
-#define O_NONBLOCK 00004000
-#endif
-#ifndef O_DSYNC
-#define O_DSYNC 00010000 /* used to be O_SYNC, see below */
-#endif
-#ifndef FASYNC
-#define FASYNC 00020000 /* fcntl, for BSD compatibility */
-#endif
-#ifndef O_DIRECT
-#define O_DIRECT 00040000 /* direct disk access hint */
-#endif
-#ifndef O_LARGEFILE
-#define O_LARGEFILE 00100000
-#endif
-#ifndef O_DIRECTORY
-#define O_DIRECTORY 00200000 /* must be a directory */
-#endif
-#ifndef O_NOFOLLOW
-#define O_NOFOLLOW 00400000 /* don't follow links */
-#endif
-#ifndef O_NOATIME
-#define O_NOATIME 01000000
-#endif
-#ifndef O_CLOEXEC
-#define O_CLOEXEC 02000000 /* set close_on_exec */
-#endif
-
-/*
- * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using
- * the O_SYNC flag. We continue to use the existing numerical value
- * for O_DSYNC semantics now, but using the correct symbolic name for it.
- * This new value is used to request true Posix O_SYNC semantics. It is
- * defined in this strange way to make sure applications compiled against
- * new headers get at least O_DSYNC semantics on older kernels.
- *
- * This has the nice side-effect that we can simply test for O_DSYNC
- * wherever we do not care if O_DSYNC or O_SYNC is used.
- *
- * Note: __O_SYNC must never be used directly.
- */
-#ifndef O_SYNC
-#define __O_SYNC 04000000
-#define O_SYNC (__O_SYNC|O_DSYNC)
-#endif
-
-#ifndef O_NDELAY
-#define O_NDELAY O_NONBLOCK
-#endif
-
-#define F_DUPFD 0 /* dup */
-#define F_GETFD 1 /* get close_on_exec */
-#define F_SETFD 2 /* set/clear close_on_exec */
-#define F_GETFL 3 /* get file->f_flags */
-#define F_SETFL 4 /* set file->f_flags */
-#ifndef F_GETLK
-#define F_GETLK 5
-#define F_SETLK 6
-#define F_SETLKW 7
-#endif
-#ifndef F_SETOWN
-#define F_SETOWN 8 /* for sockets. */
-#define F_GETOWN 9 /* for sockets. */
-#endif
-#ifndef F_SETSIG
-#define F_SETSIG 10 /* for sockets. */
-#define F_GETSIG 11 /* for sockets. */
-#endif
-
-#ifndef CONFIG_64BIT
-#ifndef F_GETLK64
-#define F_GETLK64 12 /* using 'struct flock64' */
-#define F_SETLK64 13
-#define F_SETLKW64 14
-#endif
-#endif
-
-#ifndef F_SETOWN_EX
-#define F_SETOWN_EX 15
-#define F_GETOWN_EX 16
-#endif
-
-#define F_OWNER_TID 0
-#define F_OWNER_PID 1
-#define F_OWNER_PGRP 2
-
-struct f_owner_ex {
- int type;
- __kernel_pid_t pid;
-};
-
-/* for F_[GET|SET]FL */
-#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
-
-/* for posix fcntl() and lockf() */
-#ifndef F_RDLCK
-#define F_RDLCK 0
-#define F_WRLCK 1
-#define F_UNLCK 2
-#endif
-
-/* for old implementation of bsd flock () */
-#ifndef F_EXLCK
-#define F_EXLCK 4 /* or 3 */
-#define F_SHLCK 8 /* or 4 */
-#endif
-
-/* for leases */
-#ifndef F_INPROGRESS
-#define F_INPROGRESS 16
-#endif
-
-/* operations for bsd flock(), also used by the kernel implementation */
-#define LOCK_SH 1 /* shared lock */
-#define LOCK_EX 2 /* exclusive lock */
-#define LOCK_NB 4 /* or'd with one of the above to prevent
- blocking */
-#define LOCK_UN 8 /* remove lock */
-
-#define LOCK_MAND 32 /* This is a mandatory flock ... */
-#define LOCK_READ 64 /* which allows concurrent read operations */
-#define LOCK_WRITE 128 /* which allows concurrent write operations */
-#define LOCK_RW 192 /* which allows concurrent read & write ops */
-
-#define F_LINUX_SPECIFIC_BASE 1024
-
-#ifndef HAVE_ARCH_STRUCT_FLOCK
-#ifndef __ARCH_FLOCK_PAD
-#define __ARCH_FLOCK_PAD
-#endif
-
-struct flock {
- short l_type;
- short l_whence;
- __kernel_off_t l_start;
- __kernel_off_t l_len;
- __kernel_pid_t l_pid;
- __ARCH_FLOCK_PAD
-};
-#endif
-
-#ifndef CONFIG_64BIT
-
-#ifndef HAVE_ARCH_STRUCT_FLOCK64
-#ifndef __ARCH_FLOCK64_PAD
-#define __ARCH_FLOCK64_PAD
-#endif
-
-struct flock64 {
- short l_type;
- short l_whence;
- __kernel_loff_t l_start;
- __kernel_loff_t l_len;
- __kernel_pid_t l_pid;
- __ARCH_FLOCK64_PAD
-};
-#endif
-#endif /* !CONFIG_64BIT */
-
-#endif /* _ASM_GENERIC_FCNTL_H */
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
new file mode 100644
index 00000000000..f23174fb9ec
--- /dev/null
+++ b/include/asm-generic/fixmap.h
@@ -0,0 +1,100 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
+ * Break out common bits to asm-generic by Mark Salter, November 2013
+ */
+
+#ifndef __ASM_GENERIC_FIXMAP_H
+#define __ASM_GENERIC_FIXMAP_H
+
+#include <linux/bug.h>
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+ return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+ return __virt_to_fix(vaddr);
+}
+
+/*
+ * Provide some reasonable defaults for page flags.
+ * Not all architectures use all of these different types and some
+ * architectures use different names.
+ */
+#ifndef FIXMAP_PAGE_NORMAL
+#define FIXMAP_PAGE_NORMAL PAGE_KERNEL
+#endif
+#ifndef FIXMAP_PAGE_NOCACHE
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
+#endif
+#ifndef FIXMAP_PAGE_IO
+#define FIXMAP_PAGE_IO PAGE_KERNEL_IO
+#endif
+#ifndef FIXMAP_PAGE_CLEAR
+#define FIXMAP_PAGE_CLEAR __pgprot(0)
+#endif
+
+#ifndef set_fixmap
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL)
+#endif
+
+#ifndef clear_fixmap
+#define clear_fixmap(idx) \
+ __set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR)
+#endif
+
+/* Return a pointer with offset calculated */
+#define __set_fixmap_offset(idx, phys, flags) \
+({ \
+ unsigned long addr; \
+ __set_fixmap(idx, phys, flags); \
+ addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
+ addr; \
+})
+
+#define set_fixmap_offset(idx, phys) \
+ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL)
+
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+#define set_fixmap_offset_nocache(idx, phys) \
+ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+/*
+ * Some fixmaps are for IO
+ */
+#define set_fixmap_io(idx, phys) \
+ __set_fixmap(idx, phys, FIXMAP_PAGE_IO)
+
+#define set_fixmap_offset_io(idx, phys) \
+ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_FIXMAP_H */
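
The index/address translation above is plain arithmetic against FIXADDR_TOP. A standalone round-trip check, with FIXADDR_TOP, PAGE_SHIFT and PAGE_MASK assumed locally (the real values are architecture specific):

/* Standalone check that __fix_to_virt() and __virt_to_fix() round-trip. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define FIXADDR_TOP	0xfffff000UL		/* assumed, arch specific */

#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)	((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long idx, vaddr;

	for (idx = 0; idx < 4; idx++) {
		vaddr = __fix_to_virt(idx);
		printf("idx %lu -> %#lx -> idx %lu\n",
		       idx, vaddr, __virt_to_fix(vaddr));
	}
	return 0;
}
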
diff --git a/include/asm-generic/ftrace.h b/include/asm-generic/ftrace.h
new file mode 100644
index 00000000000..51abba9ea7a
--- /dev/null
+++ b/include/asm-generic/ftrace.h
@@ -0,0 +1,16 @@
+/*
+ * linux/include/asm-generic/ftrace.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_GENERIC_FTRACE_H__
+#define __ASM_GENERIC_FTRACE_H__
+
+/*
+ * Not all architectures need their own ftrace.h; the most
+ * common definitions are already in linux/ftrace.h.
+ */
+
+#endif /* __ASM_GENERIC_FTRACE_H__ */
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index 3c2344f4813..01f227e1425 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -6,7 +6,7 @@
#include <asm/errno.h>
static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -16,7 +16,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
pagefault_disable();
@@ -48,7 +48,8 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
}
static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
{
return -ENOSYS;
}
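
Purely to illustrate the bitfield layout the shifts at the top of futex_atomic_op_inuser() rely on, here is the same extraction applied to a made-up encoded_op value (standalone, no kernel API):

/* Decode the op/cmp fields of a futex encoded_op as done above. */
#include <stdio.h>

int main(void)
{
	int encoded_op = (0x5 << 28) | (0x3 << 24);	/* made-up value */
	int op  = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;

	printf("op=%d cmp=%d\n", op, cmp);	/* prints op=5 cmp=3 */
	return 0;
}
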
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index 67e7245dc9b..65e4468ac53 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -4,21 +4,58 @@
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
+#include <linux/log2.h>
-/* Pure 2^n version of get_order */
-static inline __attribute_const__ int get_order(unsigned long size)
+/*
+ * Runtime evaluation of get_order()
+ */
+static inline __attribute_const__
+int __get_order(unsigned long size)
{
int order;
- size = (size - 1) >> (PAGE_SHIFT - 1);
- order = -1;
- do {
- size >>= 1;
- order++;
- } while (size);
+ size--;
+ size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+ order = fls(size);
+#else
+ order = fls64(size);
+#endif
return order;
}
+/**
+ * get_order - Determine the allocation order of a memory size
+ * @size: The size for which to get the order
+ *
+ * Determine the allocation order of a particular sized block of memory. This
+ * is on a logarithmic scale, where:
+ *
+ * 0 -> 2^0 * PAGE_SIZE and below
+ * 1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1
+ * 2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1
+ * 3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1
+ * 4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1
+ * ...
+ *
+ * The order returned is used to find the smallest allocation granule required
+ * to hold an object of the specified size.
+ *
+ * The result is undefined if the size is 0.
+ *
+ * This function may be used to initialise variables with compile time
+ * evaluations of constants.
+ */
+#define get_order(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
+ (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
+ ilog2((n) - 1) - PAGE_SHIFT + 1) \
+ ) : \
+ __get_order(n) \
+)
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GENERIC_GETORDER_H */
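
The size-to-order table in the get_order() kerneldoc can be reproduced with the run-time path above; the sketch below emulates fls() with a GCC/Clang builtin and assumes PAGE_SHIFT is 12:

/* Standalone re-implementation of __get_order() to check the kerneldoc table. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static int fls_ul(unsigned long x)	/* emulate fls()/fls64() */
{
	return x ? (int)(8 * sizeof(x)) - __builtin_clzl(x) : 0;
}

static int get_order(unsigned long size)
{
	size--;
	size >>= PAGE_SHIFT;
	return fls_ul(size);
}

int main(void)
{
	unsigned long sizes[] = {
		1, PAGE_SIZE, PAGE_SIZE + 1, 2 * PAGE_SIZE,
		2 * PAGE_SIZE + 1, 4 * PAGE_SIZE, 8 * PAGE_SIZE + 1
	};
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%8lu bytes -> order %d\n", sizes[i], get_order(sizes[i]));
	return 0;
}
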
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index ff5c66080c8..23e364538ab 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -4,10 +4,14 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
#ifdef CONFIG_GPIOLIB
#include <linux/compiler.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
/* Platforms may implement their GPIO interface with library code,
* at a small performance cost for non-inlined operations and some
@@ -35,114 +39,23 @@
* platform data and other tables.
*/
-static inline int gpio_is_valid(int number)
+static inline bool gpio_is_valid(int number)
{
- return ((unsigned)number) < ARCH_NR_GPIOS;
+ return number >= 0 && number < ARCH_NR_GPIOS;
}
struct device;
+struct gpio;
struct seq_file;
struct module;
struct device_node;
+struct gpio_desc;
-/**
- * struct gpio_chip - abstract a GPIO controller
- * @label: for diagnostics
- * @dev: optional device providing the GPIOs
- * @owner: helps prevent removal of modules exporting active GPIOs
- * @request: optional hook for chip-specific activation, such as
- * enabling module power and clock; may sleep
- * @free: optional hook for chip-specific deactivation, such as
- * disabling module power and clock; may sleep
- * @direction_input: configures signal "offset" as input, or returns error
- * @get: returns value for signal "offset"; for output signals this
- * returns either the value actually sensed, or zero
- * @direction_output: configures signal "offset" as output, or returns error
- * @set: assigns output value for signal "offset"
- * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
- * implementation may not sleep
- * @dbg_show: optional routine to show contents in debugfs; default code
- * will be used when this is omitted, but custom code can show extra
- * state (such as pullup/pulldown configuration).
- * @base: identifies the first GPIO number handled by this chip; or, if
- * negative during registration, requests dynamic ID allocation.
- * @ngpio: the number of GPIOs handled by this controller; the last GPIO
- * handled is (base + ngpio - 1).
- * @can_sleep: flag must be set iff get()/set() methods sleep, as they
- * must while accessing GPIO expander chips over I2C or SPI
- * @names: if set, must be an array of strings to use as alternative
- * names for the GPIOs in this chip. Any entry in the array
- * may be NULL if there is no alias for the GPIO, however the
- * array must be @ngpio entries long. A name can include a single printk
- * format specifier for an unsigned int. It is substituted by the actual
- * number of the gpio.
- *
- * A gpio_chip can help platforms abstract various sources of GPIOs so
- * they can all be accessed through a common programing interface.
- * Example sources would be SOC controllers, FPGAs, multifunction
- * chips, dedicated GPIO expanders, and so on.
- *
- * Each chip controls a number of signals, identified in method calls
- * by "offset" values in the range 0..(@ngpio - 1). When those signals
- * are referenced through calls like gpio_get_value(gpio), the offset
- * is calculated by subtracting @base from the gpio number.
- */
-struct gpio_chip {
- const char *label;
- struct device *dev;
- struct module *owner;
-
- int (*request)(struct gpio_chip *chip,
- unsigned offset);
- void (*free)(struct gpio_chip *chip,
- unsigned offset);
-
- int (*direction_input)(struct gpio_chip *chip,
- unsigned offset);
- int (*get)(struct gpio_chip *chip,
- unsigned offset);
- int (*direction_output)(struct gpio_chip *chip,
- unsigned offset, int value);
- int (*set_debounce)(struct gpio_chip *chip,
- unsigned offset, unsigned debounce);
-
- void (*set)(struct gpio_chip *chip,
- unsigned offset, int value);
-
- int (*to_irq)(struct gpio_chip *chip,
- unsigned offset);
-
- void (*dbg_show)(struct seq_file *s,
- struct gpio_chip *chip);
- int base;
- u16 ngpio;
- const char *const *names;
- unsigned can_sleep:1;
- unsigned exported:1;
-
-#if defined(CONFIG_OF_GPIO)
- /*
- * If CONFIG_OF is enabled, then all GPIO controllers described in the
- * device tree automatically may have an OF translation
- */
- struct device_node *of_node;
- int of_gpio_n_cells;
- int (*of_xlate)(struct gpio_chip *gc, struct device_node *np,
- const void *gpio_spec, u32 *flags);
-#endif
-};
-
-extern const char *gpiochip_is_requested(struct gpio_chip *chip,
- unsigned offset);
-extern int __must_check gpiochip_reserve(int start, int ngpio);
-
-/* add/remove chips */
-extern int gpiochip_add(struct gpio_chip *chip);
-extern int __must_check gpiochip_remove(struct gpio_chip *chip);
-extern struct gpio_chip *gpiochip_find(void *data,
- int (*match)(struct gpio_chip *chip,
- void *data));
-
+/* caller holds gpio_lock *OR* gpio is marked as requested */
+static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
+{
+ return gpiod_to_chip(gpio_to_desc(gpio));
+}
/* Always use the library code for GPIO management calls,
* or when sleeping may be involved.
@@ -150,69 +63,135 @@ extern struct gpio_chip *gpiochip_find(void *data,
extern int gpio_request(unsigned gpio, const char *label);
extern void gpio_free(unsigned gpio);
-extern int gpio_direction_input(unsigned gpio);
-extern int gpio_direction_output(unsigned gpio, int value);
+static inline int gpio_direction_input(unsigned gpio)
+{
+ return gpiod_direction_input(gpio_to_desc(gpio));
+}
+static inline int gpio_direction_output(unsigned gpio, int value)
+{
+ return gpiod_direction_output_raw(gpio_to_desc(gpio), value);
+}
-extern int gpio_set_debounce(unsigned gpio, unsigned debounce);
+static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
+{
+ return gpiod_set_debounce(gpio_to_desc(gpio), debounce);
+}
-extern int gpio_get_value_cansleep(unsigned gpio);
-extern void gpio_set_value_cansleep(unsigned gpio, int value);
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+ return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio));
+}
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+ return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value);
+}
/* A platform's <asm/gpio.h> code may want to inline the I/O calls when
* the GPIO is constant and refers to some always-present controller,
* giving direct access to chip registers and tight bitbanging loops.
*/
-extern int __gpio_get_value(unsigned gpio);
-extern void __gpio_set_value(unsigned gpio, int value);
+static inline int __gpio_get_value(unsigned gpio)
+{
+ return gpiod_get_raw_value(gpio_to_desc(gpio));
+}
+static inline void __gpio_set_value(unsigned gpio, int value)
+{
+ return gpiod_set_raw_value(gpio_to_desc(gpio), value);
+}
-extern int __gpio_cansleep(unsigned gpio);
+static inline int __gpio_cansleep(unsigned gpio)
+{
+ return gpiod_cansleep(gpio_to_desc(gpio));
+}
-extern int __gpio_to_irq(unsigned gpio);
+static inline int __gpio_to_irq(unsigned gpio)
+{
+ return gpiod_to_irq(gpio_to_desc(gpio));
+}
-#define GPIOF_DIR_OUT (0 << 0)
-#define GPIOF_DIR_IN (1 << 0)
+extern int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
+extern void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
+
+extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
+extern int gpio_request_array(const struct gpio *array, size_t num);
+extern void gpio_free_array(const struct gpio *array, size_t num);
-#define GPIOF_INIT_LOW (0 << 1)
-#define GPIOF_INIT_HIGH (1 << 1)
+/*
+ * A sysfs interface can be exported by individual drivers if they want,
+ * but more typically is configured entirely from userspace.
+ */
+static inline int gpio_export(unsigned gpio, bool direction_may_change)
+{
+ return gpiod_export(gpio_to_desc(gpio), direction_may_change);
+}
-#define GPIOF_IN (GPIOF_DIR_IN)
-#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
-#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
+static inline int gpio_export_link(struct device *dev, const char *name,
+ unsigned gpio)
+{
+ return gpiod_export_link(dev, name, gpio_to_desc(gpio));
+}
+
+static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ return gpiod_sysfs_set_active_low(gpio_to_desc(gpio), value);
+}
+
+static inline void gpio_unexport(unsigned gpio)
+{
+ gpiod_unexport(gpio_to_desc(gpio));
+}
+
+#ifdef CONFIG_PINCTRL
/**
- * struct gpio - a structure describing a GPIO with configuration
- * @gpio: the GPIO number
- * @flags: GPIO configuration as specified by GPIOF_*
- * @label: a literal description string of this GPIO
+ * struct gpio_pin_range - pin range controlled by a gpio chip
+ * @node: list for maintaining set of pin ranges, used internally
+ * @pctldev: pinctrl device which handles corresponding pins
+ * @range: actual range of pins controlled by a gpio controller
*/
-struct gpio {
- unsigned gpio;
- unsigned long flags;
- const char *label;
+
+struct gpio_pin_range {
+ struct list_head node;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_gpio_range range;
};
-extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-extern int gpio_request_array(struct gpio *array, size_t num);
-extern void gpio_free_array(struct gpio *array, size_t num);
+int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+ unsigned int gpio_offset, unsigned int pin_offset,
+ unsigned int npins);
+int gpiochip_add_pingroup_range(struct gpio_chip *chip,
+ struct pinctrl_dev *pctldev,
+ unsigned int gpio_offset, const char *pin_group);
+void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
-#ifdef CONFIG_GPIO_SYSFS
+#else
-/*
- * A sysfs interface can be exported by individual drivers if they want,
- * but more typically is configured entirely from userspace.
- */
-extern int gpio_export(unsigned gpio, bool direction_may_change);
-extern int gpio_export_link(struct device *dev, const char *name,
- unsigned gpio);
-extern int gpio_sysfs_set_active_low(unsigned gpio, int value);
-extern void gpio_unexport(unsigned gpio);
+static inline int
+gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+ unsigned int gpio_offset, unsigned int pin_offset,
+ unsigned int npins)
+{
+ return 0;
+}
+static inline int
+gpiochip_add_pingroup_range(struct gpio_chip *chip,
+ struct pinctrl_dev *pctldev,
+ unsigned int gpio_offset, const char *pin_group)
+{
+ return 0;
+}
-#endif /* CONFIG_GPIO_SYSFS */
+static inline void
+gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+{
+}
+
+#endif /* CONFIG_PINCTRL */
#else /* !CONFIG_GPIOLIB */
-static inline int gpio_is_valid(int number)
+static inline bool gpio_is_valid(int number)
{
/* only non-negative numbers are valid */
return number >= 0;
@@ -230,42 +209,15 @@ static inline int gpio_cansleep(unsigned gpio)
static inline int gpio_get_value_cansleep(unsigned gpio)
{
might_sleep();
- return gpio_get_value(gpio);
+ return __gpio_get_value(gpio);
}
static inline void gpio_set_value_cansleep(unsigned gpio, int value)
{
might_sleep();
- gpio_set_value(gpio, value);
+ __gpio_set_value(gpio, value);
}
#endif /* !CONFIG_GPIOLIB */
-#ifndef CONFIG_GPIO_SYSFS
-
-struct device;
-
-/* sysfs support is only available with gpiolib, where it's optional */
-
-static inline int gpio_export(unsigned gpio, bool direction_may_change)
-{
- return -ENOSYS;
-}
-
-static inline int gpio_export_link(struct device *dev, const char *name,
- unsigned gpio)
-{
- return -ENOSYS;
-}
-
-static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
- return -ENOSYS;
-}
-
-static inline void gpio_unexport(unsigned gpio)
-{
-}
-#endif /* CONFIG_GPIO_SYSFS */
-
#endif /* _ASM_GENERIC_GPIO_H */
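
The inline wrappers above keep the legacy integer-based GPIO calls working on top of the gpiod_* descriptor API. A hedged driver-side sketch using only calls declared in this header; the GPIO number and label are placeholders:

/* Illustrative use of the legacy numeric GPIO API wrapped above. */
#include <linux/gpio.h>

static int my_led_init(void)
{
	int err;

	err = gpio_request(42, "my-led");	/* placeholder GPIO number */
	if (err)
		return err;

	err = gpio_direction_output(42, 0);	/* start driven low */
	if (err) {
		gpio_free(42);
		return err;
	}

	gpio_set_value_cansleep(42, 1);	/* safe even if the chip can sleep */
	return 0;
}

static void my_led_exit(void)
{
	gpio_free(42);
}
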
diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h
new file mode 100644
index 00000000000..b6312843dbd
--- /dev/null
+++ b/include/asm-generic/hash.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+struct fast_hash_ops;
+static inline void setup_arch_fast_hash(struct fast_hash_ops *ops)
+{
+}
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
new file mode 100644
index 00000000000..99b490b4d05
--- /dev/null
+++ b/include/asm-generic/hugetlb.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_GENERIC_HUGETLB_H
+#define _ASM_GENERIC_HUGETLB_H
+
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+ return mk_pte(page, pgprot);
+}
+
+static inline unsigned long huge_pte_write(pte_t pte)
+{
+ return pte_write(pte);
+}
+
+static inline unsigned long huge_pte_dirty(pte_t pte)
+{
+ return pte_dirty(pte);
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+ return pte_mkwrite(pte);
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+ return pte_mkdirty(pte);
+}
+
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return pte_modify(pte, newprot);
+}
+
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_clear(mm, addr, ptep);
+}
+
+#endif /* _ASM_GENERIC_HUGETLB_H */
diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
deleted file mode 100644
index 1ca3efc976c..00000000000
--- a/include/asm-generic/int-l64.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * asm-generic/int-l64.h
- *
- * Integer declarations for architectures which use "long"
- * for 64-bit types.
- */
-
-#ifndef _ASM_GENERIC_INT_L64_H
-#define _ASM_GENERIC_INT_L64_H
-
-#include <asm/bitsperlong.h>
-
-#ifndef __ASSEMBLY__
-/*
- * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
- * header files exported to user space
- */
-
-typedef __signed__ char __s8;
-typedef unsigned char __u8;
-
-typedef __signed__ short __s16;
-typedef unsigned short __u16;
-
-typedef __signed__ int __s32;
-typedef unsigned int __u32;
-
-typedef __signed__ long __s64;
-typedef unsigned long __u64;
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-
-typedef signed char s8;
-typedef unsigned char u8;
-
-typedef signed short s16;
-typedef unsigned short u16;
-
-typedef signed int s32;
-typedef unsigned int u32;
-
-typedef signed long s64;
-typedef unsigned long u64;
-
-#define S8_C(x) x
-#define U8_C(x) x ## U
-#define S16_C(x) x
-#define U16_C(x) x ## U
-#define S32_C(x) x
-#define U32_C(x) x ## U
-#define S64_C(x) x ## L
-#define U64_C(x) x ## UL
-
-#else /* __ASSEMBLY__ */
-
-#define S8_C(x) x
-#define U8_C(x) x
-#define S16_C(x) x
-#define U16_C(x) x
-#define S32_C(x) x
-#define U32_C(x) x
-#define S64_C(x) x
-#define U64_C(x) x
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_GENERIC_INT_L64_H */
diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
index f394147c073..4cd84855cb4 100644
--- a/include/asm-generic/int-ll64.h
+++ b/include/asm-generic/int-ll64.h
@@ -4,38 +4,11 @@
* Integer declarations for architectures which use "long long"
* for 64-bit types.
*/
-
#ifndef _ASM_GENERIC_INT_LL64_H
#define _ASM_GENERIC_INT_LL64_H
-#include <asm/bitsperlong.h>
-
-#ifndef __ASSEMBLY__
-/*
- * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
- * header files exported to user space
- */
-
-typedef __signed__ char __s8;
-typedef unsigned char __u8;
+#include <uapi/asm-generic/int-ll64.h>
-typedef __signed__ short __s16;
-typedef unsigned short __u16;
-
-typedef __signed__ int __s32;
-typedef unsigned int __u32;
-
-#ifdef __GNUC__
-__extension__ typedef __signed__ long long __s64;
-__extension__ typedef unsigned long long __u64;
-#else
-typedef __signed__ long long __s64;
-typedef unsigned long long __u64;
-#endif
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
@@ -73,6 +46,4 @@ typedef unsigned long long u64;
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_GENERIC_INT_LL64_H */
diff --git a/include/asm-generic/io-64-nonatomic-hi-lo.h b/include/asm-generic/io-64-nonatomic-hi-lo.h
new file mode 100644
index 00000000000..a6806a94250
--- /dev/null
+++ b/include/asm-generic/io-64-nonatomic-hi-lo.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_IO_64_NONATOMIC_HI_LO_H_
+#define _ASM_IO_64_NONATOMIC_HI_LO_H_
+
+#include <linux/io.h>
+#include <asm-generic/int-ll64.h>
+
+#ifndef readq
+static inline __u64 readq(const volatile void __iomem *addr)
+{
+ const volatile u32 __iomem *p = addr;
+ u32 low, high;
+
+ high = readl(p + 1);
+ low = readl(p);
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(__u64 val, volatile void __iomem *addr)
+{
+ writel(val >> 32, addr + 4);
+ writel(val, addr);
+}
+#endif
+
+#endif /* _ASM_IO_64_NONATOMIC_HI_LO_H_ */
diff --git a/include/asm-generic/io-64-nonatomic-lo-hi.h b/include/asm-generic/io-64-nonatomic-lo-hi.h
new file mode 100644
index 00000000000..ca546b1ff8b
--- /dev/null
+++ b/include/asm-generic/io-64-nonatomic-lo-hi.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_IO_64_NONATOMIC_LO_HI_H_
+#define _ASM_IO_64_NONATOMIC_LO_HI_H_
+
+#include <linux/io.h>
+#include <asm-generic/int-ll64.h>
+
+#ifndef readq
+static inline __u64 readq(const volatile void __iomem *addr)
+{
+ const volatile u32 __iomem *p = addr;
+ u32 low, high;
+
+ low = readl(p);
+ high = readl(p + 1);
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(__u64 val, volatile void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, addr + 4);
+}
+#endif
+
+#endif /* _ASM_IO_64_NONATOMIC_LO_HI_H_ */
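
Both fallback headers above synthesise one 64-bit access from two 32-bit ones and differ only in whether the high or the low word is touched first (which matters for registers that latch on one half). The value composition itself is easy to check standalone:

/* Check the low + (high << 32) composition used by the readq()/writeq()
 * fallbacks above, and the reverse split used on the write side.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t low  = 0x89abcdefu;
	uint32_t high = 0x01234567u;
	uint64_t v = low + ((uint64_t)high << 32);

	printf("%#llx\n", (unsigned long long)v);	/* 0x123456789abcdef */
	printf("low=%#x high=%#x\n", (uint32_t)v, (uint32_t)(v >> 32));
	return 0;
}
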
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 3577ca11a0b..975e1cc75ed 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -12,13 +12,14 @@
#define __ASM_GENERIC_IO_H
#include <asm/page.h> /* I/O is all done through memory accesses */
-#include <asm/cacheflush.h>
#include <linux/types.h>
#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif
+#include <asm-generic/pci_iomap.h>
+
#ifndef mmiowb
#define mmiowb() do {} while (0)
#endif
@@ -52,8 +53,18 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
#endif
#define readb __raw_readb
-#define readw(addr) __le16_to_cpu(__raw_readw(addr))
-#define readl(addr) __le32_to_cpu(__raw_readl(addr))
+
+#define readw readw
+static inline u16 readw(const volatile void __iomem *addr)
+{
+ return __le16_to_cpu(__raw_readw(addr));
+}
+
+#define readl readl
+static inline u32 readl(const volatile void __iomem *addr)
+{
+ return __le32_to_cpu(__raw_readl(addr));
+}
#ifndef __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
@@ -81,17 +92,31 @@ static inline void __raw_writel(u32 b, volatile void __iomem *addr)
#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
#ifdef CONFIG_64BIT
+#ifndef __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
return *(const volatile u64 __force *) addr;
}
-#define readq(addr) __le64_to_cpu(__raw_readq(addr))
+#endif
+
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ return __le64_to_cpu(__raw_readq(addr));
+}
+#ifndef __raw_writeq
static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
*(volatile u64 __force *) addr = b;
}
-#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
+#endif
+
+#define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr)
+#endif /* CONFIG_64BIT */
+
+#ifndef PCI_IOBASE
+#define PCI_IOBASE ((void __iomem *) 0)
#endif
/*****************************************************************************/
@@ -101,32 +126,32 @@ static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
static inline u8 inb(unsigned long addr)
{
- return readb((volatile void __iomem *) addr);
+ return readb(addr + PCI_IOBASE);
}
static inline u16 inw(unsigned long addr)
{
- return readw((volatile void __iomem *) addr);
+ return readw(addr + PCI_IOBASE);
}
static inline u32 inl(unsigned long addr)
{
- return readl((volatile void __iomem *) addr);
+ return readl(addr + PCI_IOBASE);
}
static inline void outb(u8 b, unsigned long addr)
{
- writeb(b, (volatile void __iomem *) addr);
+ writeb(b, addr + PCI_IOBASE);
}
static inline void outw(u16 b, unsigned long addr)
{
- writew(b, (volatile void __iomem *) addr);
+ writew(b, addr + PCI_IOBASE);
}
static inline void outl(u32 b, unsigned long addr)
{
- writel(b, (volatile void __iomem *) addr);
+ writel(b, addr + PCI_IOBASE);
}
#define inb_p(addr) inb(addr)
@@ -142,7 +167,7 @@ static inline void insb(unsigned long addr, void *buffer, int count)
if (count) {
u8 *buf = buffer;
do {
- u8 x = inb(addr);
+ u8 x = __raw_readb(addr + PCI_IOBASE);
*buf++ = x;
} while (--count);
}
@@ -155,7 +180,7 @@ static inline void insw(unsigned long addr, void *buffer, int count)
if (count) {
u16 *buf = buffer;
do {
- u16 x = inw(addr);
+ u16 x = __raw_readw(addr + PCI_IOBASE);
*buf++ = x;
} while (--count);
}
@@ -168,7 +193,7 @@ static inline void insl(unsigned long addr, void *buffer, int count)
if (count) {
u32 *buf = buffer;
do {
- u32 x = inl(addr);
+ u32 x = __raw_readl(addr + PCI_IOBASE);
*buf++ = x;
} while (--count);
}
@@ -181,7 +206,7 @@ static inline void outsb(unsigned long addr, const void *buffer, int count)
if (count) {
const u8 *buf = buffer;
do {
- outb(*buf++, addr);
+ __raw_writeb(*buf++, addr + PCI_IOBASE);
} while (--count);
}
}
@@ -193,7 +218,7 @@ static inline void outsw(unsigned long addr, const void *buffer, int count)
if (count) {
const u16 *buf = buffer;
do {
- outw(*buf++, addr);
+ __raw_writew(*buf++, addr + PCI_IOBASE);
} while (--count);
}
}
@@ -205,7 +230,7 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
if (count) {
const u32 *buf = buffer;
do {
- outl(*buf++, addr);
+ __raw_writel(*buf++, addr + PCI_IOBASE);
} while (--count);
}
}
@@ -214,15 +239,15 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
#ifndef CONFIG_GENERIC_IOMAP
#define ioread8(addr) readb(addr)
#define ioread16(addr) readw(addr)
-#define ioread16be(addr) be16_to_cpu(ioread16(addr))
+#define ioread16be(addr) __be16_to_cpu(__raw_readw(addr))
#define ioread32(addr) readl(addr)
-#define ioread32be(addr) be32_to_cpu(ioread32(addr))
+#define ioread32be(addr) __be32_to_cpu(__raw_readl(addr))
#define iowrite8(v, addr) writeb((v), (addr))
#define iowrite16(v, addr) writew((v), (addr))
-#define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr))
+#define iowrite16be(v, addr) __raw_writew(__cpu_to_be16(v), addr)
#define iowrite32(v, addr) writel((v), (addr))
-#define iowrite32be(v, addr) iowrite32(be32_to_cpu(v), (addr))
+#define iowrite32be(v, addr) __raw_writel(__cpu_to_be32(v), addr)
#define ioread8_rep(p, dst, count) \
insb((unsigned long) (p), (dst), (count))
@@ -239,8 +264,9 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
outsl((unsigned long) (p), (src), (count))
#endif /* CONFIG_GENERIC_IOMAP */
-
-#define IO_SPACE_LIMIT 0xffffffff
+#ifndef IO_SPACE_LIMIT
+#define IO_SPACE_LIMIT 0xffff
+#endif
#ifdef __KERNEL__
@@ -248,18 +274,21 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
#define __io_virt(x) ((void __force *) (x))
#ifndef CONFIG_GENERIC_IOMAP
-/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+
+#ifndef pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
+#endif
#endif /* CONFIG_GENERIC_IOMAP */
/*
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
*/
+#ifndef virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa((unsigned long)address);
@@ -269,10 +298,15 @@ static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
+#endif
/*
* Change "struct page" to physical address.
+ *
+ * This implementation is for the no-MMU case only... if you have an MMU
+ * you'll need to provide your own definitions.
*/
+#ifndef CONFIG_MMU
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
return (void __iomem*) (unsigned long)offset;
@@ -288,10 +322,12 @@ static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
#define ioremap_wc ioremap_nocache
#endif
-static inline void iounmap(void *addr)
+static inline void iounmap(void __iomem *addr)
{
}
+#endif /* CONFIG_MMU */
+#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
@@ -305,10 +341,16 @@ static inline void ioport_unmap(void __iomem *p)
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
+#endif /* CONFIG_HAS_IOPORT_MAP */
+#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr(p) p
-#define xlate_dev_mem_ptr(p) ((void *) (p))
+#endif
+#ifndef xlate_dev_mem_ptr
+#define xlate_dev_mem_ptr(p) __va(p)
+#endif
+#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(volatile void *address)
{
@@ -320,10 +362,18 @@ static inline void *bus_to_virt(unsigned long address)
return (void *) address;
}
#endif
+#endif
+#ifndef memset_io
#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
+#endif
+
+#ifndef memcpy_fromio
#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
+#endif
+#ifndef memcpy_toio
#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
+#endif
#endif /* __KERNEL__ */
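
With the generic implementation above, port I/O is simply MMIO at PCI_IOBASE + port. A hedged sketch using only inb()/outb() as defined in this header; the port number is a placeholder (the traditional 16550 UART base), not a claim about any particular platform:

/* Illustrative port I/O through the generic inb()/outb() above. */
#include <linux/types.h>
#include <linux/io.h>

#define MY_PORT 0x3f8	/* placeholder port number */

static void poke_port(void)
{
	u8 v;

	outb(0x00, MY_PORT);	/* one byte out to the port */
	v = inb(MY_PORT);	/* and one byte back in */
	(void)v;
}
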
diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h
index 15828b2d663..297fb0d7cd6 100644
--- a/include/asm-generic/ioctl.h
+++ b/include/asm-generic/ioctl.h
@@ -1,105 +1,17 @@
#ifndef _ASM_GENERIC_IOCTL_H
#define _ASM_GENERIC_IOCTL_H
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
+#include <uapi/asm-generic/ioctl.h>
-/*
- * The following is for compatibility across the various Linux
- * platforms. The generic ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-
-/*
- * Let any architecture override either of the following before
- * including this file.
- */
-
-#ifndef _IOC_SIZEBITS
-# define _IOC_SIZEBITS 14
-#endif
-
-#ifndef _IOC_DIRBITS
-# define _IOC_DIRBITS 2
-#endif
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits, which any architecture can choose to override
- * before including this file.
- */
-
-#ifndef _IOC_NONE
-# define _IOC_NONE 0U
-#endif
-
-#ifndef _IOC_WRITE
-# define _IOC_WRITE 1U
-#endif
-
-#ifndef _IOC_READ
-# define _IOC_READ 2U
-#endif
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-#ifdef __KERNEL__
+#ifdef __CHECKER__
+#define _IOC_TYPECHECK(t) (sizeof(t))
+#else
/* provoke compile error for invalid uses of size argument */
extern unsigned int __invalid_size_argument_for_IOC;
#define _IOC_TYPECHECK(t) \
((sizeof(t) == sizeof(t[1]) && \
sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
sizeof(t) : __invalid_size_argument_for_IOC)
-#else
-#define _IOC_TYPECHECK(t) (sizeof(t))
#endif
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
#endif /* _ASM_GENERIC_IOCTL_H */
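
The _IOC() encoding this header now pulls in from uapi packs direction, type, number and argument size into a single 32-bit request, and _IOC_TYPECHECK() rejects argument types that would overflow the 14-bit size field at compile time. A minimal user-space sketch of how a driver-private command is built and decoded; the 'X' magic, command numbers and struct are made-up examples:

    #include <linux/ioctl.h>
    #include <stdio.h>

    /* Hypothetical device commands; the 'X' magic and numbers are examples. */
    struct xdev_config { int rate; int flags; };
    #define XDEV_GET_CONFIG _IOR('X', 0x01, struct xdev_config)
    #define XDEV_SET_CONFIG _IOW('X', 0x02, struct xdev_config)

    int main(void)
    {
        unsigned int cmd = XDEV_SET_CONFIG;

        /* Decode the fields back out of the request number. */
        printf("dir=%u type='%c' nr=%u size=%u\n",
               _IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
        return 0;
    }
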
diff --git a/include/asm-generic/ioctls.h b/include/asm-generic/ioctls.h
deleted file mode 100644
index a3216655d65..00000000000
--- a/include/asm-generic/ioctls.h
+++ /dev/null
@@ -1,112 +0,0 @@
-#ifndef __ASM_GENERIC_IOCTLS_H
-#define __ASM_GENERIC_IOCTLS_H
-
-#include <linux/ioctl.h>
-
-/*
- * These are the most common definitions for tty ioctl numbers.
- * Most of them do not use the recommended _IOC(), but there is
- * probably some source code out there hardcoding the number,
- * so we might as well use them for all new platforms.
- *
- * The architectures that use different values here typically
- * try to be compatible with some Unix variants for the same
- * architecture.
- */
-
-/* 0x54 is just a magic number to make these relatively unique ('T') */
-
-#define TCGETS 0x5401
-#define TCSETS 0x5402
-#define TCSETSW 0x5403
-#define TCSETSF 0x5404
-#define TCGETA 0x5405
-#define TCSETA 0x5406
-#define TCSETAW 0x5407
-#define TCSETAF 0x5408
-#define TCSBRK 0x5409
-#define TCXONC 0x540A
-#define TCFLSH 0x540B
-#define TIOCEXCL 0x540C
-#define TIOCNXCL 0x540D
-#define TIOCSCTTY 0x540E
-#define TIOCGPGRP 0x540F
-#define TIOCSPGRP 0x5410
-#define TIOCOUTQ 0x5411
-#define TIOCSTI 0x5412
-#define TIOCGWINSZ 0x5413
-#define TIOCSWINSZ 0x5414
-#define TIOCMGET 0x5415
-#define TIOCMBIS 0x5416
-#define TIOCMBIC 0x5417
-#define TIOCMSET 0x5418
-#define TIOCGSOFTCAR 0x5419
-#define TIOCSSOFTCAR 0x541A
-#define FIONREAD 0x541B
-#define TIOCINQ FIONREAD
-#define TIOCLINUX 0x541C
-#define TIOCCONS 0x541D
-#define TIOCGSERIAL 0x541E
-#define TIOCSSERIAL 0x541F
-#define TIOCPKT 0x5420
-#define FIONBIO 0x5421
-#define TIOCNOTTY 0x5422
-#define TIOCSETD 0x5423
-#define TIOCGETD 0x5424
-#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
-#define TIOCSBRK 0x5427 /* BSD compatibility */
-#define TIOCCBRK 0x5428 /* BSD compatibility */
-#define TIOCGSID 0x5429 /* Return the session ID of FD */
-#define TCGETS2 _IOR('T', 0x2A, struct termios2)
-#define TCSETS2 _IOW('T', 0x2B, struct termios2)
-#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
-#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
-#define TIOCGRS485 0x542E
-#ifndef TIOCSRS485
-#define TIOCSRS485 0x542F
-#endif
-#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
-#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
-#define TCGETX 0x5432 /* SYS5 TCGETX compatibility */
-#define TCSETX 0x5433
-#define TCSETXF 0x5434
-#define TCSETXW 0x5435
-#define TIOCSIG _IOW('T', 0x36, int) /* pty: generate signal */
-
-#define FIONCLEX 0x5450
-#define FIOCLEX 0x5451
-#define FIOASYNC 0x5452
-#define TIOCSERCONFIG 0x5453
-#define TIOCSERGWILD 0x5454
-#define TIOCSERSWILD 0x5455
-#define TIOCGLCKTRMIOS 0x5456
-#define TIOCSLCKTRMIOS 0x5457
-#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
-#define TIOCSERGETLSR 0x5459 /* Get line status register */
-#define TIOCSERGETMULTI 0x545A /* Get multiport config */
-#define TIOCSERSETMULTI 0x545B /* Set multiport config */
-
-#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
-#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
-
-/*
- * Some arches already define FIOQSIZE due to a historical
- * conflict with a Hayes modem-specific ioctl value.
- */
-#ifndef FIOQSIZE
-# define FIOQSIZE 0x5460
-#endif
-
-/* Used for packet mode */
-#define TIOCPKT_DATA 0
-#define TIOCPKT_FLUSHREAD 1
-#define TIOCPKT_FLUSHWRITE 2
-#define TIOCPKT_STOP 4
-#define TIOCPKT_START 8
-#define TIOCPKT_NOSTOP 16
-#define TIOCPKT_DOSTOP 32
-#define TIOCPKT_IOCTL 64
-
-#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
-
-#endif /* __ASM_GENERIC_IOCTLS_H */
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 76b0cc5637f..1b41011643a 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -56,17 +56,26 @@ extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long coun
extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
+#ifdef CONFIG_HAS_IOPORT_MAP
/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);
+#endif
#ifndef ARCH_HAS_IOREMAP_WC
#define ioremap_wc ioremap_nocache
#endif
-/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+#ifdef CONFIG_PCI
+/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+#elif defined(CONFIG_GENERIC_IOMAP)
+struct pci_dev;
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
+#endif
+
+#include <asm-generic/pci_iomap.h>
#endif
diff --git a/include/asm-generic/ipcbuf.h b/include/asm-generic/ipcbuf.h
deleted file mode 100644
index 76982b2a1b5..00000000000
--- a/include/asm-generic/ipcbuf.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ASM_GENERIC_IPCBUF_H
-#define __ASM_GENERIC_IPCBUF_H
-
-/*
- * The generic ipc64_perm structure:
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * ipc64_perm was originally meant to be architecture specific, but
- * everyone just ended up making identical copies without specific
- * optimizations, so we may just as well all use the same one.
- *
- * Pad space is left for:
- * - 32-bit mode_t on architectures that only had 16 bit
- * - 32-bit seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm {
- __kernel_key_t key;
- __kernel_uid32_t uid;
- __kernel_gid32_t gid;
- __kernel_uid32_t cuid;
- __kernel_gid32_t cgid;
- __kernel_mode_t mode;
- /* pad if mode_t is u16: */
- unsigned char __pad1[4 - sizeof(__kernel_mode_t)];
- unsigned short seq;
- unsigned short __pad2;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* __ASM_GENERIC_IPCBUF_H */
diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h
index 5ae1d07d4a1..6bf9355fa7e 100644
--- a/include/asm-generic/irq_regs.h
+++ b/include/asm-generic/irq_regs.h
@@ -22,15 +22,15 @@ DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
static inline struct pt_regs *get_irq_regs(void)
{
- return __get_cpu_var(__irq_regs);
+ return __this_cpu_read(__irq_regs);
}
static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
- struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);
+ struct pt_regs *old_regs;
- old_regs = *pp_regs;
- *pp_regs = new_regs;
+ old_regs = __this_cpu_read(__irq_regs);
+ __this_cpu_write(__irq_regs, new_regs);
return old_regs;
}
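
get_irq_regs()/set_irq_regs() keep their calling convention after the switch to __this_cpu_read()/__this_cpu_write(); only the per-cpu access changes. A hedged sketch of how an architecture's interrupt entry path typically brackets handler dispatch with them (my_arch_do_IRQ is an illustrative name, not an existing kernel symbol):

    #include <linux/irq.h>
    #include <linux/hardirq.h>
    #include <asm/irq_regs.h>

    /* Illustrative arch-level IRQ entry path. */
    void my_arch_do_IRQ(struct pt_regs *regs, unsigned int irq)
    {
        struct pt_regs *old_regs = set_irq_regs(regs);

        irq_enter();
        generic_handle_irq(irq);    /* handlers may call get_irq_regs() */
        irq_exit();

        set_irq_regs(old_regs);     /* restore the outer value for nested IRQs */
    }
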
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index 0232ccb76f2..90f99c74dd3 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -2,39 +2,9 @@
#define _ASM_GENERIC_KMAP_TYPES_H
#ifdef __WITH_KM_FENCE
-# define KMAP_D(n) __KM_FENCE_##n ,
+# define KM_TYPE_NR 41
#else
-# define KMAP_D(n)
+# define KM_TYPE_NR 20
#endif
-enum km_type {
-KMAP_D(0) KM_BOUNCE_READ,
-KMAP_D(1) KM_SKB_SUNRPC_DATA,
-KMAP_D(2) KM_SKB_DATA_SOFTIRQ,
-KMAP_D(3) KM_USER0,
-KMAP_D(4) KM_USER1,
-KMAP_D(5) KM_BIO_SRC_IRQ,
-KMAP_D(6) KM_BIO_DST_IRQ,
-KMAP_D(7) KM_PTE0,
-KMAP_D(8) KM_PTE1,
-KMAP_D(9) KM_IRQ0,
-KMAP_D(10) KM_IRQ1,
-KMAP_D(11) KM_SOFTIRQ0,
-KMAP_D(12) KM_SOFTIRQ1,
-KMAP_D(13) KM_SYNC_ICACHE,
-KMAP_D(14) KM_SYNC_DCACHE,
-/* UML specific, for copy_*_user - used in do_op_one_page */
-KMAP_D(15) KM_UML_USERCOPY,
-KMAP_D(16) KM_IRQ_PTE,
-KMAP_D(17) KM_NMI,
-KMAP_D(18) KM_NMI_PTE,
-KMAP_D(19) KM_KDB,
-/*
- * Remember to update debug_kmap_atomic() when adding new kmap types!
- */
-KMAP_D(20) KM_TYPE_NR
-};
-
-#undef KMAP_D
-
#endif
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
new file mode 100644
index 00000000000..fa25becbdca
--- /dev/null
+++ b/include/asm-generic/kvm_para.h
@@ -0,0 +1,26 @@
+#ifndef _ASM_GENERIC_KVM_PARA_H
+#define _ASM_GENERIC_KVM_PARA_H
+
+#include <uapi/asm-generic/kvm_para.h>
+
+
+/*
+ * This function is used by architectures that support kvm to avoid issuing
+ * false soft lockup messages.
+ */
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+static inline bool kvm_para_available(void)
+{
+ return false;
+}
+
+#endif
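
These stubs let generic code probe for paravirtual support unconditionally; on architectures without KVM guest support every check simply reports "not available". A hedged sketch of the intended call pattern through the <linux/kvm_para.h> wrappers (the helper name is illustrative):

    #include <linux/types.h>
    #include <linux/kvm_para.h>

    static bool pv_feature_usable(unsigned int feature)
    {
        /* Both helpers resolve to the asm-generic stubs (false/0) on
         * architectures that do not implement KVM guest support. */
        if (!kvm_para_available())
            return false;
        return kvm_para_has_feature(feature);
    }
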
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index c8a5d68541d..9ceb03b4f46 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -2,7 +2,7 @@
#define _ASM_GENERIC_LOCAL_H
#include <linux/percpu.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/types.h>
/*
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
index 02ac760c1a8..5980002b8b7 100644
--- a/include/asm-generic/local64.h
+++ b/include/asm-generic/local64.h
@@ -55,7 +55,7 @@ typedef struct {
#else /* BITS_PER_LONG != 64 */
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct {
diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h
new file mode 100644
index 00000000000..10cd4ffc6ba
--- /dev/null
+++ b/include/asm-generic/mcs_spinlock.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_MCS_SPINLOCK_H
+#define __ASM_MCS_SPINLOCK_H
+
+/*
+ * Architectures can define their own:
+ *
+ * arch_mcs_spin_lock_contended(l)
+ * arch_mcs_spin_unlock_contended(l)
+ *
+ * See kernel/locking/mcs_spinlock.c.
+ */
+
+#endif /* __ASM_MCS_SPINLOCK_H */
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index fb2d63f13f4..14909b0b9ca 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -39,7 +39,7 @@
})
#define __page_to_pfn(pg) \
-({ struct page *__pg = (pg); \
+({ const struct page *__pg = (pg); \
struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
(unsigned long)(__pg - __pgdat->node_mem_map) + \
__pgdat->node_start_pfn; \
@@ -53,11 +53,11 @@
#elif defined(CONFIG_SPARSEMEM)
/*
- * Note: section's mem_map is encorded to reflect its start_pfn.
+ * Note: section's mem_map is encoded to reflect its start_pfn.
* section[i].section_mem_map == mem_map's address - start_pfn;
*/
#define __page_to_pfn(pg) \
-({ struct page *__pg = (pg); \
+({ const struct page *__pg = (pg); \
int __sec = page_to_section(__pg); \
(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
})
diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h
deleted file mode 100644
index 3da9e2742fa..00000000000
--- a/include/asm-generic/mman-common.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __ASM_GENERIC_MMAN_COMMON_H
-#define __ASM_GENERIC_MMAN_COMMON_H
-
-/*
- Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd.
- Based on: asm-xxx/mman.h
-*/
-
-#define PROT_READ 0x1 /* page can be read */
-#define PROT_WRITE 0x2 /* page can be written */
-#define PROT_EXEC 0x4 /* page can be executed */
-#define PROT_SEM 0x8 /* page may be used for atomic ops */
-#define PROT_NONE 0x0 /* page can not be accessed */
-#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
-#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
-
-#define MAP_SHARED 0x01 /* Share changes */
-#define MAP_PRIVATE 0x02 /* Changes are private */
-#define MAP_TYPE 0x0f /* Mask for type of mapping */
-#define MAP_FIXED 0x10 /* Interpret addr exactly */
-#define MAP_ANONYMOUS 0x20 /* don't use a file */
-#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
-# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */
-#else
-# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
-#endif
-
-#define MS_ASYNC 1 /* sync memory asynchronously */
-#define MS_INVALIDATE 2 /* invalidate the caches */
-#define MS_SYNC 4 /* synchronous memory sync */
-
-#define MADV_NORMAL 0 /* no further special treatment */
-#define MADV_RANDOM 1 /* expect random page references */
-#define MADV_SEQUENTIAL 2 /* expect sequential page references */
-#define MADV_WILLNEED 3 /* will need these pages */
-#define MADV_DONTNEED 4 /* don't need these pages */
-
-/* common parameters: try to keep these consistent across architectures */
-#define MADV_REMOVE 9 /* remove these pages & resources */
-#define MADV_DONTFORK 10 /* don't inherit across fork */
-#define MADV_DOFORK 11 /* do inherit across fork */
-#define MADV_HWPOISON 100 /* poison a page for testing */
-#define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */
-
-#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
-#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
-
-/* compatibility flags */
-#define MAP_FILE 0
-
-#endif /* __ASM_GENERIC_MMAN_COMMON_H */
diff --git a/include/asm-generic/mman.h b/include/asm-generic/mman.h
deleted file mode 100644
index 32c8bd6a196..00000000000
--- a/include/asm-generic/mman.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __ASM_GENERIC_MMAN_H
-#define __ASM_GENERIC_MMAN_H
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
-#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
-
-#endif /* __ASM_GENERIC_MMAN_H */
diff --git a/include/asm-generic/mmu.h b/include/asm-generic/mmu.h
index 4f4aa56d6b5..0ed3f1cfb85 100644
--- a/include/asm-generic/mmu.h
+++ b/include/asm-generic/mmu.h
@@ -7,8 +7,12 @@
*/
#ifndef __ASSEMBLY__
typedef struct {
- struct vm_list_struct *vmlist;
unsigned long end_brk;
+
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+#endif
} mm_context_t;
#endif
diff --git a/include/asm-generic/module.h b/include/asm-generic/module.h
index ed5b44de4c9..14dc41d185a 100644
--- a/include/asm-generic/module.h
+++ b/include/asm-generic/module.h
@@ -5,18 +5,44 @@
* Many architectures just need a simple module
* loader without arch specific data.
*/
+#ifndef CONFIG_HAVE_MOD_ARCH_SPECIFIC
struct mod_arch_specific
{
};
+#endif
#ifdef CONFIG_64BIT
-#define Elf_Shdr Elf64_Shdr
-#define Elf_Sym Elf64_Sym
-#define Elf_Ehdr Elf64_Ehdr
-#else
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Phdr Elf64_Phdr
+#define Elf_Sym Elf64_Sym
+#define Elf_Dyn Elf64_Dyn
+#define Elf_Ehdr Elf64_Ehdr
+#define Elf_Addr Elf64_Addr
+#ifdef CONFIG_MODULES_USE_ELF_REL
+#define Elf_Rel Elf64_Rel
+#endif
+#ifdef CONFIG_MODULES_USE_ELF_RELA
+#define Elf_Rela Elf64_Rela
+#endif
+#define ELF_R_TYPE(X) ELF64_R_TYPE(X)
+#define ELF_R_SYM(X) ELF64_R_SYM(X)
+
+#else /* CONFIG_64BIT */
+
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Phdr Elf32_Phdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Dyn Elf32_Dyn
+#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Addr Elf32_Addr
+#ifdef CONFIG_MODULES_USE_ELF_REL
+#define Elf_Rel Elf32_Rel
+#endif
+#ifdef CONFIG_MODULES_USE_ELF_RELA
+#define Elf_Rela Elf32_Rela
+#endif
+#define ELF_R_TYPE(X) ELF32_R_TYPE(X)
+#define ELF_R_SYM(X) ELF32_R_SYM(X)
#endif
#endif /* __ASM_GENERIC_MODULE_H */
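
With the relocation types and ELF_R_* accessors now selected by word size and by CONFIG_MODULES_USE_ELF_REL/RELA, an architecture's module loader can be written once against the generic names. A hedged skeleton of apply_relocate_add() for a RELA architecture; the relocation cases themselves are elided and R_FOO_32 is a placeholder:

    #include <linux/module.h>
    #include <linux/elf.h>
    #include <linux/printk.h>
    #include <linux/errno.h>

    int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                           unsigned int symindex, unsigned int relsec,
                           struct module *me)
    {
        Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr;
        unsigned int i;

        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
            void *loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                        + rel[i].r_offset;

            switch (ELF_R_TYPE(rel[i].r_info)) {
            /* Architecture-specific cases go here, e.g.
             *   case R_FOO_32: *(u32 *)loc = symbol_value; break;
             */
            default:
                pr_err("%s: unsupported relocation %lu at %p\n", me->name,
                       (unsigned long)ELF_R_TYPE(rel[i].r_info), loc);
                return -ENOEXEC;
            }
        }
        return 0;
    }
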
diff --git a/include/asm-generic/msgbuf.h b/include/asm-generic/msgbuf.h
deleted file mode 100644
index aec850d9159..00000000000
--- a/include/asm-generic/msgbuf.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef __ASM_GENERIC_MSGBUF_H
-#define __ASM_GENERIC_MSGBUF_H
-
-#include <asm/bitsperlong.h>
-/*
- * generic msqid64_ds structure.
- *
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * msqid64_ds was originally meant to be architecture specific, but
- * everyone just ended up making identical copies without specific
- * optimizations, so we may just as well all use the same one.
- *
- * 64 bit architectures typically define a 64 bit __kernel_time_t,
- * so they do not need the first three padding words.
- * On big-endian systems, the padding is in the wrong place.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct msqid64_ds {
- struct ipc64_perm msg_perm;
- __kernel_time_t msg_stime; /* last msgsnd time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused1;
-#endif
- __kernel_time_t msg_rtime; /* last msgrcv time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused2;
-#endif
- __kernel_time_t msg_ctime; /* last change time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused3;
-#endif
- unsigned long msg_cbytes; /* current number of bytes on queue */
- unsigned long msg_qnum; /* number of messages in queue */
- unsigned long msg_qbytes; /* max number of bytes on queue */
- __kernel_pid_t msg_lspid; /* pid of last msgsnd */
- __kernel_pid_t msg_lrpid; /* last receive pid */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-#endif /* __ASM_GENERIC_MSGBUF_H */
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index f104af7cf43..d4f9fb4e53d 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -28,17 +28,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
*
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
*/
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
{
if (unlikely(atomic_dec_return(count) < 0))
- return fail_fn(count);
+ return -1;
return 0;
}
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
index e1bbbc72b6a..61069ed334e 100644
--- a/include/asm-generic/mutex-null.h
+++ b/include/asm-generic/mutex-null.h
@@ -11,7 +11,7 @@
#define _ASM_GENERIC_MUTEX_NULL_H
#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count)
-#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count)
+#define __mutex_fastpath_lock_retval(count) (-1)
#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count)
#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
#define __mutex_slowpath_needs_to_unlock() 1
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 580a6d35c70..f169ec06478 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -26,24 +26,29 @@ static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
- fail_fn(count);
+ /*
+ * We failed to acquire the lock, so mark it contended
+ * to ensure that any waiting tasks are woken up by the
+ * unlock slow path.
+ */
+ if (likely(atomic_xchg(count, -1) != 1))
+ fail_fn(count);
}
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
*
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
*/
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
{
if (unlikely(atomic_xchg(count, 0) != 1))
- return fail_fn(count);
+ if (likely(atomic_xchg(count, -1) != 1))
+ return -1;
return 0;
}
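
Both fastpath variants now report contention with -1 instead of calling a fail function, leaving the generic mutex code to decide what happens next. A hedged sketch of the caller side, simplified from what kernel/mutex.c does; my_slowpath() stands in for the real contended path:

    #include <linux/mutex.h>
    #include <asm/mutex.h>

    static int my_slowpath(struct mutex *lock)
    {
        /* Real code would queue on lock->wait_list and sleep. */
        return 0;
    }

    static int my_mutex_lock(struct mutex *lock)
    {
        /* 0: the dec/xchg fastpath took the lock; -1: fall back. */
        if (__mutex_fastpath_lock_retval(&lock->count) == 0)
            return 0;
        return my_slowpath(lock);
    }
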
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index 75fec18cdc5..37d1fe28960 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -71,22 +71,26 @@ extern unsigned long memory_end;
#define PAGE_OFFSET (0)
#endif
+#ifndef ARCH_PFN_OFFSET
+#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
+#endif
+
#ifndef __ASSEMBLY__
-#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
-#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long) (x)))
+#define __pa(x) ((unsigned long) (x))
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
-#define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
-#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+#define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr))
+#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
#ifndef page_to_phys
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#endif
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
((void *)(kaddr) < (void *)memory_end))
diff --git a/include/asm-generic/param.h b/include/asm-generic/param.h
index cdf8251bfb6..04e715bccce 100644
--- a/include/asm-generic/param.h
+++ b/include/asm-generic/param.h
@@ -1,24 +1,10 @@
#ifndef __ASM_GENERIC_PARAM_H
#define __ASM_GENERIC_PARAM_H
-#ifdef __KERNEL__
+#include <uapi/asm-generic/param.h>
+
+# undef HZ
# define HZ CONFIG_HZ /* Internal kernel timer frequency */
# define USER_HZ 100 /* some user interfaces are */
# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
-#endif
-
-#ifndef HZ
-#define HZ 100
-#endif
-
-#ifndef EXEC_PAGESIZE
-#define EXEC_PAGESIZE 4096
-#endif
-
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
#endif /* __ASM_GENERIC_PARAM_H */
diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h
index 40528cb977e..2c9f9d4336c 100644
--- a/include/asm-generic/parport.h
+++ b/include/asm-generic/parport.h
@@ -10,8 +10,8 @@
* to devices on the PCI bus.
*/
-static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
-static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
+static int parport_pc_find_isa_ports(int autoirq, int autodma);
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
{
#ifdef CONFIG_ISA
return parport_pc_find_isa_ports(autoirq, autodma);
diff --git a/include/asm-generic/pci-bridge.h b/include/asm-generic/pci-bridge.h
new file mode 100644
index 00000000000..20db2e5a0a6
--- /dev/null
+++ b/include/asm-generic/pci-bridge.h
@@ -0,0 +1,74 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_GENERIC_PCI_BRIDGE_H
+#define _ASM_GENERIC_PCI_BRIDGE_H
+
+#ifdef __KERNEL__
+
+enum {
+ /* Force re-assigning all resources (ignore firmware
+ * setup completely)
+ */
+ PCI_REASSIGN_ALL_RSRC = 0x00000001,
+
+ /* Re-assign all bus numbers */
+ PCI_REASSIGN_ALL_BUS = 0x00000002,
+
+ /* Do not try to assign, just use existing setup */
+ PCI_PROBE_ONLY = 0x00000004,
+
+ /* Don't bother with ISA alignment unless the bridge has
+ * ISA forwarding enabled
+ */
+ PCI_CAN_SKIP_ISA_ALIGN = 0x00000008,
+
+ /* Enable domain numbers in /proc */
+ PCI_ENABLE_PROC_DOMAINS = 0x00000010,
+ /* ... except for domain 0 */
+ PCI_COMPAT_DOMAIN_0 = 0x00000020,
+
+ /* PCIe downstream ports are bridges that normally lead to only a
+ * device 0, but if this is set, we scan all possible devices, not
+ * just device 0.
+ */
+ PCI_SCAN_ALL_PCIE_DEVS = 0x00000040,
+};
+
+#ifdef CONFIG_PCI
+extern unsigned int pci_flags;
+
+static inline void pci_set_flags(int flags)
+{
+ pci_flags = flags;
+}
+
+static inline void pci_add_flags(int flags)
+{
+ pci_flags |= flags;
+}
+
+static inline void pci_clear_flags(int flags)
+{
+ pci_flags &= ~flags;
+}
+
+static inline int pci_has_flag(int flag)
+{
+ return pci_flags & flag;
+}
+#else
+static inline void pci_set_flags(int flags) { }
+static inline void pci_add_flags(int flags) { }
+static inline void pci_clear_flags(int flags) { }
+static inline int pci_has_flag(int flag)
+{
+ return 0;
+}
+#endif /* CONFIG_PCI */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_GENERIC_PCI_BRIDGE_H */
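
The pci_flags accessors are intended for architecture PCI setup code to run before bus enumeration; with CONFIG_PCI disabled they compile away. A hedged sketch of typical use (the setup hook name is illustrative):

    #include <linux/types.h>
    #include <linux/printk.h>
    #include <asm-generic/pci-bridge.h>

    /* Illustrative arch setup hook. */
    static void my_pcibios_setup_flags(bool trust_firmware)
    {
        if (trust_firmware)
            pci_add_flags(PCI_PROBE_ONLY);
        else
            pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);

        if (pci_has_flag(PCI_PROBE_ONLY))
            pr_info("PCI: using firmware resource assignments\n");
    }
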
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index 26373cff454..e80a0495e5b 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -6,30 +6,6 @@
#ifndef _ASM_GENERIC_PCI_H
#define _ASM_GENERIC_PCI_H
-/**
- * pcibios_resource_to_bus - convert resource to PCI bus address
- * @dev: device which owns this resource
- * @region: converted bus-centric region (start,end)
- * @res: resource to convert
- *
- * Convert a resource to a PCI device bus address or bus window.
- */
-static inline void
-pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
- struct resource *res)
-{
- region->start = res->start;
- region->end = res->end;
-}
-
-static inline void
-pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
- struct pci_bus_region *region)
-{
- res->start = region->start;
- res->end = region->end;
-}
-
static inline struct resource *
pcibios_select_root(struct pci_dev *pdev, struct resource *res)
{
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
new file mode 100644
index 00000000000..ce37349860f
--- /dev/null
+++ b/include/asm-generic/pci_iomap.h
@@ -0,0 +1,35 @@
+/* Generic I/O port emulation, based on MN10300 code
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_PCI_IOMAP_H
+#define __ASM_GENERIC_PCI_IOMAP_H
+
+struct pci_dev;
+#ifdef CONFIG_PCI
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+/* Create a virtual mapping cookie for a port on a given PCI device.
+ * Do not call this directly; it exists to make it easier for architectures
+ * to override. */
+#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
+extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
+ unsigned int nr);
+#else
+#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
+#endif
+
+#elif defined(CONFIG_GENERIC_PCI_IOMAP)
+static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __ASM_GENERIC_PCI_IOMAP_H */
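
pci_iomap() hands drivers one __iomem cookie for either a memory or an I/O port BAR, and pci_iounmap() (declared in iomap.h above) releases it. A hedged sketch of driver probe usage; the BAR number and register offset are placeholders:

    #include <linux/pci.h>
    #include <linux/io.h>

    static int my_map_bar0(struct pci_dev *pdev)
    {
        void __iomem *regs;
        int ret;

        ret = pci_enable_device(pdev);
        if (ret)
            return ret;

        regs = pci_iomap(pdev, 0, 0);   /* maxlen 0 maps the whole BAR */
        if (!regs) {
            pci_disable_device(pdev);
            return -ENOMEM;
        }

        iowrite32(0x1, regs + 0x04);    /* placeholder register write */
        pci_iounmap(pdev, regs);
        pci_disable_device(pdev);
        return 0;
    }
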
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d17784ea37f..0703aa75b5e 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -56,17 +56,17 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu(var, cpu) \
(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
-#ifndef __this_cpu_ptr
-#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#ifndef raw_cpu_ptr
+#define raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
#endif
#ifdef CONFIG_DEBUG_PREEMPT
#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
#else
-#define this_cpu_ptr(ptr) __this_cpu_ptr(ptr)
+#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
#endif
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
-#define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var)))
+#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
@@ -83,7 +83,7 @@ extern void setup_per_cpu_areas(void);
#define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
-#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
+#define raw_cpu_ptr(ptr) this_cpu_ptr(ptr)
#endif /* SMP */
@@ -122,4 +122,7 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_DEF_ATTRIBUTES
#endif
+/* Keep until we have removed all uses of __this_cpu_ptr */
+#define __this_cpu_ptr raw_cpu_ptr
+
#endif /* _ASM_GENERIC_PERCPU_H_ */
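
raw_cpu_ptr() is the old __this_cpu_ptr() (no preemption checking), while this_cpu_ptr() can verify preemption state when CONFIG_DEBUG_PREEMPT is set; the compatibility define keeps old callers building. A hedged sketch of the distinction using an illustrative per-cpu counter:

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static DEFINE_PER_CPU(unsigned long, my_hits);

    static void my_count_hit(void)
    {
        /* Preemption is disabled, so the CPU cannot change under us and
         * the DEBUG_PREEMPT check in this_cpu_ptr() is satisfied. */
        preempt_disable();
        (*this_cpu_ptr(&my_hits))++;
        preempt_enable();
    }

    static unsigned long my_peek_hits(void)
    {
        /* raw_cpu_ptr() skips the check; the caller accepts that the value
         * may belong to whichever CPU it happens to run on. */
        return *raw_cpu_ptr(&my_hits);
    }
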
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 6f3c6ae4fe0..53b2acc3821 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -4,68 +4,114 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#include <linux/mm_types.h>
+#include <linux/bug.h>
+
/*
- * Largely same as above, but only sets the access flags (dirty,
- * accessed, and writable). Furthermore, we know it always gets set
- * to a "more permissive" setting, which allows most architectures
- * to optimize this. We return whether the PTE actually changed, which
- * in turn instructs the caller to do things like update__mmu_cache.
- * This used to be done in the caller, but sparc needs minor faults to
- * force that call on sun4c so we changed this macro slightly
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE. However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
*/
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({ \
- int __changed = !pte_same(*(__ptep), __entry); \
- if (__changed) { \
- set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
- flush_tlb_page(__vma, __address); \
- } \
- __changed; \
-})
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING 0UL
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty);
#endif
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(__vma, __address, __ptep) \
-({ \
- pte_t __pte = *(__ptep); \
- int r = 1; \
- if (!pte_young(__pte)) \
- r = 0; \
- else \
- set_pte_at((__vma)->vm_mm, (__address), \
- (__ptep), pte_mkold(__pte)); \
- r; \
-})
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int r = 1;
+ if (!pte_young(pte))
+ r = 0;
+ else
+ set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
+ return r;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+ int r = 1;
+ if (!pmd_young(pmd))
+ r = 0;
+ else
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
+ return r;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(__vma, __address, __ptep) \
-({ \
- int __young; \
- __young = ptep_test_and_clear_young(__vma, __address, __ptep); \
- if (__young) \
- flush_tlb_page(__vma, __address); \
- __young; \
-})
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
#endif
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define ptep_get_and_clear(__mm, __address, __ptep) \
-({ \
- pte_t __pte = *(__ptep); \
- pte_clear((__mm), (__address), (__ptep)); \
- __pte; \
-})
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ pte_clear(mm, address, ptep);
+ return pte;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+ pmd_clear(pmdp);
+ return pmd;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define ptep_get_and_clear_full(__mm, __address, __ptep, __full) \
-({ \
- pte_t __pte; \
- __pte = ptep_get_and_clear((__mm), (__address), (__ptep)); \
- __pte; \
-})
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep,
+ int full)
+{
+ pte_t pte;
+ pte = ptep_get_and_clear(mm, address, ptep);
+ return pte;
+}
#endif
/*
@@ -74,20 +120,25 @@
* not present, or in the process of an address space destruction.
*/
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
-#define pte_clear_not_present_full(__mm, __address, __ptep, __full) \
-do { \
- pte_clear((__mm), (__address), (__ptep)); \
-} while (0)
+static inline void pte_clear_not_present_full(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep,
+ int full)
+{
+ pte_clear(mm, address, ptep);
+}
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
-#define ptep_clear_flush(__vma, __address, __ptep) \
-({ \
- pte_t __pte; \
- __pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
- flush_tlb_page(__vma, __address); \
- __pte; \
-})
+extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
+extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp);
#endif
#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -99,26 +150,75 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
}
#endif
-#ifndef __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B) (pte_val(A) == pte_val(B))
+#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t old_pmd = *pmdp;
+ set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ BUG();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
-#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
-#define page_test_dirty(page) (0)
+#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
#endif
-#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
-#define page_clear_dirty(page, mapped) do { } while (0)
+#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
#endif
-#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
-#define pte_maybe_dirty(pte) pte_dirty(pte)
-#else
-#define pte_maybe_dirty(pte) (1)
+#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE
+extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
#endif
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
-#define page_test_and_clear_young(page) (0)
+#ifndef __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return pte_val(pte_a) == pte_val(pte_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_UNUSED
+/*
+ * Some architectures provide facilities to virtualization guests
+ * so that they can flag allocated pages as unused. This allows the
+ * host to transparently reclaim unused pages. This function returns
+ * whether the pte's page is unused.
+ */
+static inline int pte_unused(pte_t pte)
+{
+ return 0;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMD_SAME
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+ return pmd_val(pmd_a) == pmd_val(pmd_b);
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+ BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
@@ -129,6 +229,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
#define move_pte(pte, prot, old_addr, new_addr) (pte)
#endif
+#ifndef pte_accessible
+# define pte_accessible(mm, pte) ((void)(pte), 1)
+#endif
+
+#ifndef pte_present_nonuma
+#define pte_present_nonuma(pte) pte_present(pte)
+#endif
+
#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif
@@ -302,52 +410,421 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
#define arch_start_context_switch(prev) do {} while (0)
#endif
+#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline int pte_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+#endif
+
#ifndef __HAVE_PFNMAP_TRACKING
/*
- * Interface that can be used by architecture code to keep track of
- * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
- *
- * track_pfn_vma_new is called when a _new_ pfn mapping is being established
- * for physical range indicated by pfn and size.
+ * Interfaces that can be used by architecture code to keep track of
+ * memory type of pfn mappings specified by remap_pfn_range() and
+ * vm_insert_pfn().
+ */
+
+/*
+ * track_pfn_remap is called when a _new_ pfn mapping is being established
+ * by remap_pfn_range() for physical range indicated by pfn and size.
*/
-static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
- unsigned long pfn, unsigned long size)
+static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long addr,
+ unsigned long size)
{
return 0;
}
/*
- * Interface that can be used by architecture code to keep track of
- * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
- *
- * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
+ * track_pfn_insert is called when a _new_ single pfn is established
+ * by vm_insert_pfn().
+ */
+static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn)
+{
+ return 0;
+}
+
+/*
+ * track_pfn_copy is called when vma that is covering the pfnmap gets
* copied through copy_page_range().
*/
-static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
+static inline int track_pfn_copy(struct vm_area_struct *vma)
{
return 0;
}
/*
- * Interface that can be used by architecture code to keep track of
- * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
- *
* untrack_pfn_vma is called while unmapping a pfnmap for a region.
* untrack can be called for a specific region indicated by pfn and size or
- * can be for the entire vma (in which case size can be zero).
+ * can be for the entire vma (in which case pfn, size are zero).
*/
-static inline void untrack_pfn_vma(struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size)
+static inline void untrack_pfn(struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size)
{
}
#else
-extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
- unsigned long pfn, unsigned long size);
-extern int track_pfn_vma_copy(struct vm_area_struct *vma);
-extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
- unsigned long size);
+extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long addr,
+ unsigned long size);
+extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn);
+extern int track_pfn_copy(struct vm_area_struct *vma);
+extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ unsigned long size);
#endif
+#ifdef __HAVE_COLOR_ZERO_PAGE
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
+}
+
+#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+
+#else
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ return pfn == zero_pfn;
+}
+
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+ extern unsigned long zero_pfn;
+ return zero_pfn;
+}
+#endif
+
+#ifdef CONFIG_MMU
+
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return 0;
+}
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+ return 0;
+}
+#ifndef __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+ BUG();
+ return 0;
+}
+#endif /* __HAVE_ARCH_PMD_WRITE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef pmd_read_atomic
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+	/*
+	 * Depend on the compiler for an atomic pmd read. NOTE: this is
+	 * only going to work if pmdval_t isn't larger than
+	 * an unsigned long.
+	 */
+ return *pmdp;
+}
+#endif
+
+#ifndef pmd_move_must_withdraw
+static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
+ spinlock_t *old_pmd_ptl)
+{
+ /*
+ * With split pmd lock we also need to move preallocated
+ * PTE page table if new_pmd is on different PMD page table.
+ */
+ return new_pmd_ptl != old_pmd_ptl;
+}
+#endif
+
+/*
+ * This function is meant to be used by sites walking pagetables with
+ * the mmap_sem held in read mode to protect against MADV_DONTNEED and
+ * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
+ * into a null pmd and the transhuge page fault can convert a null pmd
+ * into a hugepmd or into a regular pmd (if the hugepage allocation
+ * fails). While holding the mmap_sem in read mode the pmd becomes
+ * stable and stops changing under us only if it's not null and not a
+ * transhuge pmd. When those races occur and this function makes a
+ * difference vs the standard pmd_none_or_clear_bad, the result is
+ * undefined, so behaving as if the pmd were none is safe (because it
+ * can return none anyway). The compiler-level barrier() is critically
+ * important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_sem is held for reading by the
+ * caller (a special atomic read, not done by "gcc" in the generic
+ * version above, is also needed when THP is disabled because the page
+ * fault can populate the pmd from under us).
+ */
+static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+{
+ pmd_t pmdval = pmd_read_atomic(pmd);
+ /*
+ * The barrier will stabilize the pmdval in a register or on
+ * the stack so that it will stop changing under the code.
+ *
+ * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
+ * pmd_read_atomic is allowed to return a not atomic pmdval
+ * (for example pointing to an hugepage that has never been
+ * mapped in the pmd). The below checks will only care about
+ * the low part of the pmd with 32bit PAE x86 anyway, with the
+ * exception of pmd_none(). So the important thing is that if
+ * the low part of the pmd is found null, the high part will
+ * be also null or the pmd_none() check below would be
+ * confused.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ barrier();
+#endif
+ if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
+ return 1;
+ if (unlikely(pmd_bad(pmdval))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * This is a noop if Transparent Hugepage Support is not built into
+ * the kernel. Otherwise it is equivalent to
+ * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
+ * places that have already verified the pmd is not none and want to
+ * walk ptes while holding the mmap sem in read mode (write mode
+ * doesn't need this). If THP is not enabled, the pmd can't go away
+ * under the code even if MADV_DONTNEED runs, but if THP is enabled we
+ * need to run pmd_trans_unstable before walking the ptes after
+ * split_huge_page_pmd returns (because it may have run when the pmd
+ * became null, but then a page fault can map in a THP and not a
+ * regular page).
+ */
+static inline int pmd_trans_unstable(pmd_t *pmd)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return pmd_none_or_trans_huge_or_clear_bad(pmd);
+#else
+ return 0;
+#endif
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+/*
+ * _PAGE_NUMA works identically to _PAGE_PROTNONE (it's actually the
+ * same bit too). It's set only when _PAGE_PRESENT is not set and it's
+ * never set if _PAGE_PRESENT is set.
+ *
+ * pte/pmd_present() returns true if pte/pmd_numa returns true. A page
+ * fault triggers on those regions if pte/pmd_numa returns true
+ * (because _PAGE_PRESENT is not set).
+ */
+#ifndef pte_numa
+static inline int pte_numa(pte_t pte)
+{
+ return (pte_flags(pte) &
+ (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA;
+}
+#endif
+
+#ifndef pmd_numa
+static inline int pmd_numa(pmd_t pmd)
+{
+ return (pmd_flags(pmd) &
+ (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA;
+}
+#endif
+
+/*
+ * pte/pmd_mknuma sets the _PAGE_ACCESSED bitflag automatically
+ * because they're called by the NUMA hinting minor page fault. If we
+ * did not set the _PAGE_ACCESSED bitflag here, the TLB miss handler
+ * would be forced to set it later while filling the TLB after we
+ * return to userland. That would trigger a second write to memory,
+ * which we optimize away by setting _PAGE_ACCESSED here.
+ */
+#ifndef pte_mknonnuma
+static inline pte_t pte_mknonnuma(pte_t pte)
+{
+ pteval_t val = pte_val(pte);
+
+ val &= ~_PAGE_NUMA;
+ val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
+ return __pte(val);
+}
+#endif
+
+#ifndef pmd_mknonnuma
+static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+{
+ pmdval_t val = pmd_val(pmd);
+
+ val &= ~_PAGE_NUMA;
+ val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
+
+ return __pmd(val);
+}
+#endif
+
+#ifndef pte_mknuma
+static inline pte_t pte_mknuma(pte_t pte)
+{
+ pteval_t val = pte_val(pte);
+
+ val &= ~_PAGE_PRESENT;
+ val |= _PAGE_NUMA;
+
+ return __pte(val);
+}
+#endif
+
+#ifndef ptep_set_numa
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t ptent = *ptep;
+
+ ptent = pte_mknuma(ptent);
+ set_pte_at(mm, addr, ptep, ptent);
+ return;
+}
+#endif
+
+#ifndef pmd_mknuma
+static inline pmd_t pmd_mknuma(pmd_t pmd)
+{
+ pmdval_t val = pmd_val(pmd);
+
+ val &= ~_PAGE_PRESENT;
+ val |= _PAGE_NUMA;
+
+ return __pmd(val);
+}
+#endif
+
+#ifndef pmdp_set_numa
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+
+ pmd = pmd_mknuma(pmd);
+ set_pmd_at(mm, addr, pmdp, pmd);
+ return;
+}
+#endif
+#else
+extern int pte_numa(pte_t pte);
+extern int pmd_numa(pmd_t pmd);
+extern pte_t pte_mknonnuma(pte_t pte);
+extern pmd_t pmd_mknonnuma(pmd_t pmd);
+extern pte_t pte_mknuma(pte_t pte);
+extern pmd_t pmd_mknuma(pmd_t pmd);
+extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp);
+#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
+#else
+static inline int pmd_numa(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline int pte_numa(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_mknonnuma(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pte_t pte_mknuma(pte_t pte)
+{
+ return pte;
+}
+
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ return;
+}
+
+
+static inline pmd_t pmd_mknuma(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ return ;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#endif /* CONFIG_MMU */
+
#endif /* !__ASSEMBLY__ */
+#ifndef io_remap_pfn_range
+#define io_remap_pfn_range remap_pfn_range
+#endif
+
#endif /* _ASM_GENERIC_PGTABLE_H */
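
pmd_none_or_trans_huge_or_clear_bad() and pmd_trans_unstable() exist for page-table walkers that only hold mmap_sem for reading, where THP can change a pmd underneath them. A hedged sketch of the intended pattern in a pte-range walker; my_handle_pte() is a placeholder for the per-pte work:

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    /* Placeholder for the caller's per-pte work. */
    static void my_handle_pte(struct mm_struct *mm, unsigned long addr, pte_t *pte)
    {
    }

    /* Called with mmap_sem held for read and a non-none pmd. */
    static int my_walk_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                 unsigned long addr, unsigned long end)
    {
        pte_t *pte;

        /* THP may turn *pmd into a huge pmd or clear it under us; treat
         * either case as "nothing to walk" instead of dereferencing it. */
        if (pmd_trans_unstable(pmd))
            return 0;

        pte = pte_offset_map(pmd, addr);
        do {
            my_handle_pte(mm, addr, pte);
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap(pte - 1);
        return 0;
    }
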
diff --git a/include/asm-generic/poll.h b/include/asm-generic/poll.h
deleted file mode 100644
index 44bce836d35..00000000000
--- a/include/asm-generic/poll.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef __ASM_GENERIC_POLL_H
-#define __ASM_GENERIC_POLL_H
-
-/* These are specified by iBCS2 */
-#define POLLIN 0x0001
-#define POLLPRI 0x0002
-#define POLLOUT 0x0004
-#define POLLERR 0x0008
-#define POLLHUP 0x0010
-#define POLLNVAL 0x0020
-
-/* The rest seem to be more-or-less nonstandard. Check them! */
-#define POLLRDNORM 0x0040
-#define POLLRDBAND 0x0080
-#ifndef POLLWRNORM
-#define POLLWRNORM 0x0100
-#endif
-#ifndef POLLWRBAND
-#define POLLWRBAND 0x0200
-#endif
-#ifndef POLLMSG
-#define POLLMSG 0x0400
-#endif
-#ifndef POLLREMOVE
-#define POLLREMOVE 0x1000
-#endif
-#ifndef POLLRDHUP
-#define POLLRDHUP 0x2000
-#endif
-
-struct pollfd {
- int fd;
- short events;
- short revents;
-};
-
-#endif /* __ASM_GENERIC_POLL_H */
diff --git a/include/asm-generic/posix_types.h b/include/asm-generic/posix_types.h
deleted file mode 100644
index 3dab00860e7..00000000000
--- a/include/asm-generic/posix_types.h
+++ /dev/null
@@ -1,165 +0,0 @@
-#ifndef __ASM_GENERIC_POSIX_TYPES_H
-#define __ASM_GENERIC_POSIX_TYPES_H
-
-#include <asm/bitsperlong.h>
-/*
- * This file is generally used by user-level software, so you need to
- * be a little careful about namespace pollution etc.
- *
- * First the types that are often defined in different ways across
- * architectures, so that you can override them.
- */
-
-#ifndef __kernel_ino_t
-typedef unsigned long __kernel_ino_t;
-#endif
-
-#ifndef __kernel_mode_t
-typedef unsigned int __kernel_mode_t;
-#endif
-
-#ifndef __kernel_nlink_t
-typedef unsigned long __kernel_nlink_t;
-#endif
-
-#ifndef __kernel_pid_t
-typedef int __kernel_pid_t;
-#endif
-
-#ifndef __kernel_ipc_pid_t
-typedef int __kernel_ipc_pid_t;
-#endif
-
-#ifndef __kernel_uid_t
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-#endif
-
-#ifndef __kernel_suseconds_t
-typedef long __kernel_suseconds_t;
-#endif
-
-#ifndef __kernel_daddr_t
-typedef int __kernel_daddr_t;
-#endif
-
-#ifndef __kernel_uid32_t
-typedef __kernel_uid_t __kernel_uid32_t;
-typedef __kernel_gid_t __kernel_gid32_t;
-#endif
-
-#ifndef __kernel_old_uid_t
-typedef __kernel_uid_t __kernel_old_uid_t;
-typedef __kernel_gid_t __kernel_old_gid_t;
-#endif
-
-#ifndef __kernel_old_dev_t
-typedef unsigned int __kernel_old_dev_t;
-#endif
-
-/*
- * Most 32 bit architectures use "unsigned int" size_t,
- * and all 64 bit architectures use "unsigned long" size_t.
- */
-#ifndef __kernel_size_t
-#if __BITS_PER_LONG != 64
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef int __kernel_ptrdiff_t;
-#else
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-#endif
-#endif
-
-/*
- * anything below here should be completely generic
- */
-typedef long __kernel_off_t;
-typedef long long __kernel_loff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#ifdef __KERNEL__
-
-#undef __FD_SET
-static inline void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
-}
-
-#undef __FD_CLR
-static inline void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
-}
-
-#undef __FD_ISSET
-static inline int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
-{
- unsigned long __tmp = __fd / __NFDBITS;
- unsigned long __rem = __fd % __NFDBITS;
- return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static inline void __FD_ZERO(__kernel_fd_set *__p)
-{
- unsigned long *__tmp = __p->fds_bits;
- int __i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 16:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- __tmp[ 8] = 0; __tmp[ 9] = 0;
- __tmp[10] = 0; __tmp[11] = 0;
- __tmp[12] = 0; __tmp[13] = 0;
- __tmp[14] = 0; __tmp[15] = 0;
- return;
-
- case 8:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- __tmp[ 4] = 0; __tmp[ 5] = 0;
- __tmp[ 6] = 0; __tmp[ 7] = 0;
- return;
-
- case 4:
- __tmp[ 0] = 0; __tmp[ 1] = 0;
- __tmp[ 2] = 0; __tmp[ 3] = 0;
- return;
- }
- }
- __i = __FDSET_LONGS;
- while (__i) {
- __i--;
- *__tmp = 0;
- __tmp++;
- }
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASM_GENERIC_POSIX_TYPES_H */
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
new file mode 100644
index 00000000000..1cd3f5d767a
--- /dev/null
+++ b/include/asm-generic/preempt.h
@@ -0,0 +1,92 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <linux/thread_info.h>
+
+#define PREEMPT_ENABLED (0)
+
+static __always_inline int preempt_count(void)
+{
+ return current_thread_info()->preempt_count;
+}
+
+static __always_inline int *preempt_count_ptr(void)
+{
+ return &current_thread_info()->preempt_count;
+}
+
+static __always_inline void preempt_count_set(int pc)
+{
+ *preempt_count_ptr() = pc;
+}
+
+/*
+ * must be macros to avoid header recursion hell
+ */
+#define task_preempt_count(p) \
+ (task_thread_info(p)->preempt_count & ~PREEMPT_NEED_RESCHED)
+
+#define init_task_preempt_count(p) do { \
+ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static __always_inline void set_preempt_need_resched(void)
+{
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+ return false;
+}
+
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+ *preempt_count_ptr() += val;
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+ *preempt_count_ptr() -= val;
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+ /*
+ * Because load-store architectures cannot do per-cpu atomic
+ * operations, we cannot use PREEMPT_NEED_RESCHED here because it
+ * might get lost.
+ */
+ return !--*preempt_count_ptr() && tif_need_resched();
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(void)
+{
+ return unlikely(!preempt_count() && tif_need_resched());
+}
+
+#ifdef CONFIG_PREEMPT
+extern asmlinkage void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+
+#ifdef CONFIG_CONTEXT_TRACKING
+extern asmlinkage void preempt_schedule_context(void);
+#define __preempt_schedule_context() preempt_schedule_context()
+#endif
+#endif /* CONFIG_PREEMPT */
+
+#endif /* __ASM_PREEMPT_H */
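
A quick user-space sketch of the fast path that __preempt_count_dec_and_test() and should_resched() implement: preempt_count and need_resched_flag below are stand-ins for current_thread_info()->preempt_count and tif_need_resched(), so this models the logic only, not the kernel interfaces.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for current_thread_info()->preempt_count and TIF_NEED_RESCHED. */
static int preempt_count;      /* 0 == preemptible */
static bool need_resched_flag;

static bool dec_and_test(void)
{
        /* Mirrors __preempt_count_dec_and_test(): reschedule only when the
         * count drops to zero *and* a reschedule has been requested. */
        return !--preempt_count && need_resched_flag;
}

int main(void)
{
        preempt_count = 2;          /* nested preempt_disable() twice */
        need_resched_flag = true;   /* a wakeup set the need-resched flag */

        printf("inner enable -> resched? %d\n", dec_and_test()); /* prints 0 */
        printf("outer enable -> resched? %d\n", dec_and_test()); /* prints 1 */
        return 0;
}
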
diff --git a/include/asm-generic/ptrace.h b/include/asm-generic/ptrace.h
new file mode 100644
index 00000000000..82e674f6b33
--- /dev/null
+++ b/include/asm-generic/ptrace.h
@@ -0,0 +1,74 @@
+/*
+ * Common low level (register) ptrace helpers
+ *
+ * Copyright 2004-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_GENERIC_PTRACE_H__
+#define __ASM_GENERIC_PTRACE_H__
+
+#ifndef __ASSEMBLY__
+
+/* Helpers for working with the instruction pointer */
+#ifndef GET_IP
+#define GET_IP(regs) ((regs)->pc)
+#endif
+#ifndef SET_IP
+#define SET_IP(regs, val) (GET_IP(regs) = (val))
+#endif
+
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+ return GET_IP(regs);
+}
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ SET_IP(regs, val);
+}
+
+#ifndef profile_pc
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+/* Helpers for working with the user stack pointer */
+#ifndef GET_USP
+#define GET_USP(regs) ((regs)->usp)
+#endif
+#ifndef SET_USP
+#define SET_USP(regs, val) (GET_USP(regs) = (val))
+#endif
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return GET_USP(regs);
+}
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ SET_USP(regs, val);
+}
+
+/* Helpers for working with the frame pointer */
+#ifndef GET_FP
+#define GET_FP(regs) ((regs)->fp)
+#endif
+#ifndef SET_FP
+#define SET_FP(regs, val) (GET_FP(regs) = (val))
+#endif
+
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+ return GET_FP(regs);
+}
+static inline void frame_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ SET_FP(regs, val);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif
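
An architecture opts into these generic helpers either by naming its struct pt_regs fields pc, usp and fp or by defining GET_IP()/GET_USP()/GET_FP() before including the header. A self-contained sketch of that contract, with a hypothetical register layout standing in for a real pt_regs:

#include <stdio.h>

/* Hypothetical pt_regs for a toy architecture; only the field names
 * pc/usp/fp matter to the generic accessors. */
struct pt_regs {
        unsigned long pc;
        unsigned long usp;
        unsigned long fp;
};

/* Same defaulting pattern as asm-generic/ptrace.h. */
#ifndef GET_IP
#define GET_IP(regs)            ((regs)->pc)
#endif
#define SET_IP(regs, val)       (GET_IP(regs) = (val))

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
        return GET_IP(regs);
}

int main(void)
{
        struct pt_regs regs = { .pc = 0x1000, .usp = 0x7fff0000, .fp = 0x7fff0040 };

        SET_IP(&regs, regs.pc + 4);     /* e.g. step over an emulated insn */
        printf("pc = %#lx\n", instruction_pointer(&regs));
        return 0;
}
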
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
new file mode 100644
index 00000000000..6383d54bf98
--- /dev/null
+++ b/include/asm-generic/qrwlock.h
@@ -0,0 +1,166 @@
+/*
+ * Queue read/write lock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QRWLOCK_H
+#define __ASM_GENERIC_QRWLOCK_H
+
+#include <linux/atomic.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+#include <asm-generic/qrwlock_types.h>
+
+/*
+ * Writer states & reader shift and bias
+ */
+#define _QW_WAITING 1 /* A writer is waiting */
+#define _QW_LOCKED 0xff /* A writer holds the lock */
+#define _QW_WMASK 0xff /* Writer mask */
+#define _QR_SHIFT 8 /* Reader count shift */
+#define _QR_BIAS (1U << _QR_SHIFT)
+
+/*
+ * External function declarations
+ */
+extern void queue_read_lock_slowpath(struct qrwlock *lock);
+extern void queue_write_lock_slowpath(struct qrwlock *lock);
+
+/**
+ * queue_read_can_lock - would read_trylock() succeed?
+ * @lock: Pointer to queue rwlock structure
+ */
+static inline int queue_read_can_lock(struct qrwlock *lock)
+{
+ return !(atomic_read(&lock->cnts) & _QW_WMASK);
+}
+
+/**
+ * queue_write_can_lock - would write_trylock() succeed?
+ * @lock: Pointer to queue rwlock structure
+ */
+static inline int queue_write_can_lock(struct qrwlock *lock)
+{
+ return !atomic_read(&lock->cnts);
+}
+
+/**
+ * queue_read_trylock - try to acquire read lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static inline int queue_read_trylock(struct qrwlock *lock)
+{
+ u32 cnts;
+
+ cnts = atomic_read(&lock->cnts);
+ if (likely(!(cnts & _QW_WMASK))) {
+ cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+ if (likely(!(cnts & _QW_WMASK)))
+ return 1;
+ atomic_sub(_QR_BIAS, &lock->cnts);
+ }
+ return 0;
+}
+
+/**
+ * queue_write_trylock - try to acquire write lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static inline int queue_write_trylock(struct qrwlock *lock)
+{
+ u32 cnts;
+
+ cnts = atomic_read(&lock->cnts);
+ if (unlikely(cnts))
+ return 0;
+
+ return likely(atomic_cmpxchg(&lock->cnts,
+ cnts, cnts | _QW_LOCKED) == cnts);
+}
+/**
+ * queue_read_lock - acquire read lock of a queue rwlock
+ * @lock: Pointer to queue rwlock structure
+ */
+static inline void queue_read_lock(struct qrwlock *lock)
+{
+ u32 cnts;
+
+ cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+ if (likely(!(cnts & _QW_WMASK)))
+ return;
+
+ /* The slowpath will decrement the reader count, if necessary. */
+ queue_read_lock_slowpath(lock);
+}
+
+/**
+ * queue_write_lock - acquire write lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ */
+static inline void queue_write_lock(struct qrwlock *lock)
+{
+ /* Fast path: the lock word is 0 when there are no readers or writer. */
+ if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0)
+ return;
+
+ queue_write_lock_slowpath(lock);
+}
+
+/**
+ * queue_read_unlock - release read lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ */
+static inline void queue_read_unlock(struct qrwlock *lock)
+{
+ /*
+ * Atomically decrement the reader count
+ */
+ smp_mb__before_atomic();
+ atomic_sub(_QR_BIAS, &lock->cnts);
+}
+
+#ifndef queue_write_unlock
+/**
+ * queue_write_unlock - release write lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ */
+static inline void queue_write_unlock(struct qrwlock *lock)
+{
+ /*
+ * If the writer field is atomic, it can be cleared directly.
+ * Otherwise, an atomic subtraction will be used to clear it.
+ */
+ smp_mb__before_atomic();
+ atomic_sub(_QW_LOCKED, &lock->cnts);
+}
+#endif
+
+/*
+ * Remapping rwlock architecture specific functions to the corresponding
+ * queue rwlock functions.
+ */
+#define arch_read_can_lock(l) queue_read_can_lock(l)
+#define arch_write_can_lock(l) queue_write_can_lock(l)
+#define arch_read_lock(l) queue_read_lock(l)
+#define arch_write_lock(l) queue_write_lock(l)
+#define arch_read_trylock(l) queue_read_trylock(l)
+#define arch_write_trylock(l) queue_write_trylock(l)
+#define arch_read_unlock(l) queue_read_unlock(l)
+#define arch_write_unlock(l) queue_write_unlock(l)
+
+#endif /* __ASM_GENERIC_QRWLOCK_H */
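
The lock word packs the writer state into bits 0-7 (_QW_WMASK) and the reader count above _QR_SHIFT, which is why both trylock paths above need only one atomic operation. A user-space model of just those two fast paths, using C11 <stdatomic.h> in place of the kernel atomics and leaving out the queued slowpaths:

#include <stdatomic.h>
#include <stdio.h>

#define _QW_LOCKED      0xff            /* writer byte */
#define _QW_WMASK       0xff
#define _QR_SHIFT       8
#define _QR_BIAS        (1U << _QR_SHIFT)

static atomic_uint cnts;                /* models qrwlock::cnts */

static int read_trylock(void)
{
        unsigned int c = atomic_fetch_add(&cnts, _QR_BIAS) + _QR_BIAS;

        if (!(c & _QW_WMASK))
                return 1;
        atomic_fetch_sub(&cnts, _QR_BIAS);      /* writer present: back out */
        return 0;
}

static int write_trylock(void)
{
        unsigned int expected = 0;

        return atomic_compare_exchange_strong(&cnts, &expected, _QW_LOCKED);
}

int main(void)
{
        printf("reader 1: %d\n", read_trylock());       /* 1 */
        printf("reader 2: %d\n", read_trylock());       /* 1 */
        printf("writer  : %d\n", write_trylock());      /* 0, readers active */
        printf("cnts = %#x\n", atomic_load(&cnts));     /* 0x200: two readers */
        return 0;
}
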
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
new file mode 100644
index 00000000000..4d76f24df51
--- /dev/null
+++ b/include/asm-generic/qrwlock_types.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_GENERIC_QRWLOCK_TYPES_H
+#define __ASM_GENERIC_QRWLOCK_TYPES_H
+
+#include <linux/types.h>
+#include <asm/spinlock_types.h>
+
+/*
+ * The queue read/write lock data structure
+ */
+
+typedef struct qrwlock {
+ atomic_t cnts;
+ arch_spinlock_t lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { \
+ .cnts = ATOMIC_INIT(0), \
+ .lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+}
+
+#endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
index 587566f95f6..5e752b95905 100644
--- a/include/asm-generic/resource.h
+++ b/include/asm-generic/resource.h
@@ -1,70 +1,8 @@
#ifndef _ASM_GENERIC_RESOURCE_H
#define _ASM_GENERIC_RESOURCE_H
-/*
- * Resource limit IDs
- *
- * ( Compatibility detail: there are architectures that have
- * a different rlimit ID order in the 5-9 range and want
- * to keep that order for binary compatibility. The reasons
- * are historic and all new rlimits are identical across all
- * arches. If an arch has such special order for some rlimits
- * then it defines them prior including asm-generic/resource.h. )
- */
-
-#define RLIMIT_CPU 0 /* CPU time in sec */
-#define RLIMIT_FSIZE 1 /* Maximum filesize */
-#define RLIMIT_DATA 2 /* max data size */
-#define RLIMIT_STACK 3 /* max stack size */
-#define RLIMIT_CORE 4 /* max core file size */
-
-#ifndef RLIMIT_RSS
-# define RLIMIT_RSS 5 /* max resident set size */
-#endif
-
-#ifndef RLIMIT_NPROC
-# define RLIMIT_NPROC 6 /* max number of processes */
-#endif
-
-#ifndef RLIMIT_NOFILE
-# define RLIMIT_NOFILE 7 /* max number of open files */
-#endif
+#include <uapi/asm-generic/resource.h>
-#ifndef RLIMIT_MEMLOCK
-# define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
-#endif
-
-#ifndef RLIMIT_AS
-# define RLIMIT_AS 9 /* address space limit */
-#endif
-
-#define RLIMIT_LOCKS 10 /* maximum file locks held */
-#define RLIMIT_SIGPENDING 11 /* max number of pending signals */
-#define RLIMIT_MSGQUEUE 12 /* maximum bytes in POSIX mqueues */
-#define RLIMIT_NICE 13 /* max nice prio allowed to raise to
- 0-39 for nice level 19 .. -20 */
-#define RLIMIT_RTPRIO 14 /* maximum realtime priority */
-#define RLIMIT_RTTIME 15 /* timeout for RT tasks in us */
-#define RLIM_NLIMITS 16
-
-/*
- * SuS says limits have to be unsigned.
- * Which makes a ton more sense anyway.
- *
- * Some architectures override this (for compatibility reasons):
- */
-#ifndef RLIM_INFINITY
-# define RLIM_INFINITY (~0UL)
-#endif
-
-/*
- * RLIMIT_STACK default maximum - some architectures override it:
- */
-#ifndef _STK_LIM_MAX
-# define _STK_LIM_MAX RLIM_INFINITY
-#endif
-
-#ifdef __KERNEL__
/*
* boot-time rlimit defaults for the init task:
@@ -74,11 +12,11 @@
[RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \
- [RLIMIT_STACK] = { _STK_LIM, _STK_LIM_MAX }, \
+ [RLIMIT_STACK] = { _STK_LIM, RLIM_INFINITY }, \
[RLIMIT_CORE] = { 0, RLIM_INFINITY }, \
[RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_NPROC] = { 0, 0 }, \
- [RLIMIT_NOFILE] = { INR_OPEN, INR_OPEN }, \
+ [RLIMIT_NOFILE] = { INR_OPEN_CUR, INR_OPEN_MAX }, \
[RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \
[RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \
@@ -89,6 +27,4 @@
[RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
}
-#endif /* __KERNEL__ */
-
#endif
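
INIT_RLIMITS seeds the soft/hard limit pairs for the init task, and every other process inherits them. A small user-space check of two inherited pairs through the standard getrlimit(2) interface, shown only to make the soft/hard pairing concrete:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;

        /* RLIMIT_NOFILE corresponds to the { INR_OPEN_CUR, INR_OPEN_MAX }
         * entry above: first value is the soft limit, second the hard limit. */
        if (getrlimit(RLIMIT_NOFILE, &rl) == 0)
                printf("nofile: soft=%llu hard=%llu\n",
                       (unsigned long long)rl.rlim_cur,
                       (unsigned long long)rl.rlim_max);

        if (getrlimit(RLIMIT_STACK, &rl) == 0)
                printf("stack:  soft=%llu hard=%llu\n",
                       (unsigned long long)rl.rlim_cur,
                       (unsigned long long)rl.rlim_max);
        return 0;
}
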
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
new file mode 100644
index 00000000000..d48bf5a95cc
--- /dev/null
+++ b/include/asm-generic/rwsem.h
@@ -0,0 +1,132 @@
+#ifndef _ASM_GENERIC_RWSEM_H
+#define _ASM_GENERIC_RWSEM_H
+
+#ifndef _LINUX_RWSEM_H
+#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
+#endif
+
+#ifdef __KERNEL__
+
+/*
+ * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
+ * Adapted largely from include/asm-i386/rwsem.h
+ * by Paul Mackerras <paulus@samba.org>.
+ */
+
+/*
+ * the semaphore definition
+ */
+#ifdef CONFIG_64BIT
+# define RWSEM_ACTIVE_MASK 0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK 0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE 0x00000000L
+#define RWSEM_ACTIVE_BIAS 0x00000001L
+#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
+ rwsem_down_read_failed(sem);
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ while ((tmp = sem->count) >= 0) {
+ if (tmp == cmpxchg(&sem->count, tmp,
+ tmp + RWSEM_ACTIVE_READ_BIAS)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+ long tmp;
+
+ tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_long_t *)&sem->count);
+ if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+ rwsem_down_write_failed(sem);
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+ __down_write_nested(sem, 0);
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
+ return tmp == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
+ if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
+ rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_long_t *)&sem->count) < 0))
+ rwsem_wake(sem);
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+{
+ atomic_long_add(delta, (atomic_long_t *)&sem->count);
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+ (atomic_long_t *)&sem->count);
+ if (tmp < 0)
+ rwsem_downgrade_wake(sem);
+}
+
+/*
+ * implement exchange and add functionality
+ */
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
+{
+ return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_GENERIC_RWSEM_H */
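
The bias scheme keeps the active-locker count in the low 32 bits and pushes the count negative whenever a writer or waiters are present, so the fast paths can test the sign alone. A stand-alone arithmetic demo of the constants above (plain user-space C assuming a 64-bit long, not the kernel rw_semaphore):

#include <stdio.h>

/* The 64-bit variants of the bias constants above (assumes a 64-bit long). */
#define RWSEM_ACTIVE_MASK       0xffffffffL
#define RWSEM_ACTIVE_BIAS       0x00000001L
#define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK - 1)
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
        long count = 0;                         /* RWSEM_UNLOCKED_VALUE */

        count += RWSEM_ACTIVE_READ_BIAS;        /* first reader */
        count += RWSEM_ACTIVE_READ_BIAS;        /* second reader */
        printf("two readers:     count=%ld (active=%ld)\n",
               count, count & RWSEM_ACTIVE_MASK);

        count = RWSEM_ACTIVE_WRITE_BIAS;        /* uncontended writer */
        printf("writer held:     count=%ld (negative while write-locked)\n",
               count);

        count -= RWSEM_WAITING_BIAS;            /* __downgrade_write(): drop the
                                                   waiting bias, keep one unit */
        printf("after downgrade: count=%ld (one active reader)\n", count);
        return 0;
}
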
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index b3bfabc258f..f1a24b5c3b9 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -3,6 +3,26 @@
/* References to section boundaries */
+/*
+ * Usage guidelines:
+ * _text, _data: architecture specific, don't use them in arch-independent code
+ * [_stext, _etext]: contains .text.* sections, may also contain .rodata.*
+ * and/or .init.* sections
+ * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
+ * and/or .init.* sections.
+ * [__start_rodata, __end_rodata]: contains .rodata.* sections
+ * [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
+ * may be out of this range on some architectures.
+ * [_sinittext, _einittext]: contains .init.text.* sections
+ * [__bss_start, __bss_stop]: contains BSS sections
+ *
+ * The following global variables are optional and may be unavailable on some
+ * architectures and/or kernel configurations.
+ * _text, _data
+ * __kprobes_text_start, __kprobes_text_end
+ * __entry_text_start, __entry_text_end
+ * __ctors_start, __ctors_end
+ */
extern char _text[], _stext[], _etext[];
extern char _data[], _sdata[], _edata[];
extern char __bss_start[], __bss_stop[];
@@ -11,7 +31,7 @@ extern char _sinittext[], _einittext[];
extern char _end[];
extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
-extern char __initdata_begin[], __initdata_end[];
+extern char __entry_text_start[], __entry_text_end[];
extern char __start_rodata[], __end_rodata[];
/* Start and end of .ctors section - used for constructor calls. */
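
The [_stext, _etext] range documented above is what generic code uses for "is this address kernel text?" checks. A self-contained sketch of such a range check; fake_text merely stands in for the linker-provided symbols so the example builds on its own:

#include <stdbool.h>
#include <stdio.h>

/* Dummy "section" so the example links; in the kernel these would be the
 * linker-script symbols declared in <asm-generic/sections.h>. */
static char fake_text[64];
static char *_stext = fake_text;
static char *_etext = fake_text + sizeof(fake_text);

/* Same shape as the kernel's core_kernel_text()-style range checks. */
static bool is_kernel_text(unsigned long addr)
{
        return addr >= (unsigned long)_stext && addr < (unsigned long)_etext;
}

int main(void)
{
        unsigned long inside  = (unsigned long)_stext + 8;
        unsigned long outside = (unsigned long)_etext + 8;

        printf("inside:  %d\n", is_kernel_text(inside));        /* 1 */
        printf("outside: %d\n", is_kernel_text(outside));       /* 0 */
        return 0;
}
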
diff --git a/include/asm-generic/sembuf.h b/include/asm-generic/sembuf.h
deleted file mode 100644
index 4cb2c13e509..00000000000
--- a/include/asm-generic/sembuf.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __ASM_GENERIC_SEMBUF_H
-#define __ASM_GENERIC_SEMBUF_H
-
-#include <asm/bitsperlong.h>
-
-/*
- * The semid64_ds structure for x86 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * semid64_ds was originally meant to be architecture specific, but
- * everyone just ended up making identical copies without specific
- * optimizations, so we may just as well all use the same one.
- *
- * 64 bit architectures typically define a 64 bit __kernel_time_t,
- * so they do not need the first two padding words.
- * On big-endian systems, the padding is in the wrong place.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-struct semid64_ds {
- struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
- __kernel_time_t sem_otime; /* last semop time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused1;
-#endif
- __kernel_time_t sem_ctime; /* last change time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused2;
-#endif
- unsigned long sem_nsems; /* no. of semaphores in array */
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif /* __ASM_GENERIC_SEMBUF_H */
diff --git a/include/asm-generic/setup.h b/include/asm-generic/setup.h
deleted file mode 100644
index 6fc26a51003..00000000000
--- a/include/asm-generic/setup.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_GENERIC_SETUP_H
-#define __ASM_GENERIC_SETUP_H
-
-#define COMMAND_LINE_SIZE 512
-
-#endif /* __ASM_GENERIC_SETUP_H */
diff --git a/include/asm-generic/shmbuf.h b/include/asm-generic/shmbuf.h
deleted file mode 100644
index 5768fa60ac8..00000000000
--- a/include/asm-generic/shmbuf.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef __ASM_GENERIC_SHMBUF_H
-#define __ASM_GENERIC_SHMBUF_H
-
-#include <asm/bitsperlong.h>
-
-/*
- * The shmid64_ds structure for x86 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * shmid64_ds was originally meant to be architecture specific, but
- * everyone just ended up making identical copies without specific
- * optimizations, so we may just as well all use the same one.
- *
- * 64 bit architectures typically define a 64 bit __kernel_time_t,
- * so they do not need the first two padding words.
- * On big-endian systems, the padding is in the wrong place.
- *
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct shmid64_ds {
- struct ipc64_perm shm_perm; /* operation perms */
- size_t shm_segsz; /* size of segment (bytes) */
- __kernel_time_t shm_atime; /* last attach time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused1;
-#endif
- __kernel_time_t shm_dtime; /* last detach time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused2;
-#endif
- __kernel_time_t shm_ctime; /* last change time */
-#if __BITS_PER_LONG != 64
- unsigned long __unused3;
-#endif
- __kernel_pid_t shm_cpid; /* pid of creator */
- __kernel_pid_t shm_lpid; /* pid of last operator */
- unsigned long shm_nattch; /* no. of current attaches */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-struct shminfo64 {
- unsigned long shmmax;
- unsigned long shmmin;
- unsigned long shmmni;
- unsigned long shmseg;
- unsigned long shmall;
- unsigned long __unused1;
- unsigned long __unused2;
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif /* __ASM_GENERIC_SHMBUF_H */
diff --git a/include/asm-generic/shmparam.h b/include/asm-generic/shmparam.h
deleted file mode 100644
index 51a3852de73..00000000000
--- a/include/asm-generic/shmparam.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_GENERIC_SHMPARAM_H
-#define __ASM_GENERIC_SHMPARAM_H
-
-#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
-
-#endif /* _ASM_GENERIC_SHMPARAM_H */
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 942d30b5aab..3d1a3af5cf5 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -1,123 +1,8 @@
#ifndef _ASM_GENERIC_SIGINFO_H
#define _ASM_GENERIC_SIGINFO_H
-#include <linux/compiler.h>
-#include <linux/types.h>
+#include <uapi/asm-generic/siginfo.h>
-typedef union sigval {
- int sival_int;
- void __user *sival_ptr;
-} sigval_t;
-
-/*
- * This is the size (including padding) of the part of the
- * struct siginfo that is before the union.
- */
-#ifndef __ARCH_SI_PREAMBLE_SIZE
-#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
-#endif
-
-#define SI_MAX_SIZE 128
-#ifndef SI_PAD_SIZE
-#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
-#endif
-
-#ifndef __ARCH_SI_UID_T
-#define __ARCH_SI_UID_T __kernel_uid32_t
-#endif
-
-/*
- * The default "si_band" type is "long", as specified by POSIX.
- * However, some architectures want to override this to "int"
- * for historical compatibility reasons, so we allow that.
- */
-#ifndef __ARCH_SI_BAND_T
-#define __ARCH_SI_BAND_T long
-#endif
-
-#ifndef HAVE_ARCH_SIGINFO_T
-
-typedef struct siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[SI_PAD_SIZE];
-
- /* kill() */
- struct {
- __kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- __kernel_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
- sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- __kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
- sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- __kernel_pid_t _pid; /* which child */
- __ARCH_SI_UID_T _uid; /* sender's uid */
- int _status; /* exit code */
- __kernel_clock_t _utime;
- __kernel_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- void __user *_addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
- short _addr_lsb; /* LSB of the reported address */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
-} siginfo_t;
-
-#endif
-
-/*
- * How these fields are to be accessed.
- */
-#define si_pid _sifields._kill._pid
-#define si_uid _sifields._kill._uid
-#define si_tid _sifields._timer._tid
-#define si_overrun _sifields._timer._overrun
-#define si_sys_private _sifields._timer._sys_private
-#define si_status _sifields._sigchld._status
-#define si_utime _sifields._sigchld._utime
-#define si_stime _sifields._sigchld._stime
-#define si_value _sifields._rt._sigval
-#define si_int _sifields._rt._sigval.sival_int
-#define si_ptr _sifields._rt._sigval.sival_ptr
-#define si_addr _sifields._sigfault._addr
-#ifdef __ARCH_SI_TRAPNO
-#define si_trapno _sifields._sigfault._trapno
-#endif
-#define si_addr_lsb _sifields._sigfault._addr_lsb
-#define si_band _sifields._sigpoll._band
-#define si_fd _sifields._sigpoll._fd
-
-#ifdef __KERNEL__
#define __SI_MASK 0xffff0000u
#define __SI_KILL (0 << 16)
#define __SI_TIMER (1 << 16)
@@ -126,156 +11,8 @@ typedef struct siginfo {
#define __SI_CHLD (4 << 16)
#define __SI_RT (5 << 16)
#define __SI_MESGQ (6 << 16)
+#define __SI_SYS (7 << 16)
#define __SI_CODE(T,N) ((T) | ((N) & 0xffff))
-#else
-#define __SI_KILL 0
-#define __SI_TIMER 0
-#define __SI_POLL 0
-#define __SI_FAULT 0
-#define __SI_CHLD 0
-#define __SI_RT 0
-#define __SI_MESGQ 0
-#define __SI_CODE(T,N) (N)
-#endif
-
-/*
- * si_code values
- * Digital reserves positive values for kernel-generated signals.
- */
-#define SI_USER 0 /* sent by kill, sigsend, raise */
-#define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
-#define SI_QUEUE -1 /* sent by sigqueue */
-#define SI_TIMER __SI_CODE(__SI_TIMER,-2) /* sent by timer expiration */
-#define SI_MESGQ __SI_CODE(__SI_MESGQ,-3) /* sent by real time mesq state change */
-#define SI_ASYNCIO -4 /* sent by AIO completion */
-#define SI_SIGIO -5 /* sent by queued SIGIO */
-#define SI_TKILL -6 /* sent by tkill system call */
-#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
-
-#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
-#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
-
-/*
- * SIGILL si_codes
- */
-#define ILL_ILLOPC (__SI_FAULT|1) /* illegal opcode */
-#define ILL_ILLOPN (__SI_FAULT|2) /* illegal operand */
-#define ILL_ILLADR (__SI_FAULT|3) /* illegal addressing mode */
-#define ILL_ILLTRP (__SI_FAULT|4) /* illegal trap */
-#define ILL_PRVOPC (__SI_FAULT|5) /* privileged opcode */
-#define ILL_PRVREG (__SI_FAULT|6) /* privileged register */
-#define ILL_COPROC (__SI_FAULT|7) /* coprocessor error */
-#define ILL_BADSTK (__SI_FAULT|8) /* internal stack error */
-#define NSIGILL 8
-
-/*
- * SIGFPE si_codes
- */
-#define FPE_INTDIV (__SI_FAULT|1) /* integer divide by zero */
-#define FPE_INTOVF (__SI_FAULT|2) /* integer overflow */
-#define FPE_FLTDIV (__SI_FAULT|3) /* floating point divide by zero */
-#define FPE_FLTOVF (__SI_FAULT|4) /* floating point overflow */
-#define FPE_FLTUND (__SI_FAULT|5) /* floating point underflow */
-#define FPE_FLTRES (__SI_FAULT|6) /* floating point inexact result */
-#define FPE_FLTINV (__SI_FAULT|7) /* floating point invalid operation */
-#define FPE_FLTSUB (__SI_FAULT|8) /* subscript out of range */
-#define NSIGFPE 8
-
-/*
- * SIGSEGV si_codes
- */
-#define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */
-#define SEGV_ACCERR (__SI_FAULT|2) /* invalid permissions for mapped object */
-#define NSIGSEGV 2
-
-/*
- * SIGBUS si_codes
- */
-#define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */
-#define BUS_ADRERR (__SI_FAULT|2) /* non-existant physical address */
-#define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */
-/* hardware memory error consumed on a machine check: action required */
-#define BUS_MCEERR_AR (__SI_FAULT|4)
-/* hardware memory error detected in process but not consumed: action optional*/
-#define BUS_MCEERR_AO (__SI_FAULT|5)
-#define NSIGBUS 5
-
-/*
- * SIGTRAP si_codes
- */
-#define TRAP_BRKPT (__SI_FAULT|1) /* process breakpoint */
-#define TRAP_TRACE (__SI_FAULT|2) /* process trace trap */
-#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */
-#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint/watchpoint */
-#define NSIGTRAP 4
-
-/*
- * SIGCHLD si_codes
- */
-#define CLD_EXITED (__SI_CHLD|1) /* child has exited */
-#define CLD_KILLED (__SI_CHLD|2) /* child was killed */
-#define CLD_DUMPED (__SI_CHLD|3) /* child terminated abnormally */
-#define CLD_TRAPPED (__SI_CHLD|4) /* traced child has trapped */
-#define CLD_STOPPED (__SI_CHLD|5) /* child has stopped */
-#define CLD_CONTINUED (__SI_CHLD|6) /* stopped child has continued */
-#define NSIGCHLD 6
-
-/*
- * SIGPOLL si_codes
- */
-#define POLL_IN (__SI_POLL|1) /* data input available */
-#define POLL_OUT (__SI_POLL|2) /* output buffers available */
-#define POLL_MSG (__SI_POLL|3) /* input message available */
-#define POLL_ERR (__SI_POLL|4) /* i/o error */
-#define POLL_PRI (__SI_POLL|5) /* high priority input available */
-#define POLL_HUP (__SI_POLL|6) /* device disconnected */
-#define NSIGPOLL 6
-
-/*
- * sigevent definitions
- *
- * It seems likely that SIGEV_THREAD will have to be handled from
- * userspace, libpthread transmuting it to SIGEV_SIGNAL, which the
- * thread manager then catches and does the appropriate nonsense.
- * However, everything is written out here so as to not get lost.
- */
-#define SIGEV_SIGNAL 0 /* notify via signal */
-#define SIGEV_NONE 1 /* other notification: meaningless */
-#define SIGEV_THREAD 2 /* deliver via thread creation */
-#define SIGEV_THREAD_ID 4 /* deliver to thread */
-
-/*
- * This works because the alignment is ok on all current architectures
- * but we leave open this being overridden in the future
- */
-#ifndef __ARCH_SIGEV_PREAMBLE_SIZE
-#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(int) * 2 + sizeof(sigval_t))
-#endif
-
-#define SIGEV_MAX_SIZE 64
-#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE - __ARCH_SIGEV_PREAMBLE_SIZE) \
- / sizeof(int))
-
-typedef struct sigevent {
- sigval_t sigev_value;
- int sigev_signo;
- int sigev_notify;
- union {
- int _pad[SIGEV_PAD_SIZE];
- int _tid;
-
- struct {
- void (*_function)(sigval_t);
- void *_attribute; /* really pthread_attr_t */
- } _sigev_thread;
- } _sigev_un;
-} sigevent_t;
-
-#define sigev_notify_function _sigev_un._sigev_thread._function
-#define sigev_notify_attributes _sigev_un._sigev_thread._attribute
-#define sigev_notify_thread_id _sigev_un._tid
-
-#ifdef __KERNEL__
struct siginfo;
void do_schedule_next_timer(struct siginfo *info);
@@ -295,8 +32,6 @@ static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
#endif
-extern int copy_siginfo_to_user(struct siginfo __user *to, struct siginfo *from);
-
-#endif /* __KERNEL__ */
+extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
#endif
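
The kernel-internal __SI_CODE() values keep the signal class in the high 16 bits (__SI_MASK) and the user-visible si_code in the low 16 bits. A tiny user-space demo of that packing, reusing the constants from this header:

#include <stdio.h>

#define __SI_MASK       0xffff0000u
#define __SI_TIMER      (1 << 16)
#define __SI_CODE(T, N) ((T) | ((N) & 0xffff))

int main(void)
{
        unsigned int si_timer = __SI_CODE(__SI_TIMER, -2);      /* SI_TIMER */

        /* The in-kernel value keeps the class in the high half; user space
         * only ever sees the low 16 bits as si_code. */
        printf("in-kernel SI_TIMER = %#x\n", si_timer);                 /* 0x1fffe */
        printf("class = %#x, si_code seen by user = %d\n",
               si_timer & __SI_MASK, (short)(si_timer & 0xffff));       /* -2 */
        return 0;
}
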
diff --git a/include/asm-generic/signal-defs.h b/include/asm-generic/signal-defs.h
deleted file mode 100644
index 00f95df5429..00000000000
--- a/include/asm-generic/signal-defs.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef __ASM_GENERIC_SIGNAL_DEFS_H
-#define __ASM_GENERIC_SIGNAL_DEFS_H
-
-#include <linux/compiler.h>
-
-#ifndef SIG_BLOCK
-#define SIG_BLOCK 0 /* for blocking signals */
-#endif
-#ifndef SIG_UNBLOCK
-#define SIG_UNBLOCK 1 /* for unblocking signals */
-#endif
-#ifndef SIG_SETMASK
-#define SIG_SETMASK 2 /* for setting the signal mask */
-#endif
-
-#ifndef __ASSEMBLY__
-typedef void __signalfn_t(int);
-typedef __signalfn_t __user *__sighandler_t;
-
-typedef void __restorefn_t(void);
-typedef __restorefn_t __user *__sigrestore_t;
-
-#define SIG_DFL ((__force __sighandler_t)0) /* default signal handling */
-#define SIG_IGN ((__force __sighandler_t)1) /* ignore signal */
-#define SIG_ERR ((__force __sighandler_t)-1) /* error return from signal */
-#endif
-
-#endif /* __ASM_GENERIC_SIGNAL_DEFS_H */
diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
index 555c0aee8a4..d840c90a157 100644
--- a/include/asm-generic/signal.h
+++ b/include/asm-generic/signal.h
@@ -1,131 +1,14 @@
#ifndef __ASM_GENERIC_SIGNAL_H
#define __ASM_GENERIC_SIGNAL_H
-#include <linux/types.h>
-
-#define _NSIG 64
-#define _NSIG_BPW __BITS_PER_LONG
-#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/*
-#define SIGLOST 29
-*/
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#ifndef SIGRTMAX
-#define SIGRTMAX _NSIG
-#endif
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-/*
- * New architectures should not define the obsolete
- * SA_RESTORER 0x04000000
- */
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
+#include <uapi/asm-generic/signal.h>
#ifndef __ASSEMBLY__
-typedef struct {
- unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
-/* not actually used, but required for linux/syscalls.h */
-typedef unsigned long old_sigset_t;
-
-#include <asm-generic/signal-defs.h>
-
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
#ifdef SA_RESTORER
- __sigrestore_t sa_restorer;
#endif
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
-
-typedef struct sigaltstack {
- void __user *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-#ifdef __KERNEL__
#include <asm/sigcontext.h>
#undef __HAVE_ARCH_SIG_BITOPS
-#define ptrace_signal_deliver(regs, cookie) do { } while (0)
-
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
-
#endif /* _ASM_GENERIC_SIGNAL_H */
diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h
new file mode 100644
index 00000000000..f57eb7b5c23
--- /dev/null
+++ b/include/asm-generic/simd.h
@@ -0,0 +1,14 @@
+
+#include <linux/hardirq.h>
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ * instructions or access the SIMD register file
+ *
+ * As architectures typically don't preserve the SIMD register file when
+ * taking an interrupt, !in_interrupt() should be a reasonable default.
+ */
+static __must_check inline bool may_use_simd(void)
+{
+ return !in_interrupt();
+}
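
The usual caller pattern is to test may_use_simd() and fall back to a scalar routine when it returns false, e.g. when running in interrupt context. A user-space sketch of that pattern: may_use_simd() is re-modelled with a plain flag, and xor_block_simd()/xor_block_scalar() are hypothetical helpers rather than kernel APIs:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's may_use_simd(); the flag simulates being
 * called from interrupt context. */
static bool in_interrupt_flag;
static bool may_use_simd(void) { return !in_interrupt_flag; }

/* Hypothetical helpers: a "vectorised" routine and its scalar fallback. */
static void xor_block_simd(unsigned char *d, const unsigned char *s, int n)
{
        for (int i = 0; i < n; i++)     /* imagine wide vector ops here */
                d[i] ^= s[i];
}

static void xor_block_scalar(unsigned char *d, const unsigned char *s, int n)
{
        for (int i = 0; i < n; i++)
                d[i] ^= s[i];
}

static void xor_block(unsigned char *d, const unsigned char *s, int n)
{
        if (may_use_simd())
                xor_block_simd(d, s, n);        /* fast path */
        else
                xor_block_scalar(d, s, n);      /* e.g. from an interrupt */
}

int main(void)
{
        unsigned char dst[4] = { 1, 2, 3, 4 }, src[4] = { 4, 3, 2, 1 };

        xor_block(dst, src, 4);
        in_interrupt_flag = true;               /* pretend we are in an IRQ */
        xor_block(dst, src, 4);                 /* takes the scalar path */
        printf("%d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]); /* 1 2 3 4 */
        return 0;
}
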
diff --git a/include/asm-generic/sizes.h b/include/asm-generic/sizes.h
new file mode 100644
index 00000000000..1dcfad9629e
--- /dev/null
+++ b/include/asm-generic/sizes.h
@@ -0,0 +1,2 @@
+/* This is a placeholder, to be removed over time */
+#include <linux/sizes.h>
diff --git a/include/asm-generic/socket.h b/include/asm-generic/socket.h
deleted file mode 100644
index 9a6115e7cf6..00000000000
--- a/include/asm-generic/socket.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#ifndef __ASM_GENERIC_SOCKET_H
-#define __ASM_GENERIC_SOCKET_H
-
-#include <asm/sockios.h>
-
-/* For setsockopt(2) */
-#define SOL_SOCKET 1
-
-#define SO_DEBUG 1
-#define SO_REUSEADDR 2
-#define SO_TYPE 3
-#define SO_ERROR 4
-#define SO_DONTROUTE 5
-#define SO_BROADCAST 6
-#define SO_SNDBUF 7
-#define SO_RCVBUF 8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE 9
-#define SO_OOBINLINE 10
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_LINGER 13
-#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
-
-#ifndef SO_PASSCRED /* powerpc only differs in these */
-#define SO_PASSCRED 16
-#define SO_PEERCRED 17
-#define SO_RCVLOWAT 18
-#define SO_SNDLOWAT 19
-#define SO_RCVTIMEO 20
-#define SO_SNDTIMEO 21
-#endif
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION 22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
-#define SO_SECURITY_ENCRYPTION_NETWORK 24
-
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_ACCEPTCONN 30
-
-#define SO_PEERSEC 31
-#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-#define SO_MARK 36
-
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
-#define SO_PROTOCOL 38
-#define SO_DOMAIN 39
-
-#define SO_RXQ_OVFL 40
-#endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/include/asm-generic/sockios.h b/include/asm-generic/sockios.h
deleted file mode 100644
index 9a61a369b90..00000000000
--- a/include/asm-generic/sockios.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_GENERIC_SOCKIOS_H
-#define __ASM_GENERIC_SOCKIOS_H
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
-
-#endif /* __ASM_GENERIC_SOCKIOS_H */
diff --git a/include/asm-generic/stat.h b/include/asm-generic/stat.h
deleted file mode 100644
index bd8cad21998..00000000000
--- a/include/asm-generic/stat.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#ifndef __ASM_GENERIC_STAT_H
-#define __ASM_GENERIC_STAT_H
-
-/*
- * Everybody gets this wrong and has to stick with it for all
- * eternity. Hopefully, this version gets used by new architectures
- * so they don't fall into the same traps.
- *
- * stat64 is copied from powerpc64, with explicit padding added.
- * stat is the same structure layout on 64-bit, without the 'long long'
- * types.
- *
- * By convention, 64 bit architectures use the stat interface, while
- * 32 bit architectures use the stat64 interface. Note that we don't
- * provide an __old_kernel_stat here, which new architecture should
- * not have to start with.
- */
-
-#include <asm/bitsperlong.h>
-
-#define STAT_HAVE_NSEC 1
-
-struct stat {
- unsigned long st_dev; /* Device. */
- unsigned long st_ino; /* File serial number. */
- unsigned int st_mode; /* File mode. */
- unsigned int st_nlink; /* Link count. */
- unsigned int st_uid; /* User ID of the file's owner. */
- unsigned int st_gid; /* Group ID of the file's group. */
- unsigned long st_rdev; /* Device number, if device. */
- unsigned long __pad1;
- long st_size; /* Size of file, in bytes. */
- int st_blksize; /* Optimal block size for I/O. */
- int __pad2;
- long st_blocks; /* Number 512-byte blocks allocated. */
- long st_atime; /* Time of last access. */
- unsigned long st_atime_nsec;
- long st_mtime; /* Time of last modification. */
- unsigned long st_mtime_nsec;
- long st_ctime; /* Time of last status change. */
- unsigned long st_ctime_nsec;
- unsigned int __unused4;
- unsigned int __unused5;
-};
-
-/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
-#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
-struct stat64 {
- unsigned long long st_dev; /* Device. */
- unsigned long long st_ino; /* File serial number. */
- unsigned int st_mode; /* File mode. */
- unsigned int st_nlink; /* Link count. */
- unsigned int st_uid; /* User ID of the file's owner. */
- unsigned int st_gid; /* Group ID of the file's group. */
- unsigned long long st_rdev; /* Device number, if device. */
- unsigned long long __pad1;
- long long st_size; /* Size of file, in bytes. */
- int st_blksize; /* Optimal block size for I/O. */
- int __pad2;
- long long st_blocks; /* Number 512-byte blocks allocated. */
- int st_atime; /* Time of last access. */
- unsigned int st_atime_nsec;
- int st_mtime; /* Time of last modification. */
- unsigned int st_mtime_nsec;
- int st_ctime; /* Time of last status change. */
- unsigned int st_ctime_nsec;
- unsigned int __unused4;
- unsigned int __unused5;
-};
-#endif
-
-#endif /* __ASM_GENERIC_STAT_H */
diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
index 0fd28e028de..4b934e9ec97 100644
--- a/include/asm-generic/statfs.h
+++ b/include/asm-generic/statfs.h
@@ -1,86 +1,7 @@
#ifndef _GENERIC_STATFS_H
#define _GENERIC_STATFS_H
-#include <linux/types.h>
+#include <uapi/asm-generic/statfs.h>
-#ifdef __KERNEL__
typedef __kernel_fsid_t fsid_t;
#endif
-
-/*
- * Most 64-bit platforms use 'long', while most 32-bit platforms use '__u32'.
- * Yes, they differ in signedness as well as size.
- * Special cases can override it for themselves -- except for S390x, which
- * is just a little too special for us. And MIPS, which I'm not touching
- * with a 10' pole.
- */
-#ifndef __statfs_word
-#if BITS_PER_LONG == 64
-#define __statfs_word long
-#else
-#define __statfs_word __u32
-#endif
-#endif
-
-struct statfs {
- __statfs_word f_type;
- __statfs_word f_bsize;
- __statfs_word f_blocks;
- __statfs_word f_bfree;
- __statfs_word f_bavail;
- __statfs_word f_files;
- __statfs_word f_ffree;
- __kernel_fsid_t f_fsid;
- __statfs_word f_namelen;
- __statfs_word f_frsize;
- __statfs_word f_flags;
- __statfs_word f_spare[4];
-};
-
-/*
- * ARM needs to avoid the 32-bit padding at the end, for consistency
- * between EABI and OABI
- */
-#ifndef ARCH_PACK_STATFS64
-#define ARCH_PACK_STATFS64
-#endif
-
-struct statfs64 {
- __statfs_word f_type;
- __statfs_word f_bsize;
- __u64 f_blocks;
- __u64 f_bfree;
- __u64 f_bavail;
- __u64 f_files;
- __u64 f_ffree;
- __kernel_fsid_t f_fsid;
- __statfs_word f_namelen;
- __statfs_word f_frsize;
- __statfs_word f_flags;
- __statfs_word f_spare[4];
-} ARCH_PACK_STATFS64;
-
-/*
- * IA64 and x86_64 need to avoid the 32-bit padding at the end,
- * to be compatible with the i386 ABI
- */
-#ifndef ARCH_PACK_COMPAT_STATFS64
-#define ARCH_PACK_COMPAT_STATFS64
-#endif
-
-struct compat_statfs64 {
- __u32 f_type;
- __u32 f_bsize;
- __u64 f_blocks;
- __u64 f_bfree;
- __u64 f_bavail;
- __u64 f_files;
- __u64 f_ffree;
- __kernel_fsid_t f_fsid;
- __u32 f_namelen;
- __u32 f_frsize;
- __u32 f_flags;
- __u32 f_spare[4];
-} ARCH_PACK_COMPAT_STATFS64;
-
-#endif
diff --git a/include/asm-generic/swab.h b/include/asm-generic/swab.h
deleted file mode 100644
index a8e9029d9eb..00000000000
--- a/include/asm-generic/swab.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _ASM_GENERIC_SWAB_H
-#define _ASM_GENERIC_SWAB_H
-
-#include <asm/bitsperlong.h>
-
-/*
- * 32 bit architectures typically (but not always) want to
- * set __SWAB_64_THRU_32__. In user space, this is only
- * valid if the compiler supports 64 bit data types.
- */
-
-#if __BITS_PER_LONG == 32
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-#define __SWAB_64_THRU_32__
-#endif
-#endif
-
-#endif /* _ASM_GENERIC_SWAB_H */
diff --git a/include/asm-generic/switch_to.h b/include/asm-generic/switch_to.h
new file mode 100644
index 00000000000..052c4ac04fd
--- /dev/null
+++ b/include/asm-generic/switch_to.h
@@ -0,0 +1,30 @@
+/* Generic task switch macro wrapper, based on MN10300 definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but it serves more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_SWITCH_TO_H
+#define __ASM_GENERIC_SWITCH_TO_H
+
+#include <linux/thread_info.h>
+
+/*
+ * Context switching is now performed out-of-line in switch_to.S
+ */
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+
+#define switch_to(prev, next, last) \
+ do { \
+ ((last) = __switch_to((prev), (next))); \
+ } while (0)
+
+#endif /* __ASM_GENERIC_SWITCH_TO_H */
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index 5c122ae6bfa..d401e5463fb 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -142,4 +142,16 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args);
+/**
+ * syscall_get_arch - return the AUDIT_ARCH for the current system call
+ *
+ * Returns the AUDIT_ARCH_* based on the system call convention in use.
+ *
+ * It's only valid to call this when the task is stopped on entry to a system
+ * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP.
+ *
+ * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
+ * provide an implementation of this.
+ */
+int syscall_get_arch(void);
#endif /* _ASM_SYSCALL_H */
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
index d89dec864d4..1f74be5113b 100644
--- a/include/asm-generic/syscalls.h
+++ b/include/asm-generic/syscalls.h
@@ -8,26 +8,6 @@
* Calling conventions for these system calls can differ, so
* it's possible to override them.
*/
-#ifndef sys_clone
-asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
- void __user *parent_tid, void __user *child_tid,
- struct pt_regs *regs);
-#endif
-
-#ifndef sys_fork
-asmlinkage long sys_fork(struct pt_regs *regs);
-#endif
-
-#ifndef sys_vfork
-asmlinkage long sys_vfork(struct pt_regs *regs);
-#endif
-
-#ifndef sys_execve
-asmlinkage long sys_execve(const char __user *filename,
- const char __user *const __user *argv,
- const char __user *const __user *envp,
- struct pt_regs *regs);
-#endif
#ifndef sys_mmap2
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
@@ -41,22 +21,8 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
unsigned long fd, off_t pgoff);
#endif
-#ifndef sys_sigaltstack
-asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
- struct pt_regs *);
-#endif
-
#ifndef sys_rt_sigreturn
asmlinkage long sys_rt_sigreturn(struct pt_regs *regs);
#endif
-#ifndef sys_rt_sigsuspend
-asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
-#endif
-
-#ifndef sys_rt_sigaction
-asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act,
- struct sigaction __user *oact, size_t sigsetsize);
-#endif
-
#endif /* __ASM_GENERIC_SYSCALLS_H */
diff --git a/include/asm-generic/system.h b/include/asm-generic/system.h
deleted file mode 100644
index 4b0b9cbbfae..00000000000
--- a/include/asm-generic/system.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/* Generic system definitions, based on MN10300 definitions.
- *
- * It should be possible to use these on really simple architectures,
- * but it serves more as a starting point for new ports.
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef __ASM_GENERIC_SYSTEM_H
-#define __ASM_GENERIC_SYSTEM_H
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <linux/irqflags.h>
-
-#include <asm/cmpxchg-local.h>
-#include <asm/cmpxchg.h>
-
-struct task_struct;
-
-/* context switching is now performed out-of-line in switch_to.S */
-extern struct task_struct *__switch_to(struct task_struct *,
- struct task_struct *);
-#define switch_to(prev, next, last) \
- do { \
- ((last) = __switch_to((prev), (next))); \
- } while (0)
-
-#define arch_align_stack(x) (x)
-
-#define nop() asm volatile ("nop")
-
-#endif /* !__ASSEMBLY__ */
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * This implementation only contains a compiler barrier.
- */
-
-#define mb() asm volatile ("": : :"memory")
-#define rmb() mb()
-#define wmb() asm volatile ("": : :"memory")
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#endif
-
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-#define read_barrier_depends() do {} while (0)
-#define smp_read_barrier_depends() do {} while (0)
-
-/*
- * we make sure local_irq_enable() doesn't cause priority inversion
- */
-#ifndef __ASSEMBLY__
-
-/* This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg(). */
-extern void __xchg_called_with_bad_pointer(void);
-
-static inline
-unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-{
- unsigned long ret, flags;
-
- switch (size) {
- case 1:
-#ifdef __xchg_u8
- return __xchg_u8(x, ptr);
-#else
- local_irq_save(flags);
- ret = *(volatile u8 *)ptr;
- *(volatile u8 *)ptr = x;
- local_irq_restore(flags);
- return ret;
-#endif /* __xchg_u8 */
-
- case 2:
-#ifdef __xchg_u16
- return __xchg_u16(x, ptr);
-#else
- local_irq_save(flags);
- ret = *(volatile u16 *)ptr;
- *(volatile u16 *)ptr = x;
- local_irq_restore(flags);
- return ret;
-#endif /* __xchg_u16 */
-
- case 4:
-#ifdef __xchg_u32
- return __xchg_u32(x, ptr);
-#else
- local_irq_save(flags);
- ret = *(volatile u32 *)ptr;
- *(volatile u32 *)ptr = x;
- local_irq_restore(flags);
- return ret;
-#endif /* __xchg_u32 */
-
-#ifdef CONFIG_64BIT
- case 8:
-#ifdef __xchg_u64
- return __xchg_u64(x, ptr);
-#else
- local_irq_save(flags);
- ret = *(volatile u64 *)ptr;
- *(volatile u64 *)ptr = x;
- local_irq_restore(flags);
- return ret;
-#endif /* __xchg_u64 */
-#endif /* CONFIG_64BIT */
-
- default:
- __xchg_called_with_bad_pointer();
- return x;
- }
-}
-
-#define xchg(ptr, x) \
- ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-#endif /* __ASM_GENERIC_SYSTEM_H */
diff --git a/include/asm-generic/termbits.h b/include/asm-generic/termbits.h
deleted file mode 100644
index 232b4781aef..00000000000
--- a/include/asm-generic/termbits.h
+++ /dev/null
@@ -1,199 +0,0 @@
-#ifndef __ASM_GENERIC_TERMBITS_H
-#define __ASM_GENERIC_TERMBITS_H
-
-#include <linux/posix_types.h>
-
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
-typedef unsigned int tcflag_t;
-
-#define NCCS 19
-struct termios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
-};
-
-struct termios2 {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-struct ktermios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
-/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IUCLC 0001000
-#define IXON 0002000
-#define IXANY 0004000
-#define IXOFF 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
-
-/* c_oflag bits */
-#define OPOST 0000001
-#define OLCUC 0000002
-#define ONLCR 0000004
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
-
-/* c_cflag bit meaning */
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060
-#define CS5 0000000
-#define CS6 0000020
-#define CS7 0000040
-#define CS8 0000060
-#define CSTOPB 0000100
-#define CREAD 0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL 0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define BOTHER 0010000
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-#define ISIG 0000001
-#define ICANON 0000002
-#define XCASE 0000004
-#define ECHO 0000010
-#define ECHOE 0000020
-#define ECHOK 0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-#define EXTPROC 0200000
-
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
-/* tcsetattr uses these */
-#define TCSANOW 0
-#define TCSADRAIN 1
-#define TCSAFLUSH 2
-
-#endif /* __ASM_GENERIC_TERMBITS_H */
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h
index d0922adc56d..4fa6fe0fc2a 100644
--- a/include/asm-generic/termios.h
+++ b/include/asm-generic/termios.h
@@ -1,54 +1,9 @@
#ifndef _ASM_GENERIC_TERMIOS_H
#define _ASM_GENERIC_TERMIOS_H
-/*
- * Most architectures have straight copies of the x86 code, with
- * varying levels of bug fixes on top. Usually it's a good idea
- * to use this generic version instead, but be careful to avoid
- * ABI changes.
- * New architectures should not provide their own version.
- */
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-/* modem lines */
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-#ifdef __KERNEL__
#include <asm/uaccess.h>
+#include <uapi/asm-generic/termios.h>
/* intr=^C quit=^\ erase=del kill=^U
eof=^D vtime=\0 vmin=\1 sxtc=\0
@@ -149,6 +104,4 @@ static inline int kernel_termios_to_user_termios(struct termios __user *u,
}
#endif /* TCGETS2 */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_GENERIC_TERMIOS_H */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e43f9766259..5672d7ea1fa 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -5,6 +5,8 @@
* Copyright 2001 Red Hat, Inc.
* Based on code from mm/memory.c Copyright Linus Torvalds and others.
*
+ * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -17,97 +19,113 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
- * For UP we don't need to worry about TLB flush
- * and page free order so much..
- */
-#ifdef CONFIG_SMP
- #ifdef ARCH_FREE_PTR_NR
- #define FREE_PTR_NR ARCH_FREE_PTR_NR
- #else
- #define FREE_PTE_NR 506
- #endif
- #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
-#else
- #define FREE_PTE_NR 1
- #define tlb_fast_mode(tlb) 1
-#endif
-
-/* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page.
+ * Semi RCU freeing of the page directories.
+ *
+ * This is needed by some architectures to implement software pagetable walkers.
+ *
+ * gup_fast() and other software pagetable walkers do a lockless page-table
+ * walk and therefore need some synchronization with the freeing of the page
+ * directories. The chosen means to accomplish that is by disabling IRQs over
+ * the walk.
+ *
+ * Architectures that use IPIs to flush TLBs will then automagically DTRT,
+ * since we unlink the page, flush TLBs, free the page. Since the disabling of
+ * IRQs delays the completion of the TLB flush we can never observe an already
+ * freed page.
+ *
+ * Architectures that do not have this (PPC) need to delay the freeing by some
+ * other means; this is that means.
+ *
+ * What we do is batch the freed directory pages (tables) and RCU free them.
+ * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
+ * holds off grace periods.
+ *
+ * However, in order to batch these pages we need to allocate storage, this
+ * allocation is deep inside the MM code and can thus easily fail on memory
+ * pressure. To guarantee progress we fall back to single table freeing, see
+ * the implementation of tlb_remove_table_one().
+ *
*/
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int nr; /* set to ~0U means fast mode */
- unsigned int need_flush;/* Really unmapped some ptes? */
- unsigned int fullmm; /* non-zero means full mm flush */
- struct page * pages[FREE_PTE_NR];
+struct mmu_table_batch {
+ struct rcu_head rcu;
+ unsigned int nr;
+ void *tables[0];
};
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+#define MAX_TABLE_BATCH \
+ ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-/* tlb_gather_mmu
- * Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
-{
- struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
- tlb->mm = mm;
+#endif
- /* Use fast mode if only one CPU is online */
- tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;
+/*
+ * If we can't allocate a page to make a big batch of page pointers
+ * to work on, then just handle a few from the on-stack structure.
+ */
+#define MMU_GATHER_BUNDLE 8
- tlb->fullmm = full_mm_flush;
+struct mmu_gather_batch {
+ struct mmu_gather_batch *next;
+ unsigned int nr;
+ unsigned int max;
+ struct page *pages[0];
+};
- return tlb;
-}
+#define MAX_GATHER_BATCH \
+ ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
- if (!tlb->need_flush)
- return;
- tlb->need_flush = 0;
- tlb_flush(tlb);
- if (!tlb_fast_mode(tlb)) {
- free_pages_and_swap_cache(tlb->pages, tlb->nr);
- tlb->nr = 0;
- }
-}
+/*
+ * Limit the maximum number of mmu_gather batches to reduce a risk of soft
+ * lockups for non-preemptible kernels on huge machines when a lot of memory
+ * is zapped during unmapping.
+ * 10K pages freed at once should be safe even without a preemption point.
+ */
+#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
-/* tlb_finish_mmu
- * Called at the end of the shootdown operation to free up any resources
- * that were required.
+/* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
*/
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
- tlb_flush_mmu(tlb, start, end);
+struct mmu_gather {
+ struct mm_struct *mm;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ struct mmu_table_batch *batch;
+#endif
+ unsigned long start;
+ unsigned long end;
+ unsigned int need_flush : 1, /* Did free PTEs */
+ /* we are in the middle of an operation to clear
+ * a full mm and can make some optimizations */
+ fullmm : 1,
+ /* we have performed an operation which
+ * requires a complete flush of the tlb */
+ need_flush_all : 1;
+
+ struct mmu_gather_batch *active;
+ struct mmu_gather_batch local;
+ struct page *__pages[MMU_GATHER_BUNDLE];
+ unsigned int batch_count;
+};
- /* keep the page table cache within bounds */
- check_pgt_cache();
+#define HAVE_GENERIC_MMU_GATHER
- put_cpu_var(mmu_gathers);
-}
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
+void tlb_flush_mmu(struct mmu_gather *tlb);
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
+ unsigned long end);
+int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
/* tlb_remove_page
- * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- * handling the additional races in SMP caused by other CPUs caching valid
- * mappings in their TLBs.
+ * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
+ * required.
*/
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
- tlb->need_flush = 1;
- if (tlb_fast_mode(tlb)) {
- free_page_and_swap_cache(page);
- return;
- }
- tlb->pages[tlb->nr++] = page;
- if (tlb->nr >= FREE_PTE_NR)
- tlb_flush_mmu(tlb, 0, 0);
+ if (!__tlb_remove_page(tlb, page))
+ tlb_flush_mmu(tlb);
}
/**
@@ -123,6 +141,20 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
+/**
+ * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
+ * This is a nop so far, because only x86 needs it.
+ */
+#ifndef __tlb_remove_pmd_tlb_entry
+#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
+#endif
+
+#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
+ do { \
+ tlb->need_flush = 1; \
+ __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
+ } while (0)
+
#define pte_free_tlb(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
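To make the control flow of the new batching concrete: pages are queued into a fixed-size batch, and the flush step runs whenever the active batch fills up, which is exactly the split between __tlb_remove_page() and tlb_remove_page() above. A stand-alone sketch of that pattern follows; everything suffixed _model is illustrative, not kernel API.

#include <stdio.h>

#define BATCH_MAX 4			/* stands in for MAX_GATHER_BATCH */

struct gather_model {
	void *pages[BATCH_MAX];
	unsigned int nr;
};

/* Nonzero while there is still room, zero when the caller must flush first. */
static int queue_page_model(struct gather_model *g, void *page)
{
	g->pages[g->nr++] = page;
	return g->nr < BATCH_MAX;
}

static void flush_model(struct gather_model *g)
{
	printf("flushing %u pages\n", g->nr);	/* kernel: flush TLBs, then free */
	g->nr = 0;
}

int main(void)
{
	struct gather_model g = { .nr = 0 };
	int dummy[10];
	int i;

	for (i = 0; i < 10; i++)
		if (!queue_page_model(&g, &dummy[i]))
			flush_model(&g);
	if (g.nr)
		flush_model(&g);		/* final partial batch */
	return 0;
}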
diff --git a/include/asm-generic/tlbflush.h b/include/asm-generic/tlbflush.h
index c7af037024c..d6d0a88430f 100644
--- a/include/asm-generic/tlbflush.h
+++ b/include/asm-generic/tlbflush.h
@@ -9,6 +9,8 @@
#error need to implement an architecture specific asm/tlbflush.h
#endif
+#include <linux/bug.h>
+
static inline void flush_tlb_mm(struct mm_struct *mm)
{
BUG();
diff --git a/include/asm-generic/trace_clock.h b/include/asm-generic/trace_clock.h
new file mode 100644
index 00000000000..6726f1bafb5
--- /dev/null
+++ b/include/asm-generic/trace_clock.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_GENERIC_TRACE_CLOCK_H
+#define _ASM_GENERIC_TRACE_CLOCK_H
+/*
+ * Arch-specific trace clocks.
+ */
+
+/*
+ * Additional trace clocks added to the trace_clocks
+ * array in kernel/trace/trace.c
+ * None if the architecture has not defined it.
+ */
+#ifndef ARCH_TRACE_CLOCKS
+# define ARCH_TRACE_CLOCKS
+#endif
+
+#endif /* _ASM_GENERIC_TRACE_CLOCK_H */
diff --git a/include/asm-generic/types.h b/include/asm-generic/types.h
deleted file mode 100644
index fba7d33ca3f..00000000000
--- a/include/asm-generic/types.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef _ASM_GENERIC_TYPES_H
-#define _ASM_GENERIC_TYPES_H
-/*
- * int-ll64 is used practically everywhere now,
- * so use it as a reasonable default.
- */
-#include <asm-generic/int-ll64.h>
-
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-/*
- * DMA addresses may be very different from physical addresses
- * and pointers. i386 and powerpc may have 64 bit DMA on 32 bit
- * systems, while sparc64 uses 32 bit DMA addresses for 64 bit
- * physical addresses.
- * This default defines dma_addr_t to have the same size as
- * phys_addr_t, which is the most common way.
- * Do not define the dma64_addr_t type, which never really
- * worked.
- */
-#ifndef dma_addr_t
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-typedef u64 dma_addr_t;
-#else
-typedef u32 dma_addr_t;
-#endif /* CONFIG_PHYS_ADDR_T_64BIT */
-#endif /* dma_addr_t */
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_GENERIC_TYPES_H */
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index b218b8513d0..72d8803832f 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -3,11 +3,10 @@
/*
* User space memory access functions, these should work
- * on a ny machine that has kernel and user data in the same
+ * on any machine that has kernel and user data in the same
* address space, e.g. all NOMMU machines.
*/
#include <linux/sched.h>
-#include <linux/mm.h>
#include <linux/string.h>
#include <asm/segment.h>
@@ -32,7 +31,9 @@ static inline void set_fs(mm_segment_t fs)
}
#endif
+#ifndef segment_eq
#define segment_eq(a, b) ((a).seg == (b).seg)
+#endif
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -162,18 +163,24 @@ static inline __must_check long __copy_to_user(void __user *to,
#define put_user(x, ptr) \
({ \
- might_sleep(); \
+ might_fault(); \
access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ? \
__put_user(x, ptr) : \
-EFAULT; \
})
+#ifndef __put_user_fn
+
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
size = __copy_to_user(ptr, x, size);
return size ? -EFAULT : size;
}
+#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
+
+#endif
+
extern int __put_user_bad(void) __attribute__((noreturn));
#define __get_user(x, ptr) \
@@ -218,18 +225,23 @@ extern int __put_user_bad(void) __attribute__((noreturn));
#define get_user(x, ptr) \
({ \
- might_sleep(); \
+ might_fault(); \
access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \
__get_user(x, ptr) : \
-EFAULT; \
})
+#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
size = __copy_from_user(x, ptr, size);
return size ? -EFAULT : size;
}
+#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
+
+#endif
+
extern int __get_user_bad(void) __attribute__((noreturn));
#ifndef __copy_from_user_inatomic
@@ -243,7 +255,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
- might_sleep();
+ might_fault();
if (access_ok(VERIFY_READ, from, n))
return __copy_from_user(to, from, n);
else
@@ -253,7 +265,7 @@ static inline long copy_from_user(void *to,
static inline long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
- might_sleep();
+ might_fault();
if (access_ok(VERIFY_WRITE, to, n))
return __copy_to_user(to, from, n);
else
@@ -288,14 +300,21 @@ strncpy_from_user(char *dst, const char __user *src, long count)
*
* Return 0 on exception, a value greater than N if too long
*/
-#ifndef strnlen_user
+#ifndef __strnlen_user
+#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
+#endif
+
+/*
+ * Unlike strnlen, strnlen_user includes the nul terminator in
+ * its returned count. Callers should check for a returned value
+ * greater than N as an indication the string is too long.
+ */
static inline long strnlen_user(const char __user *src, long n)
{
if (!access_ok(VERIFY_READ, src, 1))
return 0;
- return strlen((void * __force)src) + 1;
+ return __strnlen_user(src, n);
}
-#endif
static inline long strlen_user(const char __user *src)
{
@@ -317,7 +336,7 @@ __clear_user(void __user *to, unsigned long n)
static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
- might_sleep();
+ might_fault();
if (!access_ok(VERIFY_WRITE, to, n))
return n;
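The calling convention these helpers implement is easy to get wrong, so for reference: copy_from_user()/copy_to_user() return the number of bytes that could not be copied, and callers turn any nonzero result into -EFAULT. A small user-space model of that caller pattern, with a fake copy routine standing in for the real one:

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Pretend copy: "fails" on a NULL source to exercise the error path. */
static unsigned long copy_from_user_model(void *to, const void *from,
					  unsigned long n)
{
	if (!from)
		return n;		/* nothing copied */
	memcpy(to, from, n);
	return 0;			/* everything copied */
}

static long read_value_model(int *out, const int *user_ptr)
{
	int val;

	if (copy_from_user_model(&val, user_ptr, sizeof(val)))
		return -EFAULT;		/* partial/failed copy -> -EFAULT */
	*out = val;
	return 0;
}

int main(void)
{
	int src = 42, dst = 0;

	printf("ok: %ld, val %d\n", read_value_model(&dst, &src), dst);
	printf("fault: %ld\n", read_value_model(&dst, NULL));
	return 0;
}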
diff --git a/include/asm-generic/ucontext.h b/include/asm-generic/ucontext.h
deleted file mode 100644
index ad77343e8a9..00000000000
--- a/include/asm-generic/ucontext.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef __ASM_GENERIC_UCONTEXT_H
-#define __ASM_GENERIC_UCONTEXT_H
-
-struct ucontext {
- unsigned long uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- struct sigcontext uc_mcontext;
- sigset_t uc_sigmask; /* mask last for extensibility */
-};
-
-#endif /* __ASM_GENERIC_UCONTEXT_H */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 03cf5936bad..1ac097279db 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -4,22 +4,27 @@
/*
* This is the most generic implementation of unaligned accesses
* and should work almost anywhere.
- *
- * If an architecture can handle unaligned accesses in hardware,
- * it may want to use the linux/unaligned/access_ok.h implementation
- * instead.
*/
#include <asm/byteorder.h>
+/* Set by the arch if it can handle unaligned accesses in hardware. */
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+# include <linux/unaligned/access_ok.h>
+#endif
+
#if defined(__LITTLE_ENDIAN)
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
+# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# endif
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
#elif defined(__BIG_ENDIAN)
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
+# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# endif
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
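The byte-shift fallback selected above amounts to assembling the value one byte at a time so the CPU never issues a misaligned load, while the CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS path is essentially a direct load. A stand-alone sketch of the little-endian 32-bit case (a model of the idea, not the kernel macros themselves):

#include <stdio.h>
#include <stdint.h>

static uint32_t get_unaligned_le32_model(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	/* Buffer deliberately read at an odd offset. */
	uint8_t buf[8] = { 0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0 };

	printf("%#x\n", (unsigned)get_unaligned_le32_model(buf + 1));	/* 0x12345678 */
	return 0;
}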
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index b969770196c..cccc86ecfea 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -1,863 +1,5 @@
-#if !defined(_ASM_GENERIC_UNISTD_H) || defined(__SYSCALL)
-#define _ASM_GENERIC_UNISTD_H
-
-#include <asm/bitsperlong.h>
-
-/*
- * This file contains the system call numbers, based on the
- * layout of the x86-64 architecture, which embeds the
- * pointer to the syscall in the table.
- *
- * As a basic principle, no duplication of functionality
- * should be added, e.g. we don't use lseek when llseek
- * is present. New architectures should use this file
- * and implement the less feature-full calls in user space.
- */
-
-#ifndef __SYSCALL
-#define __SYSCALL(x, y)
-#endif
-
-#if __BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)
-#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32)
-#else
-#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)
-#endif
-
-#define __NR_io_setup 0
-__SYSCALL(__NR_io_setup, sys_io_setup)
-#define __NR_io_destroy 1
-__SYSCALL(__NR_io_destroy, sys_io_destroy)
-#define __NR_io_submit 2
-__SYSCALL(__NR_io_submit, sys_io_submit)
-#define __NR_io_cancel 3
-__SYSCALL(__NR_io_cancel, sys_io_cancel)
-#define __NR_io_getevents 4
-__SYSCALL(__NR_io_getevents, sys_io_getevents)
-
-/* fs/xattr.c */
-#define __NR_setxattr 5
-__SYSCALL(__NR_setxattr, sys_setxattr)
-#define __NR_lsetxattr 6
-__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
-#define __NR_fsetxattr 7
-__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
-#define __NR_getxattr 8
-__SYSCALL(__NR_getxattr, sys_getxattr)
-#define __NR_lgetxattr 9
-__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
-#define __NR_fgetxattr 10
-__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
-#define __NR_listxattr 11
-__SYSCALL(__NR_listxattr, sys_listxattr)
-#define __NR_llistxattr 12
-__SYSCALL(__NR_llistxattr, sys_llistxattr)
-#define __NR_flistxattr 13
-__SYSCALL(__NR_flistxattr, sys_flistxattr)
-#define __NR_removexattr 14
-__SYSCALL(__NR_removexattr, sys_removexattr)
-#define __NR_lremovexattr 15
-__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
-#define __NR_fremovexattr 16
-__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
-
-/* fs/dcache.c */
-#define __NR_getcwd 17
-__SYSCALL(__NR_getcwd, sys_getcwd)
-
-/* fs/cookies.c */
-#define __NR_lookup_dcookie 18
-__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie)
-
-/* fs/eventfd.c */
-#define __NR_eventfd2 19
-__SYSCALL(__NR_eventfd2, sys_eventfd2)
-
-/* fs/eventpoll.c */
-#define __NR_epoll_create1 20
-__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
-#define __NR_epoll_ctl 21
-__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
-#define __NR_epoll_pwait 22
-__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait)
-
-/* fs/fcntl.c */
-#define __NR_dup 23
-__SYSCALL(__NR_dup, sys_dup)
-#define __NR_dup3 24
-__SYSCALL(__NR_dup3, sys_dup3)
-#define __NR3264_fcntl 25
-__SC_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl)
-
-/* fs/inotify_user.c */
-#define __NR_inotify_init1 26
-__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
-#define __NR_inotify_add_watch 27
-__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
-#define __NR_inotify_rm_watch 28
-__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
-
-/* fs/ioctl.c */
-#define __NR_ioctl 29
-__SYSCALL(__NR_ioctl, sys_ioctl)
-
-/* fs/ioprio.c */
-#define __NR_ioprio_set 30
-__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
-#define __NR_ioprio_get 31
-__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
-
-/* fs/locks.c */
-#define __NR_flock 32
-__SYSCALL(__NR_flock, sys_flock)
-
-/* fs/namei.c */
-#define __NR_mknodat 33
-__SYSCALL(__NR_mknodat, sys_mknodat)
-#define __NR_mkdirat 34
-__SYSCALL(__NR_mkdirat, sys_mkdirat)
-#define __NR_unlinkat 35
-__SYSCALL(__NR_unlinkat, sys_unlinkat)
-#define __NR_symlinkat 36
-__SYSCALL(__NR_symlinkat, sys_symlinkat)
-#define __NR_linkat 37
-__SYSCALL(__NR_linkat, sys_linkat)
-#define __NR_renameat 38
-__SYSCALL(__NR_renameat, sys_renameat)
-
-/* fs/namespace.c */
-#define __NR_umount2 39
-__SYSCALL(__NR_umount2, sys_umount)
-#define __NR_mount 40
-__SYSCALL(__NR_mount, sys_mount)
-#define __NR_pivot_root 41
-__SYSCALL(__NR_pivot_root, sys_pivot_root)
-
-/* fs/nfsctl.c */
-#define __NR_nfsservctl 42
-__SYSCALL(__NR_nfsservctl, sys_nfsservctl)
-
-/* fs/open.c */
-#define __NR3264_statfs 43
-__SC_3264(__NR3264_statfs, sys_statfs64, sys_statfs)
-#define __NR3264_fstatfs 44
-__SC_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs)
-#define __NR3264_truncate 45
-__SC_3264(__NR3264_truncate, sys_truncate64, sys_truncate)
-#define __NR3264_ftruncate 46
-__SC_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate)
-
-#define __NR_fallocate 47
-__SYSCALL(__NR_fallocate, sys_fallocate)
-#define __NR_faccessat 48
-__SYSCALL(__NR_faccessat, sys_faccessat)
-#define __NR_chdir 49
-__SYSCALL(__NR_chdir, sys_chdir)
-#define __NR_fchdir 50
-__SYSCALL(__NR_fchdir, sys_fchdir)
-#define __NR_chroot 51
-__SYSCALL(__NR_chroot, sys_chroot)
-#define __NR_fchmod 52
-__SYSCALL(__NR_fchmod, sys_fchmod)
-#define __NR_fchmodat 53
-__SYSCALL(__NR_fchmodat, sys_fchmodat)
-#define __NR_fchownat 54
-__SYSCALL(__NR_fchownat, sys_fchownat)
-#define __NR_fchown 55
-__SYSCALL(__NR_fchown, sys_fchown)
-#define __NR_openat 56
-__SYSCALL(__NR_openat, sys_openat)
-#define __NR_close 57
-__SYSCALL(__NR_close, sys_close)
-#define __NR_vhangup 58
-__SYSCALL(__NR_vhangup, sys_vhangup)
-
-/* fs/pipe.c */
-#define __NR_pipe2 59
-__SYSCALL(__NR_pipe2, sys_pipe2)
-
-/* fs/quota.c */
-#define __NR_quotactl 60
-__SYSCALL(__NR_quotactl, sys_quotactl)
-
-/* fs/readdir.c */
-#define __NR_getdents64 61
-__SYSCALL(__NR_getdents64, sys_getdents64)
-
-/* fs/read_write.c */
-#define __NR3264_lseek 62
-__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek)
-#define __NR_read 63
-__SYSCALL(__NR_read, sys_read)
-#define __NR_write 64
-__SYSCALL(__NR_write, sys_write)
-#define __NR_readv 65
-__SYSCALL(__NR_readv, sys_readv)
-#define __NR_writev 66
-__SYSCALL(__NR_writev, sys_writev)
-#define __NR_pread64 67
-__SYSCALL(__NR_pread64, sys_pread64)
-#define __NR_pwrite64 68
-__SYSCALL(__NR_pwrite64, sys_pwrite64)
-#define __NR_preadv 69
-__SYSCALL(__NR_preadv, sys_preadv)
-#define __NR_pwritev 70
-__SYSCALL(__NR_pwritev, sys_pwritev)
-
-/* fs/sendfile.c */
-#define __NR3264_sendfile 71
-__SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile)
-
-/* fs/select.c */
-#define __NR_pselect6 72
-__SYSCALL(__NR_pselect6, sys_pselect6)
-#define __NR_ppoll 73
-__SYSCALL(__NR_ppoll, sys_ppoll)
-
-/* fs/signalfd.c */
-#define __NR_signalfd4 74
-__SYSCALL(__NR_signalfd4, sys_signalfd4)
-
-/* fs/splice.c */
-#define __NR_vmsplice 75
-__SYSCALL(__NR_vmsplice, sys_vmsplice)
-#define __NR_splice 76
-__SYSCALL(__NR_splice, sys_splice)
-#define __NR_tee 77
-__SYSCALL(__NR_tee, sys_tee)
-
-/* fs/stat.c */
-#define __NR_readlinkat 78
-__SYSCALL(__NR_readlinkat, sys_readlinkat)
-#define __NR3264_fstatat 79
-__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
-#define __NR3264_fstat 80
-__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
-
-/* fs/sync.c */
-#define __NR_sync 81
-__SYSCALL(__NR_sync, sys_sync)
-#define __NR_fsync 82
-__SYSCALL(__NR_fsync, sys_fsync)
-#define __NR_fdatasync 83
-__SYSCALL(__NR_fdatasync, sys_fdatasync)
-#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
-#define __NR_sync_file_range2 84
-__SYSCALL(__NR_sync_file_range2, sys_sync_file_range2)
-#else
-#define __NR_sync_file_range 84
-__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
-#endif
-
-/* fs/timerfd.c */
-#define __NR_timerfd_create 85
-__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
-#define __NR_timerfd_settime 86
-__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
-#define __NR_timerfd_gettime 87
-__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
-
-/* fs/utimes.c */
-#define __NR_utimensat 88
-__SYSCALL(__NR_utimensat, sys_utimensat)
-
-/* kernel/acct.c */
-#define __NR_acct 89
-__SYSCALL(__NR_acct, sys_acct)
-
-/* kernel/capability.c */
-#define __NR_capget 90
-__SYSCALL(__NR_capget, sys_capget)
-#define __NR_capset 91
-__SYSCALL(__NR_capset, sys_capset)
-
-/* kernel/exec_domain.c */
-#define __NR_personality 92
-__SYSCALL(__NR_personality, sys_personality)
-
-/* kernel/exit.c */
-#define __NR_exit 93
-__SYSCALL(__NR_exit, sys_exit)
-#define __NR_exit_group 94
-__SYSCALL(__NR_exit_group, sys_exit_group)
-#define __NR_waitid 95
-__SYSCALL(__NR_waitid, sys_waitid)
-
-/* kernel/fork.c */
-#define __NR_set_tid_address 96
-__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
-#define __NR_unshare 97
-__SYSCALL(__NR_unshare, sys_unshare)
-
-/* kernel/futex.c */
-#define __NR_futex 98
-__SYSCALL(__NR_futex, sys_futex)
-#define __NR_set_robust_list 99
-__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
-#define __NR_get_robust_list 100
-__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
-
-/* kernel/hrtimer.c */
-#define __NR_nanosleep 101
-__SYSCALL(__NR_nanosleep, sys_nanosleep)
-
-/* kernel/itimer.c */
-#define __NR_getitimer 102
-__SYSCALL(__NR_getitimer, sys_getitimer)
-#define __NR_setitimer 103
-__SYSCALL(__NR_setitimer, sys_setitimer)
-
-/* kernel/kexec.c */
-#define __NR_kexec_load 104
-__SYSCALL(__NR_kexec_load, sys_kexec_load)
-
-/* kernel/module.c */
-#define __NR_init_module 105
-__SYSCALL(__NR_init_module, sys_init_module)
-#define __NR_delete_module 106
-__SYSCALL(__NR_delete_module, sys_delete_module)
-
-/* kernel/posix-timers.c */
-#define __NR_timer_create 107
-__SYSCALL(__NR_timer_create, sys_timer_create)
-#define __NR_timer_gettime 108
-__SYSCALL(__NR_timer_gettime, sys_timer_gettime)
-#define __NR_timer_getoverrun 109
-__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
-#define __NR_timer_settime 110
-__SYSCALL(__NR_timer_settime, sys_timer_settime)
-#define __NR_timer_delete 111
-__SYSCALL(__NR_timer_delete, sys_timer_delete)
-#define __NR_clock_settime 112
-__SYSCALL(__NR_clock_settime, sys_clock_settime)
-#define __NR_clock_gettime 113
-__SYSCALL(__NR_clock_gettime, sys_clock_gettime)
-#define __NR_clock_getres 114
-__SYSCALL(__NR_clock_getres, sys_clock_getres)
-#define __NR_clock_nanosleep 115
-__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep)
-
-/* kernel/printk.c */
-#define __NR_syslog 116
-__SYSCALL(__NR_syslog, sys_syslog)
-
-/* kernel/ptrace.c */
-#define __NR_ptrace 117
-__SYSCALL(__NR_ptrace, sys_ptrace)
-
-/* kernel/sched.c */
-#define __NR_sched_setparam 118
-__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
-#define __NR_sched_setscheduler 119
-__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
-#define __NR_sched_getscheduler 120
-__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
-#define __NR_sched_getparam 121
-__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
-#define __NR_sched_setaffinity 122
-__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity)
-#define __NR_sched_getaffinity 123
-__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity)
-#define __NR_sched_yield 124
-__SYSCALL(__NR_sched_yield, sys_sched_yield)
-#define __NR_sched_get_priority_max 125
-__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
-#define __NR_sched_get_priority_min 126
-__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
-#define __NR_sched_rr_get_interval 127
-__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval)
-
-/* kernel/signal.c */
-#define __NR_restart_syscall 128
-__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
-#define __NR_kill 129
-__SYSCALL(__NR_kill, sys_kill)
-#define __NR_tkill 130
-__SYSCALL(__NR_tkill, sys_tkill)
-#define __NR_tgkill 131
-__SYSCALL(__NR_tgkill, sys_tgkill)
-#define __NR_sigaltstack 132
-__SYSCALL(__NR_sigaltstack, sys_sigaltstack)
-#define __NR_rt_sigsuspend 133
-__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend) /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
-#define __NR_rt_sigaction 134
-__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction) /* __ARCH_WANT_SYS_RT_SIGACTION */
-#define __NR_rt_sigprocmask 135
-__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
-#define __NR_rt_sigpending 136
-__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
-#define __NR_rt_sigtimedwait 137
-__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
-#define __NR_rt_sigqueueinfo 138
-__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
-#define __NR_rt_sigreturn 139
-__SYSCALL(__NR_rt_sigreturn, sys_rt_sigreturn) /* sys_rt_sigreturn_wrapper, */
-
-/* kernel/sys.c */
-#define __NR_setpriority 140
-__SYSCALL(__NR_setpriority, sys_setpriority)
-#define __NR_getpriority 141
-__SYSCALL(__NR_getpriority, sys_getpriority)
-#define __NR_reboot 142
-__SYSCALL(__NR_reboot, sys_reboot)
-#define __NR_setregid 143
-__SYSCALL(__NR_setregid, sys_setregid)
-#define __NR_setgid 144
-__SYSCALL(__NR_setgid, sys_setgid)
-#define __NR_setreuid 145
-__SYSCALL(__NR_setreuid, sys_setreuid)
-#define __NR_setuid 146
-__SYSCALL(__NR_setuid, sys_setuid)
-#define __NR_setresuid 147
-__SYSCALL(__NR_setresuid, sys_setresuid)
-#define __NR_getresuid 148
-__SYSCALL(__NR_getresuid, sys_getresuid)
-#define __NR_setresgid 149
-__SYSCALL(__NR_setresgid, sys_setresgid)
-#define __NR_getresgid 150
-__SYSCALL(__NR_getresgid, sys_getresgid)
-#define __NR_setfsuid 151
-__SYSCALL(__NR_setfsuid, sys_setfsuid)
-#define __NR_setfsgid 152
-__SYSCALL(__NR_setfsgid, sys_setfsgid)
-#define __NR_times 153
-__SYSCALL(__NR_times, sys_times)
-#define __NR_setpgid 154
-__SYSCALL(__NR_setpgid, sys_setpgid)
-#define __NR_getpgid 155
-__SYSCALL(__NR_getpgid, sys_getpgid)
-#define __NR_getsid 156
-__SYSCALL(__NR_getsid, sys_getsid)
-#define __NR_setsid 157
-__SYSCALL(__NR_setsid, sys_setsid)
-#define __NR_getgroups 158
-__SYSCALL(__NR_getgroups, sys_getgroups)
-#define __NR_setgroups 159
-__SYSCALL(__NR_setgroups, sys_setgroups)
-#define __NR_uname 160
-__SYSCALL(__NR_uname, sys_newuname)
-#define __NR_sethostname 161
-__SYSCALL(__NR_sethostname, sys_sethostname)
-#define __NR_setdomainname 162
-__SYSCALL(__NR_setdomainname, sys_setdomainname)
-#define __NR_getrlimit 163
-__SYSCALL(__NR_getrlimit, sys_getrlimit)
-#define __NR_setrlimit 164
-__SYSCALL(__NR_setrlimit, sys_setrlimit)
-#define __NR_getrusage 165
-__SYSCALL(__NR_getrusage, sys_getrusage)
-#define __NR_umask 166
-__SYSCALL(__NR_umask, sys_umask)
-#define __NR_prctl 167
-__SYSCALL(__NR_prctl, sys_prctl)
-#define __NR_getcpu 168
-__SYSCALL(__NR_getcpu, sys_getcpu)
-
-/* kernel/time.c */
-#define __NR_gettimeofday 169
-__SYSCALL(__NR_gettimeofday, sys_gettimeofday)
-#define __NR_settimeofday 170
-__SYSCALL(__NR_settimeofday, sys_settimeofday)
-#define __NR_adjtimex 171
-__SYSCALL(__NR_adjtimex, sys_adjtimex)
-
-/* kernel/timer.c */
-#define __NR_getpid 172
-__SYSCALL(__NR_getpid, sys_getpid)
-#define __NR_getppid 173
-__SYSCALL(__NR_getppid, sys_getppid)
-#define __NR_getuid 174
-__SYSCALL(__NR_getuid, sys_getuid)
-#define __NR_geteuid 175
-__SYSCALL(__NR_geteuid, sys_geteuid)
-#define __NR_getgid 176
-__SYSCALL(__NR_getgid, sys_getgid)
-#define __NR_getegid 177
-__SYSCALL(__NR_getegid, sys_getegid)
-#define __NR_gettid 178
-__SYSCALL(__NR_gettid, sys_gettid)
-#define __NR_sysinfo 179
-__SYSCALL(__NR_sysinfo, sys_sysinfo)
-
-/* ipc/mqueue.c */
-#define __NR_mq_open 180
-__SYSCALL(__NR_mq_open, sys_mq_open)
-#define __NR_mq_unlink 181
-__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
-#define __NR_mq_timedsend 182
-__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend)
-#define __NR_mq_timedreceive 183
-__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive)
-#define __NR_mq_notify 184
-__SYSCALL(__NR_mq_notify, sys_mq_notify)
-#define __NR_mq_getsetattr 185
-__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
-
-/* ipc/msg.c */
-#define __NR_msgget 186
-__SYSCALL(__NR_msgget, sys_msgget)
-#define __NR_msgctl 187
-__SYSCALL(__NR_msgctl, sys_msgctl)
-#define __NR_msgrcv 188
-__SYSCALL(__NR_msgrcv, sys_msgrcv)
-#define __NR_msgsnd 189
-__SYSCALL(__NR_msgsnd, sys_msgsnd)
-
-/* ipc/sem.c */
-#define __NR_semget 190
-__SYSCALL(__NR_semget, sys_semget)
-#define __NR_semctl 191
-__SYSCALL(__NR_semctl, sys_semctl)
-#define __NR_semtimedop 192
-__SYSCALL(__NR_semtimedop, sys_semtimedop)
-#define __NR_semop 193
-__SYSCALL(__NR_semop, sys_semop)
-
-/* ipc/shm.c */
-#define __NR_shmget 194
-__SYSCALL(__NR_shmget, sys_shmget)
-#define __NR_shmctl 195
-__SYSCALL(__NR_shmctl, sys_shmctl)
-#define __NR_shmat 196
-__SYSCALL(__NR_shmat, sys_shmat)
-#define __NR_shmdt 197
-__SYSCALL(__NR_shmdt, sys_shmdt)
-
-/* net/socket.c */
-#define __NR_socket 198
-__SYSCALL(__NR_socket, sys_socket)
-#define __NR_socketpair 199
-__SYSCALL(__NR_socketpair, sys_socketpair)
-#define __NR_bind 200
-__SYSCALL(__NR_bind, sys_bind)
-#define __NR_listen 201
-__SYSCALL(__NR_listen, sys_listen)
-#define __NR_accept 202
-__SYSCALL(__NR_accept, sys_accept)
-#define __NR_connect 203
-__SYSCALL(__NR_connect, sys_connect)
-#define __NR_getsockname 204
-__SYSCALL(__NR_getsockname, sys_getsockname)
-#define __NR_getpeername 205
-__SYSCALL(__NR_getpeername, sys_getpeername)
-#define __NR_sendto 206
-__SYSCALL(__NR_sendto, sys_sendto)
-#define __NR_recvfrom 207
-__SYSCALL(__NR_recvfrom, sys_recvfrom)
-#define __NR_setsockopt 208
-__SYSCALL(__NR_setsockopt, sys_setsockopt)
-#define __NR_getsockopt 209
-__SYSCALL(__NR_getsockopt, sys_getsockopt)
-#define __NR_shutdown 210
-__SYSCALL(__NR_shutdown, sys_shutdown)
-#define __NR_sendmsg 211
-__SYSCALL(__NR_sendmsg, sys_sendmsg)
-#define __NR_recvmsg 212
-__SYSCALL(__NR_recvmsg, sys_recvmsg)
-
-/* mm/filemap.c */
-#define __NR_readahead 213
-__SYSCALL(__NR_readahead, sys_readahead)
-
-/* mm/nommu.c, also with MMU */
-#define __NR_brk 214
-__SYSCALL(__NR_brk, sys_brk)
-#define __NR_munmap 215
-__SYSCALL(__NR_munmap, sys_munmap)
-#define __NR_mremap 216
-__SYSCALL(__NR_mremap, sys_mremap)
-
-/* security/keys/keyctl.c */
-#define __NR_add_key 217
-__SYSCALL(__NR_add_key, sys_add_key)
-#define __NR_request_key 218
-__SYSCALL(__NR_request_key, sys_request_key)
-#define __NR_keyctl 219
-__SYSCALL(__NR_keyctl, sys_keyctl)
-
-/* arch/example/kernel/sys_example.c */
-#define __NR_clone 220
-__SYSCALL(__NR_clone, sys_clone) /* .long sys_clone_wrapper */
-#define __NR_execve 221
-__SYSCALL(__NR_execve, sys_execve) /* .long sys_execve_wrapper */
-
-#define __NR3264_mmap 222
-__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
-/* mm/fadvise.c */
-#define __NR3264_fadvise64 223
-__SYSCALL(__NR3264_fadvise64, sys_fadvise64_64)
-
-/* mm/, CONFIG_MMU only */
-#ifndef __ARCH_NOMMU
-#define __NR_swapon 224
-__SYSCALL(__NR_swapon, sys_swapon)
-#define __NR_swapoff 225
-__SYSCALL(__NR_swapoff, sys_swapoff)
-#define __NR_mprotect 226
-__SYSCALL(__NR_mprotect, sys_mprotect)
-#define __NR_msync 227
-__SYSCALL(__NR_msync, sys_msync)
-#define __NR_mlock 228
-__SYSCALL(__NR_mlock, sys_mlock)
-#define __NR_munlock 229
-__SYSCALL(__NR_munlock, sys_munlock)
-#define __NR_mlockall 230
-__SYSCALL(__NR_mlockall, sys_mlockall)
-#define __NR_munlockall 231
-__SYSCALL(__NR_munlockall, sys_munlockall)
-#define __NR_mincore 232
-__SYSCALL(__NR_mincore, sys_mincore)
-#define __NR_madvise 233
-__SYSCALL(__NR_madvise, sys_madvise)
-#define __NR_remap_file_pages 234
-__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
-#define __NR_mbind 235
-__SYSCALL(__NR_mbind, sys_mbind)
-#define __NR_get_mempolicy 236
-__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
-#define __NR_set_mempolicy 237
-__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
-#define __NR_migrate_pages 238
-__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
-#define __NR_move_pages 239
-__SYSCALL(__NR_move_pages, sys_move_pages)
-#endif
-
-#define __NR_rt_tgsigqueueinfo 240
-__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
-#define __NR_perf_event_open 241
-__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
-#define __NR_accept4 242
-__SYSCALL(__NR_accept4, sys_accept4)
-#define __NR_recvmmsg 243
-__SYSCALL(__NR_recvmmsg, sys_recvmmsg)
-
-/*
- * Architectures may provide up to 16 syscalls of their own
- * starting with this value.
- */
-#define __NR_arch_specific_syscall 244
-
-#define __NR_wait4 260
-__SYSCALL(__NR_wait4, sys_wait4)
-#define __NR_prlimit64 261
-__SYSCALL(__NR_prlimit64, sys_prlimit64)
-#define __NR_fanotify_init 262
-__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
-#define __NR_fanotify_mark 263
-__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
-
-#undef __NR_syscalls
-#define __NR_syscalls 264
-
-/*
- * All syscalls below here should go away really,
- * these are provided for both review and as a porting
- * help for the C library version.
-*
- * Last chance: are any of these important enough to
- * enable by default?
- */
-#ifdef __ARCH_WANT_SYSCALL_NO_AT
-#define __NR_open 1024
-__SYSCALL(__NR_open, sys_open)
-#define __NR_link 1025
-__SYSCALL(__NR_link, sys_link)
-#define __NR_unlink 1026
-__SYSCALL(__NR_unlink, sys_unlink)
-#define __NR_mknod 1027
-__SYSCALL(__NR_mknod, sys_mknod)
-#define __NR_chmod 1028
-__SYSCALL(__NR_chmod, sys_chmod)
-#define __NR_chown 1029
-__SYSCALL(__NR_chown, sys_chown)
-#define __NR_mkdir 1030
-__SYSCALL(__NR_mkdir, sys_mkdir)
-#define __NR_rmdir 1031
-__SYSCALL(__NR_rmdir, sys_rmdir)
-#define __NR_lchown 1032
-__SYSCALL(__NR_lchown, sys_lchown)
-#define __NR_access 1033
-__SYSCALL(__NR_access, sys_access)
-#define __NR_rename 1034
-__SYSCALL(__NR_rename, sys_rename)
-#define __NR_readlink 1035
-__SYSCALL(__NR_readlink, sys_readlink)
-#define __NR_symlink 1036
-__SYSCALL(__NR_symlink, sys_symlink)
-#define __NR_utimes 1037
-__SYSCALL(__NR_utimes, sys_utimes)
-#define __NR3264_stat 1038
-__SC_3264(__NR3264_stat, sys_stat64, sys_newstat)
-#define __NR3264_lstat 1039
-__SC_3264(__NR3264_lstat, sys_lstat64, sys_newlstat)
-
-#undef __NR_syscalls
-#define __NR_syscalls (__NR3264_lstat+1)
-#endif /* __ARCH_WANT_SYSCALL_NO_AT */
-
-#ifdef __ARCH_WANT_SYSCALL_NO_FLAGS
-#define __NR_pipe 1040
-__SYSCALL(__NR_pipe, sys_pipe)
-#define __NR_dup2 1041
-__SYSCALL(__NR_dup2, sys_dup2)
-#define __NR_epoll_create 1042
-__SYSCALL(__NR_epoll_create, sys_epoll_create)
-#define __NR_inotify_init 1043
-__SYSCALL(__NR_inotify_init, sys_inotify_init)
-#define __NR_eventfd 1044
-__SYSCALL(__NR_eventfd, sys_eventfd)
-#define __NR_signalfd 1045
-__SYSCALL(__NR_signalfd, sys_signalfd)
-
-#undef __NR_syscalls
-#define __NR_syscalls (__NR_signalfd+1)
-#endif /* __ARCH_WANT_SYSCALL_NO_FLAGS */
-
-#if (__BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)) && \
- defined(__ARCH_WANT_SYSCALL_OFF_T)
-#define __NR_sendfile 1046
-__SYSCALL(__NR_sendfile, sys_sendfile)
-#define __NR_ftruncate 1047
-__SYSCALL(__NR_ftruncate, sys_ftruncate)
-#define __NR_truncate 1048
-__SYSCALL(__NR_truncate, sys_truncate)
-#define __NR_stat 1049
-__SYSCALL(__NR_stat, sys_newstat)
-#define __NR_lstat 1050
-__SYSCALL(__NR_lstat, sys_newlstat)
-#define __NR_fstat 1051
-__SYSCALL(__NR_fstat, sys_newfstat)
-#define __NR_fcntl 1052
-__SYSCALL(__NR_fcntl, sys_fcntl)
-#define __NR_fadvise64 1053
-#define __ARCH_WANT_SYS_FADVISE64
-__SYSCALL(__NR_fadvise64, sys_fadvise64)
-#define __NR_newfstatat 1054
-#define __ARCH_WANT_SYS_NEWFSTATAT
-__SYSCALL(__NR_newfstatat, sys_newfstatat)
-#define __NR_fstatfs 1055
-__SYSCALL(__NR_fstatfs, sys_fstatfs)
-#define __NR_statfs 1056
-__SYSCALL(__NR_statfs, sys_statfs)
-#define __NR_lseek 1057
-__SYSCALL(__NR_lseek, sys_lseek)
-#define __NR_mmap 1058
-__SYSCALL(__NR_mmap, sys_mmap)
-
-#undef __NR_syscalls
-#define __NR_syscalls (__NR_mmap+1)
-#endif /* 32 bit off_t syscalls */
-
-#ifdef __ARCH_WANT_SYSCALL_DEPRECATED
-#define __NR_alarm 1059
-#define __ARCH_WANT_SYS_ALARM
-__SYSCALL(__NR_alarm, sys_alarm)
-#define __NR_getpgrp 1060
-#define __ARCH_WANT_SYS_GETPGRP
-__SYSCALL(__NR_getpgrp, sys_getpgrp)
-#define __NR_pause 1061
-#define __ARCH_WANT_SYS_PAUSE
-__SYSCALL(__NR_pause, sys_pause)
-#define __NR_time 1062
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_COMPAT_SYS_TIME
-__SYSCALL(__NR_time, sys_time)
-#define __NR_utime 1063
-#define __ARCH_WANT_SYS_UTIME
-__SYSCALL(__NR_utime, sys_utime)
-
-#define __NR_creat 1064
-__SYSCALL(__NR_creat, sys_creat)
-#define __NR_getdents 1065
-#define __ARCH_WANT_SYS_GETDENTS
-__SYSCALL(__NR_getdents, sys_getdents)
-#define __NR_futimesat 1066
-__SYSCALL(__NR_futimesat, sys_futimesat)
-#define __NR_select 1067
-#define __ARCH_WANT_SYS_SELECT
-__SYSCALL(__NR_select, sys_select)
-#define __NR_poll 1068
-__SYSCALL(__NR_poll, sys_poll)
-#define __NR_epoll_wait 1069
-__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
-#define __NR_ustat 1070
-__SYSCALL(__NR_ustat, sys_ustat)
-#define __NR_vfork 1071
-__SYSCALL(__NR_vfork, sys_vfork)
-#define __NR_oldwait4 1072
-__SYSCALL(__NR_oldwait4, sys_wait4)
-#define __NR_recv 1073
-__SYSCALL(__NR_recv, sys_recv)
-#define __NR_send 1074
-__SYSCALL(__NR_send, sys_send)
-#define __NR_bdflush 1075
-__SYSCALL(__NR_bdflush, sys_bdflush)
-#define __NR_umount 1076
-__SYSCALL(__NR_umount, sys_oldumount)
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __NR_uselib 1077
-__SYSCALL(__NR_uselib, sys_uselib)
-#define __NR__sysctl 1078
-__SYSCALL(__NR__sysctl, sys_sysctl)
-
-#define __NR_fork 1079
-#ifdef CONFIG_MMU
-__SYSCALL(__NR_fork, sys_fork)
-#else
-__SYSCALL(__NR_fork, sys_ni_syscall)
-#endif /* CONFIG_MMU */
-
-#undef __NR_syscalls
-#define __NR_syscalls (__NR_fork+1)
-
-#endif /* __ARCH_WANT_SYSCALL_DEPRECATED */
-
-/*
- * 32 bit systems traditionally used different
- * syscalls for off_t and loff_t arguments, while
- * 64 bit systems only need the off_t version.
- * For new 32 bit platforms, there is no need to
- * implement the old 32 bit off_t syscalls, so
- * they take different names.
- * Here we map the numbers so that both versions
- * use the same syscall table layout.
- */
-#if __BITS_PER_LONG == 64 && !defined(__SYSCALL_COMPAT)
-#define __NR_fcntl __NR3264_fcntl
-#define __NR_statfs __NR3264_statfs
-#define __NR_fstatfs __NR3264_fstatfs
-#define __NR_truncate __NR3264_truncate
-#define __NR_ftruncate __NR3264_ftruncate
-#define __NR_lseek __NR3264_lseek
-#define __NR_sendfile __NR3264_sendfile
-#define __NR_newfstatat __NR3264_fstatat
-#define __NR_fstat __NR3264_fstat
-#define __NR_mmap __NR3264_mmap
-#define __NR_fadvise64 __NR3264_fadvise64
-#ifdef __NR3264_stat
-#define __NR_stat __NR3264_stat
-#define __NR_lstat __NR3264_lstat
-#endif
-#else
-#define __NR_fcntl64 __NR3264_fcntl
-#define __NR_statfs64 __NR3264_statfs
-#define __NR_fstatfs64 __NR3264_fstatfs
-#define __NR_truncate64 __NR3264_truncate
-#define __NR_ftruncate64 __NR3264_ftruncate
-#define __NR_llseek __NR3264_lseek
-#define __NR_sendfile64 __NR3264_sendfile
-#define __NR_fstatat64 __NR3264_fstatat
-#define __NR_fstat64 __NR3264_fstat
-#define __NR_mmap2 __NR3264_mmap
-#define __NR_fadvise64_64 __NR3264_fadvise64
-#ifdef __NR3264_stat
-#define __NR_stat64 __NR3264_stat
-#define __NR_lstat64 __NR3264_lstat
-#endif
-#endif
-
-#ifdef __KERNEL__
+#include <uapi/asm-generic/unistd.h>
+#include <linux/export.h>
/*
* These are required system calls, we should
@@ -868,19 +10,3 @@ __SYSCALL(__NR_fork, sys_ni_syscall)
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_LLSEEK
#endif
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
-
-/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#ifndef cond_syscall
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-#endif
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_GENERIC_UNISTD_H */
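For context on how the table (now living under uapi/) is consumed: an architecture defines __SYSCALL() to emit one table entry and then includes the header, so syscall numbers and their handlers cannot drift apart. Below is a minimal stand-alone model of that X-macro pattern; the real kernel expands <asm/unistd.h> into its sys_call_table, and all names here are illustrative.

#include <stdio.h>

static long sys_read_model(void)  { return 1; }
static long sys_write_model(void) { return 2; }

/* The "list" that would live in the header, one X() per syscall. */
#define SYSCALL_LIST(X)		\
	X(0, sys_read_model)	\
	X(1, sys_write_model)

/* Expand the list into a dispatch table keyed by syscall number. */
#define __SYSCALL(nr, sym)	[nr] = sym,
static long (*const table_model[])(void) = { SYSCALL_LIST(__SYSCALL) };
#undef __SYSCALL

int main(void)
{
	printf("%ld %ld\n", table_model[0](), table_model[1]());
	return 0;
}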
diff --git a/include/asm-generic/user.h b/include/asm-generic/user.h
index 8b9c3c960ae..35638c34700 100644
--- a/include/asm-generic/user.h
+++ b/include/asm-generic/user.h
@@ -1,8 +1,8 @@
#ifndef __ASM_GENERIC_USER_H
#define __ASM_GENERIC_USER_H
/*
- * This file may define a 'struct user' structure. However, it it only
- * used for a.out file, which are not supported on new architectures.
+ * This file may define a 'struct user' structure. However, it is only
+ * used for a.out files, which are not supported on new architectures.
*/
#endif /* __ASM_GENERIC_USER_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bd69d79208d..c1c0b0cf39b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -15,7 +15,7 @@
* HEAD_TEXT_SECTION
* INIT_TEXT_SECTION(PAGE_SIZE)
* INIT_DATA_SECTION(...)
- * PERCPU(PAGE_SIZE)
+ * PERCPU_SECTION(CACHELINE_SIZE)
* __init_end = .;
*
* _stext = .;
@@ -52,13 +52,7 @@
#define LOAD_OFFSET 0
#endif
-#ifndef SYMBOL_PREFIX
-#define VMLINUX_SYMBOL(sym) sym
-#else
-#define PASTE2(x,y) x##y
-#define PASTE(x,y) PASTE2(x,y)
-#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
-#endif
+#include <linux/export.h>
/* Align . to a 8 byte boundary equals to maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)
@@ -67,20 +61,13 @@
* Align to a 32 byte boundary equal to the
* alignment gcc 4.5 uses for a struct
*/
-#define STRUCT_ALIGN() . = ALIGN(32)
+#define STRUCT_ALIGNMENT 32
+#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
/* The actual configuration determine if the init/exit sections
* are handled as text/data or they can be discarded (which
* often happens at runtime)
*/
-#ifdef CONFIG_HOTPLUG
-#define DEV_KEEP(sec) *(.dev##sec)
-#define DEV_DISCARD(sec)
-#else
-#define DEV_KEEP(sec)
-#define DEV_DISCARD(sec) *(.dev##sec)
-#endif
-
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec) *(.cpu##sec)
#define CPU_DISCARD(sec)
@@ -122,8 +109,18 @@
#define BRANCH_PROFILE()
#endif
+#ifdef CONFIG_KPROBES
+#define KPROBE_BLACKLIST() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
+ *(_kprobe_blacklist) \
+ VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
+#else
+#define KPROBE_BLACKLIST()
+#endif
+
#ifdef CONFIG_EVENT_TRACING
-#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
+#define FTRACE_EVENTS() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start_ftrace_events) = .; \
*(_ftrace_events) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
@@ -134,34 +131,62 @@
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
*(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
+#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
+ *(__tracepoint_str) /* Trace_printk fmt' pointer */ \
+ VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
+#define TRACEPOINT_STR()
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
-#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
+#define TRACE_SYSCALLS() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
*(__syscalls_metadata) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif
+
+#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
+#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
+#define OF_TABLE(cfg, name) __OF_TABLE(config_enabled(cfg), name)
+#define _OF_TABLE_0(name)
+#define _OF_TABLE_1(name) \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__##name##_of_table) = .; \
+ *(__##name##_of_table) \
+ *(__##name##_of_table_end)
+
+#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
+#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
+#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
+#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
+#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
+#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
+
+#define KERNEL_DTB() \
+ STRUCT_ALIGN(); \
+ VMLINUX_SYMBOL(__dtb_start) = .; \
+ *(.dtb.init.rodata) \
+ VMLINUX_SYMBOL(__dtb_end) = .;
+
/* .data section */
#define DATA_DATA \
*(.data) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
- DEV_KEEP(init.data) \
- DEV_KEEP(exit.data) \
- CPU_KEEP(init.data) \
- CPU_KEEP(exit.data) \
MEM_KEEP(init.data) \
MEM_KEEP(exit.data) \
- . = ALIGN(32); \
- VMLINUX_SYMBOL(__start___tracepoints) = .; \
+ *(.data.unlikely) \
+ STRUCT_ALIGN(); \
*(__tracepoints) \
- VMLINUX_SYMBOL(__stop___tracepoints) = .; \
/* implement dynamic printk debug */ \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start___jump_table) = .; \
+ *(__jump_table) \
+ VMLINUX_SYMBOL(__stop___jump_table) = .; \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
*(__verbose) \
@@ -169,12 +194,7 @@
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
- \
- STRUCT_ALIGN(); \
- FTRACE_EVENTS() \
- \
- STRUCT_ALIGN(); \
- TRACE_SYSCALLS()
+ TRACEPOINT_STR()
/*
* Data section helpers
@@ -192,7 +212,8 @@
#define READ_MOSTLY_DATA(align) \
. = ALIGN(align); \
- *(.data..read_mostly)
+ *(.data..read_mostly) \
+ . = ALIGN(align);
#define CACHELINE_ALIGNED_DATA(align) \
. = ALIGN(align); \
@@ -211,7 +232,10 @@
VMLINUX_SYMBOL(__start_rodata) = .; \
*(.rodata) *(.rodata.*) \
*(__vermagic) /* Kernel version magic */ \
- *(__markers_strings) /* Markers: strings */ \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
+ *(__tracepoints_ptrs) /* Tracepoints: pointer array */\
+ VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
\
@@ -221,8 +245,6 @@
\
BUG_TABLE \
\
- JUMP_TABLE \
- \
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
@@ -255,82 +277,75 @@
VMLINUX_SYMBOL(__end_builtin_fw) = .; \
} \
\
- /* RapidIO route ops */ \
- .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \
- *(.rio_switch_ops) \
- VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \
- } \
- \
TRACEDATA \
\
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(__ksymtab) \
+ *(SORT(___ksymtab+*)) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(__ksymtab_gpl) \
+ *(SORT(___ksymtab_gpl+*)) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
- *(__ksymtab_unused) \
+ *(SORT(___ksymtab_unused+*)) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
- *(__ksymtab_unused_gpl) \
+ *(SORT(___ksymtab_unused_gpl+*)) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
- *(__ksymtab_gpl_future) \
+ *(SORT(___ksymtab_gpl_future+*)) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
- *(__kcrctab) \
+ *(SORT(___kcrctab+*)) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
- *(__kcrctab_gpl) \
+ *(SORT(___kcrctab_gpl+*)) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
- *(__kcrctab_unused) \
+ *(SORT(___kcrctab_unused+*)) \
VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
- *(__kcrctab_unused_gpl) \
+ *(SORT(___kcrctab_unused_gpl+*)) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
- *(__kcrctab_gpl_future) \
+ *(SORT(___kcrctab_gpl_future+*)) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
} \
\
@@ -342,10 +357,6 @@
/* __*init sections */ \
__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
*(.ref.rodata) \
- DEV_KEEP(init.rodata) \
- DEV_KEEP(exit.rodata) \
- CPU_KEEP(init.rodata) \
- CPU_KEEP(exit.rodata) \
MEM_KEEP(init.rodata) \
MEM_KEEP(exit.rodata) \
} \
@@ -355,6 +366,13 @@
VMLINUX_SYMBOL(__start___param) = .; \
*(__param) \
VMLINUX_SYMBOL(__stop___param) = .; \
+ } \
+ \
+ /* Built-in module versions. */ \
+ __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___modver) = .; \
+ *(__modver) \
+ VMLINUX_SYMBOL(__stop___modver) = .; \
. = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
} \
@@ -379,10 +397,6 @@
*(.text.hot) \
*(.text) \
*(.ref.text) \
- DEV_KEEP(init.text) \
- DEV_KEEP(exit.text) \
- CPU_KEEP(init.text) \
- CPU_KEEP(exit.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
*(.text.unlikely)
@@ -410,6 +424,12 @@
*(.kprobes.text) \
VMLINUX_SYMBOL(__kprobes_text_end) = .;
+#define ENTRY_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__entry_text_start) = .; \
+ *(.entry.text) \
+ VMLINUX_SYMBOL(__entry_text_end) = .;
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
ALIGN_FUNCTION(); \
@@ -452,6 +472,7 @@
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
*(.ctors) \
+ *(.init_array) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
@@ -460,35 +481,33 @@
/* init and exit section handling */
#define INIT_DATA \
*(.init.data) \
- DEV_DISCARD(init.data) \
- CPU_DISCARD(init.data) \
MEM_DISCARD(init.data) \
KERNEL_CTORS() \
- *(.init.rodata) \
MCOUNT_REC() \
- DEV_DISCARD(init.rodata) \
- CPU_DISCARD(init.rodata) \
- MEM_DISCARD(init.rodata)
+ *(.init.rodata) \
+ FTRACE_EVENTS() \
+ TRACE_SYSCALLS() \
+ KPROBE_BLACKLIST() \
+ MEM_DISCARD(init.rodata) \
+ CLK_OF_TABLES() \
+ RESERVEDMEM_OF_TABLES() \
+ CLKSRC_OF_TABLES() \
+ CPU_METHOD_OF_TABLES() \
+ KERNEL_DTB() \
+ IRQCHIP_OF_MATCH_TABLE() \
+ EARLYCON_OF_TABLES()
#define INIT_TEXT \
*(.init.text) \
- DEV_DISCARD(init.text) \
- CPU_DISCARD(init.text) \
MEM_DISCARD(init.text)
#define EXIT_DATA \
*(.exit.data) \
- DEV_DISCARD(exit.data) \
- DEV_DISCARD(exit.rodata) \
- CPU_DISCARD(exit.data) \
- CPU_DISCARD(exit.rodata) \
MEM_DISCARD(exit.data) \
MEM_DISCARD(exit.rodata)
#define EXIT_TEXT \
*(.exit.text) \
- DEV_DISCARD(exit.text) \
- CPU_DISCARD(exit.text) \
MEM_DISCARD(exit.text)
#define EXIT_CALL \
@@ -505,9 +524,18 @@
*(.scommon) \
}
+/*
+ * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
+ * sections to the front of bss.
+ */
+#ifndef BSS_FIRST_SECTIONS
+#define BSS_FIRST_SECTIONS
+#endif
+
#define BSS(bss_align) \
. = ALIGN(bss_align); \
.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
+ BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \
*(.dynbss) \
*(.bss) \
@@ -566,14 +594,6 @@
#define BUG_TABLE
#endif
-#define JUMP_TABLE \
- . = ALIGN(8); \
- __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___jump_table) = .; \
- *(__jump_table) \
- VMLINUX_SYMBOL(__stop___jump_table) = .; \
- }
-
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
. = ALIGN(4); \
@@ -599,30 +619,23 @@
*(.init.setup) \
VMLINUX_SYMBOL(__setup_end) = .;
-#define INITCALLS \
- *(.initcallearly.init) \
- VMLINUX_SYMBOL(__early_initcall_end) = .; \
- *(.initcall0.init) \
- *(.initcall0s.init) \
- *(.initcall1.init) \
- *(.initcall1s.init) \
- *(.initcall2.init) \
- *(.initcall2s.init) \
- *(.initcall3.init) \
- *(.initcall3s.init) \
- *(.initcall4.init) \
- *(.initcall4s.init) \
- *(.initcall5.init) \
- *(.initcall5s.init) \
- *(.initcallrootfs.init) \
- *(.initcall6.init) \
- *(.initcall6s.init) \
- *(.initcall7.init) \
- *(.initcall7s.init)
+#define INIT_CALLS_LEVEL(level) \
+ VMLINUX_SYMBOL(__initcall##level##_start) = .; \
+ *(.initcall##level##.init) \
+ *(.initcall##level##s.init) \
#define INIT_CALLS \
VMLINUX_SYMBOL(__initcall_start) = .; \
- INITCALLS \
+ *(.initcallearly.init) \
+ INIT_CALLS_LEVEL(0) \
+ INIT_CALLS_LEVEL(1) \
+ INIT_CALLS_LEVEL(2) \
+ INIT_CALLS_LEVEL(3) \
+ INIT_CALLS_LEVEL(4) \
+ INIT_CALLS_LEVEL(5) \
+ INIT_CALLS_LEVEL(rootfs) \
+ INIT_CALLS_LEVEL(6) \
+ INIT_CALLS_LEVEL(7) \
VMLINUX_SYMBOL(__initcall_end) = .;
#define CON_INITCALL \
@@ -665,14 +678,41 @@
}
/**
+ * PERCPU_INPUT - the percpu input sections
+ * @cacheline: cacheline size
+ *
+ * The core percpu section names and core symbols which do not rely
+ * directly upon load addresses.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ */
+#define PERCPU_INPUT(cacheline) \
+ VMLINUX_SYMBOL(__per_cpu_start) = .; \
+ *(.data..percpu..first) \
+ . = ALIGN(PAGE_SIZE); \
+ *(.data..percpu..page_aligned) \
+ . = ALIGN(cacheline); \
+ *(.data..percpu..read_mostly) \
+ . = ALIGN(cacheline); \
+ *(.data..percpu) \
+ *(.data..percpu..shared_aligned) \
+ VMLINUX_SYMBOL(__per_cpu_end) = .;
+
+/**
* PERCPU_VADDR - define output section for percpu area
+ * @cacheline: cacheline size
* @vaddr: explicit base address (optional)
* @phdr: destination PHDR (optional)
*
- * Macro which expands to output section for percpu area. If @vaddr
- * is not blank, it specifies explicit base address and all percpu
- * symbols will be offset from the given address. If blank, @vaddr
- * always equals @laddr + LOAD_OFFSET.
+ * Macro which expands to output section for percpu area.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ *
+ * If @vaddr is not blank, it specifies explicit base address and all
+ * percpu symbols will be offset from the given address. If blank,
+ * @vaddr always equals @laddr + LOAD_OFFSET.
*
* @phdr defines the output PHDR to use if not blank. Be warned that
* output PHDR is sticky. If @phdr is specified, the next output
@@ -681,48 +721,33 @@
*
* Note that this macros defines __per_cpu_load as an absolute symbol.
* If there is no need to put the percpu section at a predetermined
- * address, use PERCPU().
+ * address, use PERCPU_SECTION.
*/
-#define PERCPU_VADDR(vaddr, phdr) \
+#define PERCPU_VADDR(cacheline, vaddr, phdr) \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
- LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__per_cpu_start) = .; \
- *(.data..percpu..first) \
- . = ALIGN(PAGE_SIZE); \
- *(.data..percpu..page_aligned) \
- *(.data..percpu..readmostly) \
- *(.data..percpu) \
- *(.data..percpu..shared_aligned) \
- VMLINUX_SYMBOL(__per_cpu_end) = .; \
+ PERCPU_INPUT(cacheline) \
} phdr \
. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
/**
- * PERCPU - define output section for percpu area, simple version
- * @align: required alignment
+ * PERCPU_SECTION - define output section for percpu area, simple version
+ * @cacheline: cacheline size
*
- * Align to @align and outputs output section for percpu area. This
- * macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
+ * Align to PAGE_SIZE and output the output section for the percpu area. This
+ * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
* __per_cpu_start will be identical.
*
- * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
- * that __per_cpu_load is defined as a relative symbol against
- * .data..percpu which is required for relocatable x86_32
- * configuration.
+ * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
+ * except that __per_cpu_load is defined as a relative symbol against
+ * .data..percpu which is required for relocatable x86_32 configuration.
*/
-#define PERCPU(align) \
- . = ALIGN(align); \
+#define PERCPU_SECTION(cacheline) \
+ . = ALIGN(PAGE_SIZE); \
.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
- VMLINUX_SYMBOL(__per_cpu_start) = .; \
- *(.data..percpu..first) \
- . = ALIGN(PAGE_SIZE); \
- *(.data..percpu..page_aligned) \
- *(.data..percpu..readmostly) \
- *(.data..percpu) \
- *(.data..percpu..shared_aligned) \
- VMLINUX_SYMBOL(__per_cpu_end) = .; \
+ PERCPU_INPUT(cacheline) \
}
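Architectures without such constraints call the simple form from their vmlinux.lds.S, typically passing the L1 cache line size:

	/* typical usage, sketch */
	PERCPU_SECTION(L1_CACHE_BYTES)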
@@ -741,7 +766,7 @@
 * the sections that have this restriction (or similar)
 * are located before the ones requiring PAGE_SIZE alignment.
* NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
- * matches the requirment of PAGE_ALIGNED_DATA.
+ * matches the requirement of PAGE_ALIGNED_DATA.
*
* use 0 as page_align if page_aligned data is not used */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
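A typical invocation, as a sketch (architectures that keep no page-aligned data pass 0 for the second argument, per the comment above):

	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)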
diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h
new file mode 100644
index 00000000000..b1a49677fe2
--- /dev/null
+++ b/include/asm-generic/vtime.h
@@ -0,0 +1 @@
+/* no content, but patch(1) dislikes empty files */
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
new file mode 100644
index 00000000000..94f9ea8abca
--- /dev/null
+++ b/include/asm-generic/word-at-a-time.h
@@ -0,0 +1,56 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * This says "generic", but it's actually big-endian only.
+ * Little-endian can use more efficient versions of these
+ * interfaces, see for example
+ * arch/x86/include/asm/word-at-a-time.h
+ * for those.
+ */
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->low_bits) + c->low_bits;
+ return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+ long byte = 0;
+#ifdef CONFIG_64BIT
+ if (mask >> 32)
+ mask >>= 32;
+ else
+ byte = 4;
+#endif
+ if (mask >> 16)
+ mask >>= 16;
+ else
+ byte += 2;
+ return (mask >> 8) ? byte : byte + 1;
+}
+
+static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
+ return (val + c->high_bits) & ~rhs;
+}
+
+#ifndef zero_bytemask
+#define zero_bytemask(mask) (~1ul << __fls(mask))
+#endif
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
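The intended calling pattern is to load a full word, test it with has_zero(), and only on a hit pay for the mask post-processing and find_zero(). A minimal sketch, assuming the string is word-aligned and padded out to a whole word (waat_strlen is a placeholder name, not part of the header):

	static inline size_t waat_strlen(const char *s)
	{
		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
		const unsigned long *p = (const unsigned long *)s;
		unsigned long val, data;
		size_t len = 0;

		for (;;) {
			val = *p++;
			if (has_zero(val, &data, &constants)) {
				data = prep_zero_mask(val, data, &constants);
				data = create_zero_mask(data);
				return len + find_zero(data);
			}
			len += sizeof(unsigned long);
		}
	}

This is the same shape used by in-kernel callers such as the dentry name hashing in fs/namei.c.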
diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h
index aaab875e1a3..b4d843225af 100644
--- a/include/asm-generic/xor.h
+++ b/include/asm-generic/xor.h
@@ -13,7 +13,7 @@
* Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <asm/processor.h>
+#include <linux/prefetch.h>
static void
xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
@@ -693,7 +693,7 @@ static struct xor_block_template xor_block_32regs = {
.do_5 = xor_32regs_5,
};
-static struct xor_block_template xor_block_8regs_p = {
+static struct xor_block_template xor_block_8regs_p __maybe_unused = {
.name = "8regs_prefetch",
.do_2 = xor_8regs_p_2,
.do_3 = xor_8regs_p_3,
@@ -701,7 +701,7 @@ static struct xor_block_template xor_block_8regs_p = {
.do_5 = xor_8regs_p_5,
};
-static struct xor_block_template xor_block_32regs_p = {
+static struct xor_block_template xor_block_32regs_p __maybe_unused = {
.name = "32regs_prefetch",
.do_2 = xor_32regs_p_2,
.do_3 = xor_32regs_p_3,