Diffstat (limited to 'include/asm-generic')
52 files changed, 1264 insertions, 581 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 1ced6413ea0..9c79e760345 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -16,6 +16,7 @@ #define __ASM_GENERIC_ATOMIC_H #include <asm/cmpxchg.h> +#include <asm/barrier.h> #ifdef CONFIG_SMP /* Force people to define core atomics */ @@ -136,12 +137,6 @@ static inline void atomic_dec(atomic_t *v) #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ - (unsigned long)(n), sizeof(*(ptr)))) - -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; @@ -188,11 +183,5 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) } #endif -/* Assume that atomic operations are already serializing */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif /* __KERNEL__ */ #endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h index 89b73e5d0fd..a1865537339 100644 --- a/include/asm-generic/audit_change_attr.h +++ b/include/asm-generic/audit_change_attr.h @@ -4,9 +4,11 @@ __NR_chmod, __NR_fchmod, #ifdef __NR_chown __NR_chown, -__NR_fchown, __NR_lchown, #endif +#ifdef __NR_fchown +__NR_fchown, +#endif __NR_setxattr, __NR_lsetxattr, __NR_fsetxattr, diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h index e7020c57b13..274575d7129 100644 --- a/include/asm-generic/audit_write.h +++ b/include/asm-generic/audit_write.h @@ -10,6 +10,12 @@ __NR_truncate, #ifdef __NR_truncate64 __NR_truncate64, #endif +#ifdef __NR_ftruncate +__NR_ftruncate, +#endif +#ifdef __NR_ftruncate64 +__NR_ftruncate64, +#endif #ifdef __NR_bind __NR_bind, /* bind can affect fs object only in one way... */ #endif diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 639d7a4d033..1402fa85538 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -1,4 +1,5 @@ -/* Generic barrier definitions, based on MN10300 definitions. +/* + * Generic barrier definitions, originally based on MN10300 definitions. * * It should be possible to use these on really simple architectures, * but it serves more as a starting point for new ports. @@ -16,35 +17,73 @@ #ifndef __ASSEMBLY__ -#define nop() asm volatile ("nop") +#include <linux/compiler.h> + +#ifndef nop +#define nop() asm volatile ("nop") +#endif /* - * Force strict CPU ordering. - * And yes, this is required on UP too when we're talking - * to devices. + * Force strict CPU ordering. And yes, this is required on UP too when we're + * talking to devices. * - * This implementation only contains a compiler barrier. + * Fall back to compiler barriers if nothing better is provided. 
*/ -#define mb() asm volatile ("": : :"memory") +#ifndef mb +#define mb() barrier() +#endif + +#ifndef rmb #define rmb() mb() -#define wmb() asm volatile ("": : :"memory") +#endif + +#ifndef wmb +#define wmb() mb() +#endif + +#ifndef read_barrier_depends +#define read_barrier_depends() do { } while (0) +#endif #ifdef CONFIG_SMP #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while (0) +#endif + +#ifndef set_mb +#define set_mb(var, value) do { (var) = (value); mb(); } while (0) +#endif + +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#endif + +#ifndef smp_mb__after_atomic +#define smp_mb__after_atomic() smp_mb() #endif -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) -#define read_barrier_depends() do {} while (0) -#define smp_read_barrier_depends() do {} while (0) +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) #endif /* !__ASSEMBLY__ */ #endif /* __ASM_GENERIC_BARRIER_H */ diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h index 280ca7a96f7..dcdcacf2fd2 100644 --- a/include/asm-generic/bitops.h +++ b/include/asm-generic/bitops.h @@ -11,14 +11,7 @@ #include <linux/irqflags.h> #include <linux/compiler.h> - -/* - * clear_bit may not imply a memory barrier - */ -#ifndef smp_mb__before_clear_bit -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() -#endif +#include <asm/barrier.h> #include <asm-generic/bitops/__ffs.h> #include <asm-generic/bitops/ffz.h> diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index 9ae6c34dc19..49673510b48 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -80,7 +80,7 @@ static inline void set_bit(int nr, volatile unsigned long *addr) * * clear_bit() is atomic and may not be reordered. However, it does * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() * in order to ensure changes are visible on other processors. 
*/ static inline void clear_bit(int nr, volatile unsigned long *addr) diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h index fa2a50b7ee6..0a7e0662347 100644 --- a/include/asm-generic/bitops/const_hweight.h +++ b/include/asm-generic/bitops/const_hweight.h @@ -5,14 +5,15 @@ * Compile time versions of __arch_hweightN() */ #define __const_hweight8(w) \ - ( (!!((w) & (1ULL << 0))) + \ - (!!((w) & (1ULL << 1))) + \ - (!!((w) & (1ULL << 2))) + \ - (!!((w) & (1ULL << 3))) + \ - (!!((w) & (1ULL << 4))) + \ - (!!((w) & (1ULL << 5))) + \ - (!!((w) & (1ULL << 6))) + \ - (!!((w) & (1ULL << 7))) ) + ((unsigned int) \ + ((!!((w) & (1ULL << 0))) + \ + (!!((w) & (1ULL << 1))) + \ + (!!((w) & (1ULL << 2))) + \ + (!!((w) & (1ULL << 3))) + \ + (!!((w) & (1ULL << 4))) + \ + (!!((w) & (1ULL << 5))) + \ + (!!((w) & (1ULL << 6))) + \ + (!!((w) & (1ULL << 7))))) #define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 )) #define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16)) diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h index 71c778033f5..998d4d544f1 100644 --- a/include/asm-generic/bitops/find.h +++ b/include/asm-generic/bitops/find.h @@ -7,6 +7,9 @@ * @addr: The address to base the search on * @offset: The bitnumber to start searching at * @size: The bitmap size in bits + * + * Returns the bit number for the next set bit + * If no bits are set, returns @size. */ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); @@ -18,6 +21,9 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long * @addr: The address to base the search on * @offset: The bitnumber to start searching at * @size: The bitmap size in bits + * + * Returns the bit number of the next zero bit + * If no bits are zero, returns @size. */ extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset); @@ -28,9 +34,10 @@ extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned /** * find_first_bit - find the first set bit in a memory region * @addr: The address to start the search at - * @size: The maximum size to search + * @size: The maximum number of bits to search * * Returns the bit number of the first set bit. + * If no bits are set, returns @size. */ extern unsigned long find_first_bit(const unsigned long *addr, unsigned long size); @@ -38,9 +45,10 @@ extern unsigned long find_first_bit(const unsigned long *addr, /** * find_first_zero_bit - find the first cleared bit in a memory region * @addr: The address to start the search at - * @size: The maximum size to search + * @size: The maximum number of bits to search * * Returns the bit number of the first cleared bit. + * If no bits are zero, returns @size. 
*/ extern unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size); diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h index 308a9e22c80..c30266e9480 100644 --- a/include/asm-generic/bitops/lock.h +++ b/include/asm-generic/bitops/lock.h @@ -20,7 +20,7 @@ */ #define clear_bit_unlock(nr, addr) \ do { \ - smp_mb__before_clear_bit(); \ + smp_mb__before_atomic(); \ clear_bit(nr, addr); \ } while (0) diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 7d10f962aa1..630dd237223 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -52,7 +52,7 @@ struct bug_entry { #endif #ifndef HAVE_ARCH_BUG_ON -#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0) +#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0) #endif /* @@ -106,33 +106,6 @@ extern void warn_slowpath_null(const char *file, const int line); unlikely(__ret_warn_on); \ }) -#else /* !CONFIG_BUG */ -#ifndef HAVE_ARCH_BUG -#define BUG() do {} while(0) -#endif - -#ifndef HAVE_ARCH_BUG_ON -#define BUG_ON(condition) do { if (condition) ; } while(0) -#endif - -#ifndef HAVE_ARCH_WARN_ON -#define WARN_ON(condition) ({ \ - int __ret_warn_on = !!(condition); \ - unlikely(__ret_warn_on); \ -}) -#endif - -#ifndef WARN -#define WARN(condition, format...) ({ \ - int __ret_warn_on = !!(condition); \ - unlikely(__ret_warn_on); \ -}) -#endif - -#define WARN_TAINT(condition, taint, format...) WARN_ON(condition) - -#endif - #define WARN_ON_ONCE(condition) ({ \ static bool __section(.data.unlikely) __warned; \ int __ret_warn_once = !!(condition); \ @@ -163,6 +136,37 @@ extern void warn_slowpath_null(const char *file, const int line); unlikely(__ret_warn_once); \ }) +#else /* !CONFIG_BUG */ +#ifndef HAVE_ARCH_BUG +#define BUG() do {} while (1) +#endif + +#ifndef HAVE_ARCH_BUG_ON +#define BUG_ON(condition) do { if (condition) ; } while (0) +#endif + +#ifndef HAVE_ARCH_WARN_ON +#define WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + no_printk(format); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#define WARN_ON_ONCE(condition) WARN_ON(condition) +#define WARN_ONCE(condition, format...) WARN(condition, format) +#define WARN_TAINT(condition, taint, format...) WARN(condition, format) +#define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format) + +#endif + /* * WARN_ON_SMP() is for cases that the warning is either * meaningless for !SMP or may even cause failures. diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h index c084767c88b..59811df58c5 100644 --- a/include/asm-generic/checksum.h +++ b/include/asm-generic/checksum.h @@ -38,12 +38,15 @@ extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, csum_partial_copy((src), (dst), (len), (sum)) #endif +#ifndef ip_fast_csum /* * This is a version of ip_compute_csum() optimized for IP headers, * which always checksum on 4 octet boundaries. 
*/ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); +#endif +#ifndef csum_fold /* * Fold a partial checksum */ @@ -54,6 +57,7 @@ static inline __sum16 csum_fold(__wsum csum) sum = (sum & 0xffff) + (sum >> 16); return (__force __sum16)~sum; } +#endif #ifndef csum_tcpudp_nofold /* diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h index 2533fddd34a..70bef78912b 100644 --- a/include/asm-generic/cmpxchg-local.h +++ b/include/asm-generic/cmpxchg-local.h @@ -4,7 +4,8 @@ #include <linux/types.h> #include <linux/irqflags.h> -extern unsigned long wrong_size_cmpxchg(volatile void *ptr); +extern unsigned long wrong_size_cmpxchg(volatile void *ptr) + __noreturn; /* * Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned @@ -21,7 +22,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr, if (size == 8 && sizeof(unsigned long) != 8) wrong_size_cmpxchg(ptr); - local_irq_save(flags); + raw_local_irq_save(flags); switch (size) { case 1: prev = *(u8 *)ptr; if (prev == old) @@ -42,7 +43,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr, default: wrong_size_cmpxchg(ptr); } - local_irq_restore(flags); + raw_local_irq_restore(flags); return prev; } @@ -55,11 +56,11 @@ static inline u64 __cmpxchg64_local_generic(volatile void *ptr, u64 prev; unsigned long flags; - local_irq_save(flags); + raw_local_irq_save(flags); prev = *(u64 *)ptr; if (prev == old) *(u64 *)ptr = new; - local_irq_restore(flags); + raw_local_irq_restore(flags); return prev; } diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h index 14883026015..811fb1e9b06 100644 --- a/include/asm-generic/cmpxchg.h +++ b/include/asm-generic/cmpxchg.h @@ -92,6 +92,16 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size) */ #include <asm-generic/cmpxchg-local.h> +#ifndef cmpxchg_local +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ + (unsigned long)(n), sizeof(*(ptr)))) +#endif + +#ifndef cmpxchg64_local +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#endif + #define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n)) #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h index 9a62937c56c..51969436b8b 100644 --- a/include/asm-generic/cputime.h +++ b/include/asm-generic/cputime.h @@ -4,66 +4,12 @@ #include <linux/time.h> #include <linux/jiffies.h> -typedef unsigned long __nocast cputime_t; - -#define cputime_one_jiffy jiffies_to_cputime(1) -#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct) -#define cputime_to_scaled(__ct) (__ct) -#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz) - -typedef u64 __nocast cputime64_t; - -#define cputime64_to_jiffies64(__ct) (__force u64)(__ct) -#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif) - -#define nsecs_to_cputime64(__ct) \ - jiffies64_to_cputime64(nsecs_to_jiffies64(__ct)) - - -/* - * Convert cputime to microseconds and back. - */ -#define cputime_to_usecs(__ct) \ - jiffies_to_usecs(cputime_to_jiffies(__ct)) -#define usecs_to_cputime(__usec) \ - jiffies_to_cputime(usecs_to_jiffies(__usec)) -#define usecs_to_cputime64(__usec) \ - jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000)) - -/* - * Convert cputime to seconds and back. 
- */ -#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ) -#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ) - -/* - * Convert cputime to timespec and back. - */ -#define timespec_to_cputime(__val) \ - jiffies_to_cputime(timespec_to_jiffies(__val)) -#define cputime_to_timespec(__ct,__val) \ - jiffies_to_timespec(cputime_to_jiffies(__ct),__val) - -/* - * Convert cputime to timeval and back. - */ -#define timeval_to_cputime(__val) \ - jiffies_to_cputime(timeval_to_jiffies(__val)) -#define cputime_to_timeval(__ct,__val) \ - jiffies_to_timeval(cputime_to_jiffies(__ct),__val) - -/* - * Convert cputime to clock and back. - */ -#define cputime_to_clock_t(__ct) \ - jiffies_to_clock_t(cputime_to_jiffies(__ct)) -#define clock_t_to_cputime(__x) \ - jiffies_to_cputime(clock_t_to_jiffies(__x)) +#ifndef CONFIG_VIRT_CPU_ACCOUNTING +# include <asm-generic/cputime_jiffies.h> +#endif -/* - * Convert cputime64 to clock. - */ -#define cputime64_to_clock_t(__ct) \ - jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct)) +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +# include <asm-generic/cputime_nsecs.h> +#endif #endif diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h new file mode 100644 index 00000000000..d5cb78f5398 --- /dev/null +++ b/include/asm-generic/cputime_jiffies.h @@ -0,0 +1,74 @@ +#ifndef _ASM_GENERIC_CPUTIME_JIFFIES_H +#define _ASM_GENERIC_CPUTIME_JIFFIES_H + +typedef unsigned long __nocast cputime_t; + +#define cputime_one_jiffy jiffies_to_cputime(1) +#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct) +#define cputime_to_scaled(__ct) (__ct) +#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz) + +typedef u64 __nocast cputime64_t; + +#define cputime64_to_jiffies64(__ct) (__force u64)(__ct) +#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif) + + +/* + * Convert nanoseconds <-> cputime + */ +#define cputime_to_nsecs(__ct) \ + jiffies_to_nsecs(cputime_to_jiffies(__ct)) +#define nsecs_to_cputime64(__nsec) \ + jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec)) +#define nsecs_to_cputime(__nsec) \ + jiffies_to_cputime(nsecs_to_jiffies(__nsec)) + + +/* + * Convert cputime to microseconds and back. + */ +#define cputime_to_usecs(__ct) \ + jiffies_to_usecs(cputime_to_jiffies(__ct)) +#define usecs_to_cputime(__usec) \ + jiffies_to_cputime(usecs_to_jiffies(__usec)) +#define usecs_to_cputime64(__usec) \ + jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000)) + +/* + * Convert cputime to seconds and back. + */ +#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ) +#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ) + +/* + * Convert cputime to timespec and back. + */ +#define timespec_to_cputime(__val) \ + jiffies_to_cputime(timespec_to_jiffies(__val)) +#define cputime_to_timespec(__ct,__val) \ + jiffies_to_timespec(cputime_to_jiffies(__ct),__val) + +/* + * Convert cputime to timeval and back. + */ +#define timeval_to_cputime(__val) \ + jiffies_to_cputime(timeval_to_jiffies(__val)) +#define cputime_to_timeval(__ct,__val) \ + jiffies_to_timeval(cputime_to_jiffies(__ct),__val) + +/* + * Convert cputime to clock and back. + */ +#define cputime_to_clock_t(__ct) \ + jiffies_to_clock_t(cputime_to_jiffies(__ct)) +#define clock_t_to_cputime(__x) \ + jiffies_to_cputime(clock_t_to_jiffies(__x)) + +/* + * Convert cputime64 to clock. 
+ */ +#define cputime64_to_clock_t(__ct) \ + jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct)) + +#endif diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h new file mode 100644 index 00000000000..4e817606c54 --- /dev/null +++ b/include/asm-generic/cputime_nsecs.h @@ -0,0 +1,117 @@ +/* + * Definitions for measuring cputime in nsecs resolution. + * + * Based on <arch/ia64/include/asm/cputime.h> + * + * Copyright (C) 2007 FUJITSU LIMITED + * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#ifndef _ASM_GENERIC_CPUTIME_NSECS_H +#define _ASM_GENERIC_CPUTIME_NSECS_H + +#include <linux/math64.h> + +typedef u64 __nocast cputime_t; +typedef u64 __nocast cputime64_t; + +#define cputime_one_jiffy jiffies_to_cputime(1) + +#define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor) +#define cputime_div_rem(__ct, divisor, remainder) \ + div_u64_rem((__force u64)__ct, divisor, remainder); + +/* + * Convert cputime <-> jiffies (HZ) + */ +#define cputime_to_jiffies(__ct) \ + cputime_div(__ct, NSEC_PER_SEC / HZ) +#define cputime_to_scaled(__ct) (__ct) +#define jiffies_to_cputime(__jif) \ + (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ)) +#define cputime64_to_jiffies64(__ct) \ + cputime_div(__ct, NSEC_PER_SEC / HZ) +#define jiffies64_to_cputime64(__jif) \ + (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ)) + + +/* + * Convert cputime <-> nanoseconds + */ +#define cputime_to_nsecs(__ct) \ + (__force u64)(__ct) +#define nsecs_to_cputime(__nsecs) \ + (__force cputime_t)(__nsecs) + + +/* + * Convert cputime <-> microseconds + */ +#define cputime_to_usecs(__ct) \ + cputime_div(__ct, NSEC_PER_USEC) +#define usecs_to_cputime(__usecs) \ + (__force cputime_t)((__usecs) * NSEC_PER_USEC) +#define usecs_to_cputime64(__usecs) \ + (__force cputime64_t)((__usecs) * NSEC_PER_USEC) + +/* + * Convert cputime <-> seconds + */ +#define cputime_to_secs(__ct) \ + cputime_div(__ct, NSEC_PER_SEC) +#define secs_to_cputime(__secs) \ + (__force cputime_t)((__secs) * NSEC_PER_SEC) + +/* + * Convert cputime <-> timespec (nsec) + */ +static inline cputime_t timespec_to_cputime(const struct timespec *val) +{ + u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec; + return (__force cputime_t) ret; +} +static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) +{ + u32 rem; + + val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem); + val->tv_nsec = rem; +} + +/* + * Convert cputime <-> timeval (msec) + */ +static inline cputime_t timeval_to_cputime(const struct timeval *val) +{ + u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC; + return (__force cputime_t) ret; +} +static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) +{ + u32 rem; + + val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem); + val->tv_usec = rem / NSEC_PER_USEC; +} + +/* + * Convert cputime <-> clock (USER_HZ) + */ +#define cputime_to_clock_t(__ct) \ + cputime_div(__ct, (NSEC_PER_SEC / USER_HZ)) +#define clock_t_to_cputime(__x) \ + (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ)) + +/* + * Convert cputime64 to clock. 
+ */ +#define cputime64_to_clock_t(__ct) \ + cputime_to_clock_t((__force cputime_t)__ct) + +#endif diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h index 2be8a2dbc86..0297e587579 100644 --- a/include/asm-generic/dma-coherent.h +++ b/include/asm-generic/dma-coherent.h @@ -16,16 +16,13 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, * Standard interface */ #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY -extern int -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, - dma_addr_t device_addr, size_t size, int flags); +int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size, int flags); -extern void -dma_release_declared_memory(struct device *dev); +void dma_release_declared_memory(struct device *dev); -extern void * -dma_mark_declared_memory_occupied(struct device *dev, - dma_addr_t device_addr, size_t size); +void *dma_mark_declared_memory_occupied(struct device *dev, + dma_addr_t device_addr, size_t size); #else #define dma_alloc_from_coherent(dev, size, handle, ret) (0) #define dma_release_from_coherent(dev, order, vaddr) (0) diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h deleted file mode 100644 index 294b1e755ab..00000000000 --- a/include/asm-generic/dma-contiguous.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef ASM_DMA_CONTIGUOUS_H -#define ASM_DMA_CONTIGUOUS_H - -#ifdef __KERNEL__ -#ifdef CONFIG_CMA - -#include <linux/device.h> -#include <linux/dma-contiguous.h> - -static inline struct cma *dev_get_cma_area(struct device *dev) -{ - if (dev && dev->cma_area) - return dev->cma_area; - return dma_contiguous_default_area; -} - -static inline void dev_set_cma_area(struct device *dev, struct cma *cma) -{ - if (dev) - dev->cma_area = cma; - if (!dev && !dma_contiguous_default_area) - dma_contiguous_default_area = cma; -} - -#endif -#endif - -#endif diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h new file mode 100644 index 00000000000..a5de55c04fb --- /dev/null +++ b/include/asm-generic/early_ioremap.h @@ -0,0 +1,42 @@ +#ifndef _ASM_EARLY_IOREMAP_H_ +#define _ASM_EARLY_IOREMAP_H_ + +#include <linux/types.h> + +/* + * early_ioremap() and early_iounmap() are for temporary early boot-time + * mappings, before the real ioremap() is functional. + */ +extern void __iomem *early_ioremap(resource_size_t phys_addr, + unsigned long size); +extern void *early_memremap(resource_size_t phys_addr, + unsigned long size); +extern void early_iounmap(void __iomem *addr, unsigned long size); +extern void early_memunmap(void *addr, unsigned long size); + +/* + * Weak function called by early_ioremap_reset(). It does nothing, but + * architectures may provide their own version to do any needed cleanups. + */ +extern void early_ioremap_shutdown(void); + +#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU) +/* Arch-specific initialization */ +extern void early_ioremap_init(void); + +/* Generic initialization called by architecture code */ +extern void early_ioremap_setup(void); + +/* + * Called as last step in paging_init() so library can act + * accordingly for subsequent map/unmap requests. 
+ */ +extern void early_ioremap_reset(void); + +#else +static inline void early_ioremap_init(void) { } +static inline void early_ioremap_setup(void) { } +static inline void early_ioremap_reset(void) { } +#endif + +#endif /* _ASM_EARLY_IOREMAP_H_ */ diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h new file mode 100644 index 00000000000..f23174fb9ec --- /dev/null +++ b/include/asm-generic/fixmap.h @@ -0,0 +1,100 @@ +/* + * fixmap.h: compile-time virtual memory allocation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998 Ingo Molnar + * + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 + * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009 + * Break out common bits to asm-generic by Mark Salter, November 2013 + */ + +#ifndef __ASM_GENERIC_FIXMAP_H +#define __ASM_GENERIC_FIXMAP_H + +#include <linux/bug.h> + +#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) +#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) + +#ifndef __ASSEMBLY__ +/* + * 'index to address' translation. If anyone tries to use the idx + * directly without translation, we catch the bug with a NULL-deference + * kernel oops. Illegal ranges of incoming indices are caught too. + */ +static __always_inline unsigned long fix_to_virt(const unsigned int idx) +{ + BUILD_BUG_ON(idx >= __end_of_fixed_addresses); + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); + return __virt_to_fix(vaddr); +} + +/* + * Provide some reasonable defaults for page flags. + * Not all architectures use all of these different types and some + * architectures use different names. + */ +#ifndef FIXMAP_PAGE_NORMAL +#define FIXMAP_PAGE_NORMAL PAGE_KERNEL +#endif +#ifndef FIXMAP_PAGE_NOCACHE +#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE +#endif +#ifndef FIXMAP_PAGE_IO +#define FIXMAP_PAGE_IO PAGE_KERNEL_IO +#endif +#ifndef FIXMAP_PAGE_CLEAR +#define FIXMAP_PAGE_CLEAR __pgprot(0) +#endif + +#ifndef set_fixmap +#define set_fixmap(idx, phys) \ + __set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL) +#endif + +#ifndef clear_fixmap +#define clear_fixmap(idx) \ + __set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR) +#endif + +/* Return a pointer with offset calculated */ +#define __set_fixmap_offset(idx, phys, flags) \ +({ \ + unsigned long addr; \ + __set_fixmap(idx, phys, flags); \ + addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \ + addr; \ +}) + +#define set_fixmap_offset(idx, phys) \ + __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL) + +/* + * Some hardware wants to get fixmapped without caching. 
+ */ +#define set_fixmap_nocache(idx, phys) \ + __set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE) + +#define set_fixmap_offset_nocache(idx, phys) \ + __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE) + +/* + * Some fixmaps are for IO + */ +#define set_fixmap_io(idx, phys) \ + __set_fixmap(idx, phys, FIXMAP_PAGE_IO) + +#define set_fixmap_offset_io(idx, phys) \ + __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO) + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_GENERIC_FIXMAP_H */ diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index 20ca7663975..23e364538ab 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -10,6 +10,8 @@ #ifdef CONFIG_GPIOLIB #include <linux/compiler.h> +#include <linux/gpio/driver.h> +#include <linux/gpio/consumer.h> /* Platforms may implement their GPIO interface with library code, * at a small performance cost for non-inlined operations and some @@ -47,120 +49,13 @@ struct gpio; struct seq_file; struct module; struct device_node; +struct gpio_desc; -/** - * struct gpio_chip - abstract a GPIO controller - * @label: for diagnostics - * @dev: optional device providing the GPIOs - * @owner: helps prevent removal of modules exporting active GPIOs - * @request: optional hook for chip-specific activation, such as - * enabling module power and clock; may sleep - * @free: optional hook for chip-specific deactivation, such as - * disabling module power and clock; may sleep - * @get_direction: returns direction for signal "offset", 0=out, 1=in, - * (same as GPIOF_DIR_XXX), or negative error - * @direction_input: configures signal "offset" as input, or returns error - * @get: returns value for signal "offset"; for output signals this - * returns either the value actually sensed, or zero - * @direction_output: configures signal "offset" as output, or returns error - * @set_debounce: optional hook for setting debounce time for specified gpio in - * interrupt triggered gpio chips - * @set: assigns output value for signal "offset" - * @to_irq: optional hook supporting non-static gpio_to_irq() mappings; - * implementation may not sleep - * @dbg_show: optional routine to show contents in debugfs; default code - * will be used when this is omitted, but custom code can show extra - * state (such as pullup/pulldown configuration). - * @base: identifies the first GPIO number handled by this chip; or, if - * negative during registration, requests dynamic ID allocation. - * @ngpio: the number of GPIOs handled by this controller; the last GPIO - * handled is (base + ngpio - 1). - * @can_sleep: flag must be set iff get()/set() methods sleep, as they - * must while accessing GPIO expander chips over I2C or SPI - * @names: if set, must be an array of strings to use as alternative - * names for the GPIOs in this chip. Any entry in the array - * may be NULL if there is no alias for the GPIO, however the - * array must be @ngpio entries long. A name can include a single printk - * format specifier for an unsigned int. It is substituted by the actual - * number of the gpio. - * - * A gpio_chip can help platforms abstract various sources of GPIOs so - * they can all be accessed through a common programing interface. - * Example sources would be SOC controllers, FPGAs, multifunction - * chips, dedicated GPIO expanders, and so on. - * - * Each chip controls a number of signals, identified in method calls - * by "offset" values in the range 0..(@ngpio - 1). 
When those signals - * are referenced through calls like gpio_get_value(gpio), the offset - * is calculated by subtracting @base from the gpio number. - */ -struct gpio_chip { - const char *label; - struct device *dev; - struct module *owner; - - int (*request)(struct gpio_chip *chip, - unsigned offset); - void (*free)(struct gpio_chip *chip, - unsigned offset); - int (*get_direction)(struct gpio_chip *chip, - unsigned offset); - int (*direction_input)(struct gpio_chip *chip, - unsigned offset); - int (*get)(struct gpio_chip *chip, - unsigned offset); - int (*direction_output)(struct gpio_chip *chip, - unsigned offset, int value); - int (*set_debounce)(struct gpio_chip *chip, - unsigned offset, unsigned debounce); - - void (*set)(struct gpio_chip *chip, - unsigned offset, int value); - - int (*to_irq)(struct gpio_chip *chip, - unsigned offset); - - void (*dbg_show)(struct seq_file *s, - struct gpio_chip *chip); - int base; - u16 ngpio; - const char *const *names; - unsigned can_sleep:1; - unsigned exported:1; - -#if defined(CONFIG_OF_GPIO) - /* - * If CONFIG_OF is enabled, then all GPIO controllers described in the - * device tree automatically may have an OF translation - */ - struct device_node *of_node; - int of_gpio_n_cells; - int (*of_xlate)(struct gpio_chip *gc, - const struct of_phandle_args *gpiospec, u32 *flags); -#endif -#ifdef CONFIG_PINCTRL - /* - * If CONFIG_PINCTRL is enabled, then gpio controllers can optionally - * describe the actual pin range which they serve in an SoC. This - * information would be used by pinctrl subsystem to configure - * corresponding pins for gpio usage. - */ - struct list_head pin_ranges; -#endif -}; - -extern const char *gpiochip_is_requested(struct gpio_chip *chip, - unsigned offset); -extern struct gpio_chip *gpio_to_chip(unsigned gpio); -extern int __must_check gpiochip_reserve(int start, int ngpio); - -/* add/remove chips */ -extern int gpiochip_add(struct gpio_chip *chip); -extern int __must_check gpiochip_remove(struct gpio_chip *chip); -extern struct gpio_chip *gpiochip_find(void *data, - int (*match)(struct gpio_chip *chip, - void *data)); - +/* caller holds gpio_lock *OR* gpio is marked as requested */ +static inline struct gpio_chip *gpio_to_chip(unsigned gpio) +{ + return gpiod_to_chip(gpio_to_desc(gpio)); +} /* Always use the library code for GPIO management calls, * or when sleeping may be involved. 
@@ -168,107 +63,84 @@ extern struct gpio_chip *gpiochip_find(void *data, extern int gpio_request(unsigned gpio, const char *label); extern void gpio_free(unsigned gpio); -extern int gpio_direction_input(unsigned gpio); -extern int gpio_direction_output(unsigned gpio, int value); +static inline int gpio_direction_input(unsigned gpio) +{ + return gpiod_direction_input(gpio_to_desc(gpio)); +} +static inline int gpio_direction_output(unsigned gpio, int value) +{ + return gpiod_direction_output_raw(gpio_to_desc(gpio), value); +} -extern int gpio_set_debounce(unsigned gpio, unsigned debounce); +static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) +{ + return gpiod_set_debounce(gpio_to_desc(gpio), debounce); +} -extern int gpio_get_value_cansleep(unsigned gpio); -extern void gpio_set_value_cansleep(unsigned gpio, int value); +static inline int gpio_get_value_cansleep(unsigned gpio) +{ + return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio)); +} +static inline void gpio_set_value_cansleep(unsigned gpio, int value) +{ + return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value); +} /* A platform's <asm/gpio.h> code may want to inline the I/O calls when * the GPIO is constant and refers to some always-present controller, * giving direct access to chip registers and tight bitbanging loops. */ -extern int __gpio_get_value(unsigned gpio); -extern void __gpio_set_value(unsigned gpio, int value); - -extern int __gpio_cansleep(unsigned gpio); - -extern int __gpio_to_irq(unsigned gpio); - -extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); -extern int gpio_request_array(const struct gpio *array, size_t num); -extern void gpio_free_array(const struct gpio *array, size_t num); - -/* bindings for managed devices that want to request gpios */ -int devm_gpio_request(struct device *dev, unsigned gpio, const char *label); -int devm_gpio_request_one(struct device *dev, unsigned gpio, - unsigned long flags, const char *label); -void devm_gpio_free(struct device *dev, unsigned int gpio); - -#ifdef CONFIG_GPIO_SYSFS - -/* - * A sysfs interface can be exported by individual drivers if they want, - * but more typically is configured entirely from userspace. - */ -extern int gpio_export(unsigned gpio, bool direction_may_change); -extern int gpio_export_link(struct device *dev, const char *name, - unsigned gpio); -extern int gpio_sysfs_set_active_low(unsigned gpio, int value); -extern void gpio_unexport(unsigned gpio); - -#endif /* CONFIG_GPIO_SYSFS */ - -#else /* !CONFIG_GPIOLIB */ - -static inline bool gpio_is_valid(int number) +static inline int __gpio_get_value(unsigned gpio) { - /* only non-negative numbers are valid */ - return number >= 0; + return gpiod_get_raw_value(gpio_to_desc(gpio)); } - -/* platforms that don't directly support access to GPIOs through I2C, SPI, - * or other blocking infrastructure can use these wrappers. 
- */ - -static inline int gpio_cansleep(unsigned gpio) +static inline void __gpio_set_value(unsigned gpio, int value) { - return 0; + return gpiod_set_raw_value(gpio_to_desc(gpio), value); } -static inline int gpio_get_value_cansleep(unsigned gpio) +static inline int __gpio_cansleep(unsigned gpio) { - might_sleep(); - return __gpio_get_value(gpio); + return gpiod_cansleep(gpio_to_desc(gpio)); } -static inline void gpio_set_value_cansleep(unsigned gpio, int value) +static inline int __gpio_to_irq(unsigned gpio) { - might_sleep(); - __gpio_set_value(gpio, value); + return gpiod_to_irq(gpio_to_desc(gpio)); } -#endif /* !CONFIG_GPIOLIB */ +extern int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset); +extern void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); -#ifndef CONFIG_GPIO_SYSFS - -struct device; - -/* sysfs support is only available with gpiolib, where it's optional */ +extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); +extern int gpio_request_array(const struct gpio *array, size_t num); +extern void gpio_free_array(const struct gpio *array, size_t num); +/* + * A sysfs interface can be exported by individual drivers if they want, + * but more typically is configured entirely from userspace. + */ static inline int gpio_export(unsigned gpio, bool direction_may_change) { - return -ENOSYS; + return gpiod_export(gpio_to_desc(gpio), direction_may_change); } static inline int gpio_export_link(struct device *dev, const char *name, - unsigned gpio) + unsigned gpio) { - return -ENOSYS; + return gpiod_export_link(dev, name, gpio_to_desc(gpio)); } static inline int gpio_sysfs_set_active_low(unsigned gpio, int value) { - return -ENOSYS; + return gpiod_sysfs_set_active_low(gpio_to_desc(gpio), value); } static inline void gpio_unexport(unsigned gpio) { + gpiod_unexport(gpio_to_desc(gpio)); } -#endif /* CONFIG_GPIO_SYSFS */ #ifdef CONFIG_PINCTRL @@ -288,6 +160,9 @@ struct gpio_pin_range { int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins); +int gpiochip_add_pingroup_range(struct gpio_chip *chip, + struct pinctrl_dev *pctldev, + unsigned int gpio_offset, const char *pin_group); void gpiochip_remove_pin_ranges(struct gpio_chip *chip); #else @@ -299,6 +174,13 @@ gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, { return 0; } +static inline int +gpiochip_add_pingroup_range(struct gpio_chip *chip, + struct pinctrl_dev *pctldev, + unsigned int gpio_offset, const char *pin_group) +{ + return 0; +} static inline void gpiochip_remove_pin_ranges(struct gpio_chip *chip) @@ -307,4 +189,35 @@ gpiochip_remove_pin_ranges(struct gpio_chip *chip) #endif /* CONFIG_PINCTRL */ +#else /* !CONFIG_GPIOLIB */ + +static inline bool gpio_is_valid(int number) +{ + /* only non-negative numbers are valid */ + return number >= 0; +} + +/* platforms that don't directly support access to GPIOs through I2C, SPI, + * or other blocking infrastructure can use these wrappers. 
+ */ + +static inline int gpio_cansleep(unsigned gpio) +{ + return 0; +} + +static inline int gpio_get_value_cansleep(unsigned gpio) +{ + might_sleep(); + return __gpio_get_value(gpio); +} + +static inline void gpio_set_value_cansleep(unsigned gpio, int value) +{ + might_sleep(); + __gpio_set_value(gpio, value); +} + +#endif /* !CONFIG_GPIOLIB */ + #endif /* _ASM_GENERIC_GPIO_H */ diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h new file mode 100644 index 00000000000..b6312843dbd --- /dev/null +++ b/include/asm-generic/hash.h @@ -0,0 +1,9 @@ +#ifndef __ASM_GENERIC_HASH_H +#define __ASM_GENERIC_HASH_H + +struct fast_hash_ops; +static inline void setup_arch_fast_hash(struct fast_hash_ops *ops) +{ +} + +#endif /* __ASM_GENERIC_HASH_H */ diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h new file mode 100644 index 00000000000..99b490b4d05 --- /dev/null +++ b/include/asm-generic/hugetlb.h @@ -0,0 +1,40 @@ +#ifndef _ASM_GENERIC_HUGETLB_H +#define _ASM_GENERIC_HUGETLB_H + +static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) +{ + return mk_pte(page, pgprot); +} + +static inline unsigned long huge_pte_write(pte_t pte) +{ + return pte_write(pte); +} + +static inline unsigned long huge_pte_dirty(pte_t pte) +{ + return pte_dirty(pte); +} + +static inline pte_t huge_pte_mkwrite(pte_t pte) +{ + return pte_mkwrite(pte); +} + +static inline pte_t huge_pte_mkdirty(pte_t pte) +{ + return pte_mkdirty(pte); +} + +static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) +{ + return pte_modify(pte, newprot); +} + +static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + pte_clear(mm, addr, ptep); +} + +#endif /* _ASM_GENERIC_HUGETLB_H */ diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h deleted file mode 100644 index 27d4ec0dfce..00000000000 --- a/include/asm-generic/int-l64.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * asm-generic/int-l64.h - * - * Integer declarations for architectures which use "long" - * for 64-bit types. 
- */ -#ifndef _ASM_GENERIC_INT_L64_H -#define _ASM_GENERIC_INT_L64_H - -#include <uapi/asm-generic/int-l64.h> - - -#ifndef __ASSEMBLY__ - -typedef signed char s8; -typedef unsigned char u8; - -typedef signed short s16; -typedef unsigned short u16; - -typedef signed int s32; -typedef unsigned int u32; - -typedef signed long s64; -typedef unsigned long u64; - -#define S8_C(x) x -#define U8_C(x) x ## U -#define S16_C(x) x -#define U16_C(x) x ## U -#define S32_C(x) x -#define U32_C(x) x ## U -#define S64_C(x) x ## L -#define U64_C(x) x ## UL - -#else /* __ASSEMBLY__ */ - -#define S8_C(x) x -#define U8_C(x) x -#define S16_C(x) x -#define U16_C(x) x -#define S32_C(x) x -#define U32_C(x) x -#define S64_C(x) x -#define U64_C(x) x - -#endif /* __ASSEMBLY__ */ - -#endif /* _ASM_GENERIC_INT_L64_H */ diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 33bbbae4ddc..975e1cc75ed 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -53,8 +53,18 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) #endif #define readb __raw_readb -#define readw(addr) __le16_to_cpu(__raw_readw(addr)) -#define readl(addr) __le32_to_cpu(__raw_readl(addr)) + +#define readw readw +static inline u16 readw(const volatile void __iomem *addr) +{ + return __le16_to_cpu(__raw_readw(addr)); +} + +#define readl readl +static inline u32 readl(const volatile void __iomem *addr) +{ + return __le32_to_cpu(__raw_readl(addr)); +} #ifndef __raw_writeb static inline void __raw_writeb(u8 b, volatile void __iomem *addr) @@ -89,7 +99,11 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) } #endif -#define readq(addr) __le64_to_cpu(__raw_readq(addr)) +#define readq readq +static inline u64 readq(const volatile void __iomem *addr) +{ + return __le64_to_cpu(__raw_readq(addr)); +} #ifndef __raw_writeq static inline void __raw_writeq(u64 b, volatile void __iomem *addr) @@ -225,15 +239,15 @@ static inline void outsl(unsigned long addr, const void *buffer, int count) #ifndef CONFIG_GENERIC_IOMAP #define ioread8(addr) readb(addr) #define ioread16(addr) readw(addr) -#define ioread16be(addr) be16_to_cpu(ioread16(addr)) +#define ioread16be(addr) __be16_to_cpu(__raw_readw(addr)) #define ioread32(addr) readl(addr) -#define ioread32be(addr) be32_to_cpu(ioread32(addr)) +#define ioread32be(addr) __be32_to_cpu(__raw_readl(addr)) #define iowrite8(v, addr) writeb((v), (addr)) #define iowrite16(v, addr) writew((v), (addr)) -#define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr)) +#define iowrite16be(v, addr) __raw_writew(__cpu_to_be16(v), addr) #define iowrite32(v, addr) writel((v), (addr)) -#define iowrite32be(v, addr) iowrite32(be32_to_cpu(v), (addr)) +#define iowrite32be(v, addr) __raw_writel(__cpu_to_be32(v), addr) #define ioread8_rep(p, dst, count) \ insb((unsigned long) (p), (dst), (count)) @@ -313,7 +327,7 @@ static inline void iounmap(void __iomem *addr) } #endif /* CONFIG_MMU */ -#ifdef CONFIG_HAS_IOPORT +#ifdef CONFIG_HAS_IOPORT_MAP #ifndef CONFIG_GENERIC_IOMAP static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { @@ -327,11 +341,16 @@ static inline void ioport_unmap(void __iomem *p) extern void __iomem *ioport_map(unsigned long port, unsigned int nr); extern void ioport_unmap(void __iomem *p); #endif /* CONFIG_GENERIC_IOMAP */ -#endif /* CONFIG_HAS_IOPORT */ +#endif /* CONFIG_HAS_IOPORT_MAP */ +#ifndef xlate_dev_kmem_ptr #define xlate_dev_kmem_ptr(p) p +#endif +#ifndef xlate_dev_mem_ptr #define xlate_dev_mem_ptr(p) __va(p) +#endif +#ifdef CONFIG_VIRT_TO_BUS 
#ifndef virt_to_bus static inline unsigned long virt_to_bus(volatile void *address) { @@ -343,6 +362,7 @@ static inline void *bus_to_virt(unsigned long address) return (void *) address; } #endif +#endif #ifndef memset_io #define memset_io(a, b, c) memset(__io_virt(a), (b), (c)) diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h index d17295b290f..297fb0d7cd6 100644 --- a/include/asm-generic/ioctl.h +++ b/include/asm-generic/ioctl.h @@ -3,10 +3,15 @@ #include <uapi/asm-generic/ioctl.h> +#ifdef __CHECKER__ +#define _IOC_TYPECHECK(t) (sizeof(t)) +#else /* provoke compile error for invalid uses of size argument */ extern unsigned int __invalid_size_argument_for_IOC; #define _IOC_TYPECHECK(t) \ ((sizeof(t) == sizeof(t[1]) && \ sizeof(t) < (1 << _IOC_SIZEBITS)) ? \ sizeof(t) : __invalid_size_argument_for_IOC) +#endif + #endif /* _ASM_GENERIC_IOCTL_H */ diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h index 6afd7d6a989..1b41011643a 100644 --- a/include/asm-generic/iomap.h +++ b/include/asm-generic/iomap.h @@ -56,7 +56,7 @@ extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long coun extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count); extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count); -#ifdef CONFIG_HAS_IOPORT +#ifdef CONFIG_HAS_IOPORT_MAP /* Create a virtual mapping cookie for an IO port range */ extern void __iomem *ioport_map(unsigned long port, unsigned int nr); extern void ioport_unmap(void __iomem *); diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h index 9d96605f160..fa25becbdca 100644 --- a/include/asm-generic/kvm_para.h +++ b/include/asm-generic/kvm_para.h @@ -18,4 +18,9 @@ static inline unsigned int kvm_arch_para_features(void) return 0; } +static inline bool kvm_para_available(void) +{ + return false; +} + #endif diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h new file mode 100644 index 00000000000..10cd4ffc6ba --- /dev/null +++ b/include/asm-generic/mcs_spinlock.h @@ -0,0 +1,13 @@ +#ifndef __ASM_MCS_SPINLOCK_H +#define __ASM_MCS_SPINLOCK_H + +/* + * Architectures can define their own: + * + * arch_mcs_spin_lock_contended(l) + * arch_mcs_spin_unlock_contended(l) + * + * See kernel/locking/mcs_spinlock.c. + */ + +#endif /* __ASM_MCS_SPINLOCK_H */ diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index aea9e45efce..14909b0b9ca 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -53,7 +53,7 @@ #elif defined(CONFIG_SPARSEMEM) /* - * Note: section's mem_map is encorded to reflect its start_pfn. + * Note: section's mem_map is encoded to reflect its start_pfn. * section[i].section_mem_map == mem_map's address - start_pfn; */ #define __page_to_pfn(pg) \ diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h index f104af7cf43..d4f9fb4e53d 100644 --- a/include/asm-generic/mutex-dec.h +++ b/include/asm-generic/mutex-dec.h @@ -28,17 +28,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) * __mutex_fastpath_lock_retval - try to take the lock by moving the count * from 1 to a 0 value * @count: pointer of type atomic_t - * @fail_fn: function to call if the original value was not 1 * - * Change the count from 1 to a value lower than 1, and call <fail_fn> if - * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, - * or anything the slow path function returns. 
+ * Change the count from 1 to a value lower than 1. This function returns 0 + * if the fastpath succeeds, or -1 otherwise. */ static inline int -__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) +__mutex_fastpath_lock_retval(atomic_t *count) { if (unlikely(atomic_dec_return(count) < 0)) - return fail_fn(count); + return -1; return 0; } diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h index e1bbbc72b6a..61069ed334e 100644 --- a/include/asm-generic/mutex-null.h +++ b/include/asm-generic/mutex-null.h @@ -11,7 +11,7 @@ #define _ASM_GENERIC_MUTEX_NULL_H #define __mutex_fastpath_lock(count, fail_fn) fail_fn(count) -#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count) +#define __mutex_fastpath_lock_retval(count) (-1) #define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count) #define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count) #define __mutex_slowpath_needs_to_unlock() 1 diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h index c04e0db8a2d..f169ec06478 100644 --- a/include/asm-generic/mutex-xchg.h +++ b/include/asm-generic/mutex-xchg.h @@ -39,18 +39,16 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) * __mutex_fastpath_lock_retval - try to take the lock by moving the count * from 1 to a 0 value * @count: pointer of type atomic_t - * @fail_fn: function to call if the original value was not 1 * - * Change the count from 1 to a value lower than 1, and call <fail_fn> if it - * wasn't 1 originally. This function returns 0 if the fastpath succeeds, - * or anything the slow path function returns + * Change the count from 1 to a value lower than 1. This function returns 0 + * if the fastpath succeeds, or -1 otherwise. */ static inline int -__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) +__mutex_fastpath_lock_retval(atomic_t *count) { if (unlikely(atomic_xchg(count, 0) != 1)) if (likely(atomic_xchg(count, -1) != 1)) - return fail_fn(count); + return -1; return 0; } diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index d17784ea37f..0703aa75b5e 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -56,17 +56,17 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #define per_cpu(var, cpu) \ (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu))) -#ifndef __this_cpu_ptr -#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) +#ifndef raw_cpu_ptr +#define raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) #endif #ifdef CONFIG_DEBUG_PREEMPT #define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset) #else -#define this_cpu_ptr(ptr) __this_cpu_ptr(ptr) +#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) #endif #define __get_cpu_var(var) (*this_cpu_ptr(&(var))) -#define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var))) +#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var))) #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA extern void setup_per_cpu_areas(void); @@ -83,7 +83,7 @@ extern void setup_per_cpu_areas(void); #define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) #define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) -#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) +#define raw_cpu_ptr(ptr) this_cpu_ptr(ptr) #endif /* SMP */ @@ -122,4 +122,7 @@ extern void setup_per_cpu_areas(void); #define PER_CPU_DEF_ATTRIBUTES #endif +/* Keep until we have removed all uses of __this_cpu_ptr */ +#define __this_cpu_ptr raw_cpu_ptr + #endif /* _ASM_GENERIC_PERCPU_H_ */ 
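The mutex-dec.h and mutex-xchg.h hunks above drop the fail_fn argument from __mutex_fastpath_lock_retval(): the fastpath now simply reports contention with -1 and the caller falls back to the generic slowpath. As a rough illustration only, here is a hedged userspace sketch of the same decrement-based fastpath protocol, using C11 atomics rather than the kernel's atomic_t; the name fastpath_lock_retval() is invented for this example and is not kernel API.

/*
 * Illustrative sketch, not kernel code: the counter starts at 1 (unlocked);
 * the fastpath decrements it and succeeds only if the old value was 1.
 * On contention it returns -1 so a slowpath (not shown) can handle
 * queuing and wakeups, mirroring the semantics in the hunk above.
 */
#include <stdatomic.h>
#include <stdio.h>

static int fastpath_lock_retval(atomic_int *count)
{
	/* atomic_fetch_sub() returns the old value; old - 1 is the new one */
	if (atomic_fetch_sub(count, 1) - 1 < 0)
		return -1;	/* was not 1: caller must take the slowpath */
	return 0;		/* mutex acquired on the fastpath */
}

int main(void)
{
	atomic_int count = 1;	/* 1 == unlocked */

	printf("first lock: %d\n", fastpath_lock_retval(&count));	/* 0 */
	printf("second lock: %d\n", fastpath_lock_retval(&count));	/* -1 */
	return 0;
}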
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 5cf680a98f9..53b2acc3821 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -7,6 +7,16 @@ #include <linux/mm_types.h> #include <linux/bug.h> +/* + * On almost all architectures and configurations, 0 can be used as the + * upper ceiling to free_pgtables(): on many architectures it has the same + * effect as using TASK_SIZE. However, there is one configuration which + * must impose a more careful limit, to avoid freeing kernel pgtables. + */ +#ifndef USER_PGTABLES_CEILING +#define USER_PGTABLES_CEILING 0UL +#endif + #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, @@ -163,11 +173,12 @@ extern void pmdp_splitting_flush(struct vm_area_struct *vma, #endif #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT -extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); +extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable); #endif #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW -extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm); +extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #endif #ifndef __HAVE_ARCH_PMDP_INVALIDATE @@ -182,6 +193,19 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b) } #endif +#ifndef __HAVE_ARCH_PTE_UNUSED +/* + * Some architectures provide facilities to virtualization guests + * so that they can flag allocated pages as unused. This allows the + * host to transparently reclaim unused pages. This function returns + * whether the pte's page is unused. + */ +static inline int pte_unused(pte_t pte) +{ + return 0; +} +#endif + #ifndef __HAVE_ARCH_PMD_SAME #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) @@ -197,20 +221,6 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif -#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY -#define page_test_and_clear_dirty(pfn, mapped) (0) -#endif - -#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY -#define pte_maybe_dirty(pte) pte_dirty(pte) -#else -#define pte_maybe_dirty(pte) (1) -#endif - -#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG -#define page_test_and_clear_young(pfn) (0) -#endif - #ifndef __HAVE_ARCH_PGD_OFFSET_GATE #define pgd_offset_gate(mm, addr) pgd_offset(mm, addr) #endif @@ -220,7 +230,11 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) #endif #ifndef pte_accessible -# define pte_accessible(pte) ((void)(pte),1) +# define pte_accessible(mm, pte) ((void)(pte), 1) +#endif + +#ifndef pte_present_nonuma +#define pte_present_nonuma(pte) pte_present(pte) #endif #ifndef flush_tlb_fix_spurious_fault @@ -396,6 +410,58 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, #define arch_start_context_switch(prev) do {} while (0) #endif +#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY +static inline int pte_soft_dirty(pte_t pte) +{ + return 0; +} + +static inline int pmd_soft_dirty(pmd_t pmd) +{ + return 0; +} + +static inline pte_t pte_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) +{ + return pmd; +} + +static inline pte_t pte_swp_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline int pte_swp_soft_dirty(pte_t pte) +{ + return 0; +} + +static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) +{ + return pte; +} + +static inline pte_t pte_file_clear_soft_dirty(pte_t pte) +{ + return pte; +} + +static inline 
pte_t pte_file_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline int pte_file_soft_dirty(pte_t pte) +{ + return 0; +} +#endif + #ifndef __HAVE_PFNMAP_TRACKING /* * Interfaces that can be used by architecture code to keep track of @@ -509,6 +575,18 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp) } #endif +#ifndef pmd_move_must_withdraw +static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, + spinlock_t *old_pmd_ptl) +{ + /* + * With split pmd lock we also need to move preallocated + * PTE page table if new_pmd is on different PMD page table. + */ + return new_pmd_ptl != old_pmd_ptl; +} +#endif + /* * This function is meant to be used by sites walking pagetables with * the mmap_sem hold in read mode to protect against MADV_DONTNEED and @@ -550,11 +628,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) #ifdef CONFIG_TRANSPARENT_HUGEPAGE barrier(); #endif - if (pmd_none(pmdval)) + if (pmd_none(pmdval) || pmd_trans_huge(pmdval)) return 1; if (unlikely(pmd_bad(pmdval))) { - if (!pmd_trans_huge(pmdval)) - pmd_clear_bad(pmd); + pmd_clear_bad(pmd); return 1; } return 0; @@ -597,7 +674,7 @@ static inline int pmd_trans_unstable(pmd_t *pmd) static inline int pte_numa(pte_t pte) { return (pte_flags(pte) & - (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA; + (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA; } #endif @@ -605,7 +682,7 @@ static inline int pte_numa(pte_t pte) static inline int pmd_numa(pmd_t pmd) { return (pmd_flags(pmd) & - (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA; + (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA; } #endif @@ -620,32 +697,71 @@ static inline int pmd_numa(pmd_t pmd) #ifndef pte_mknonnuma static inline pte_t pte_mknonnuma(pte_t pte) { - pte = pte_clear_flags(pte, _PAGE_NUMA); - return pte_set_flags(pte, _PAGE_PRESENT|_PAGE_ACCESSED); + pteval_t val = pte_val(pte); + + val &= ~_PAGE_NUMA; + val |= (_PAGE_PRESENT|_PAGE_ACCESSED); + return __pte(val); } #endif #ifndef pmd_mknonnuma static inline pmd_t pmd_mknonnuma(pmd_t pmd) { - pmd = pmd_clear_flags(pmd, _PAGE_NUMA); - return pmd_set_flags(pmd, _PAGE_PRESENT|_PAGE_ACCESSED); + pmdval_t val = pmd_val(pmd); + + val &= ~_PAGE_NUMA; + val |= (_PAGE_PRESENT|_PAGE_ACCESSED); + + return __pmd(val); } #endif #ifndef pte_mknuma static inline pte_t pte_mknuma(pte_t pte) { - pte = pte_set_flags(pte, _PAGE_NUMA); - return pte_clear_flags(pte, _PAGE_PRESENT); + pteval_t val = pte_val(pte); + + val &= ~_PAGE_PRESENT; + val |= _PAGE_NUMA; + + return __pte(val); +} +#endif + +#ifndef ptep_set_numa +static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + pte_t ptent = *ptep; + + ptent = pte_mknuma(ptent); + set_pte_at(mm, addr, ptep, ptent); + return; } #endif #ifndef pmd_mknuma static inline pmd_t pmd_mknuma(pmd_t pmd) { - pmd = pmd_set_flags(pmd, _PAGE_NUMA); - return pmd_clear_flags(pmd, _PAGE_PRESENT); + pmdval_t val = pmd_val(pmd); + + val &= ~_PAGE_PRESENT; + val |= _PAGE_NUMA; + + return __pmd(val); +} +#endif + +#ifndef pmdp_set_numa +static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + + pmd = pmd_mknuma(pmd); + set_pmd_at(mm, addr, pmdp, pmd); + return; } #endif #else @@ -655,6 +771,8 @@ extern pte_t pte_mknonnuma(pte_t pte); extern pmd_t pmd_mknonnuma(pmd_t pmd); extern pte_t pte_mknuma(pte_t pte); extern pmd_t pmd_mknuma(pmd_t pmd); +extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep); +extern void pmdp_set_numa(struct mm_struct *mm, 
unsigned long addr, pmd_t *pmdp); #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ #else static inline int pmd_numa(pmd_t pmd) @@ -682,14 +800,31 @@ static inline pte_t pte_mknuma(pte_t pte) return pte; } +static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + return; +} + + static inline pmd_t pmd_mknuma(pmd_t pmd) { return pmd; } + +static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp) +{ + return ; +} #endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_MMU */ #endif /* !__ASSEMBLY__ */ +#ifndef io_remap_pfn_range +#define io_remap_pfn_range remap_pfn_range +#endif + #endif /* _ASM_GENERIC_PGTABLE_H */ diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h new file mode 100644 index 00000000000..1cd3f5d767a --- /dev/null +++ b/include/asm-generic/preempt.h @@ -0,0 +1,92 @@ +#ifndef __ASM_PREEMPT_H +#define __ASM_PREEMPT_H + +#include <linux/thread_info.h> + +#define PREEMPT_ENABLED (0) + +static __always_inline int preempt_count(void) +{ + return current_thread_info()->preempt_count; +} + +static __always_inline int *preempt_count_ptr(void) +{ + return ¤t_thread_info()->preempt_count; +} + +static __always_inline void preempt_count_set(int pc) +{ + *preempt_count_ptr() = pc; +} + +/* + * must be macros to avoid header recursion hell + */ +#define task_preempt_count(p) \ + (task_thread_info(p)->preempt_count & ~PREEMPT_NEED_RESCHED) + +#define init_task_preempt_count(p) do { \ + task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \ +} while (0) + +#define init_idle_preempt_count(p, cpu) do { \ + task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ +} while (0) + +static __always_inline void set_preempt_need_resched(void) +{ +} + +static __always_inline void clear_preempt_need_resched(void) +{ +} + +static __always_inline bool test_preempt_need_resched(void) +{ + return false; +} + +/* + * The various preempt_count add/sub methods + */ + +static __always_inline void __preempt_count_add(int val) +{ + *preempt_count_ptr() += val; +} + +static __always_inline void __preempt_count_sub(int val) +{ + *preempt_count_ptr() -= val; +} + +static __always_inline bool __preempt_count_dec_and_test(void) +{ + /* + * Because of load-store architectures cannot do per-cpu atomic + * operations; we cannot use PREEMPT_NEED_RESCHED because it might get + * lost. + */ + return !--*preempt_count_ptr() && tif_need_resched(); +} + +/* + * Returns true when we need to resched and can (barring IRQ state). + */ +static __always_inline bool should_resched(void) +{ + return unlikely(!preempt_count() && tif_need_resched()); +} + +#ifdef CONFIG_PREEMPT +extern asmlinkage void preempt_schedule(void); +#define __preempt_schedule() preempt_schedule() + +#ifdef CONFIG_CONTEXT_TRACKING +extern asmlinkage void preempt_schedule_context(void); +#define __preempt_schedule_context() preempt_schedule_context() +#endif +#endif /* CONFIG_PREEMPT */ + +#endif /* __ASM_PREEMPT_H */ diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h new file mode 100644 index 00000000000..6383d54bf98 --- /dev/null +++ b/include/asm-generic/qrwlock.h @@ -0,0 +1,166 @@ +/* + * Queue read/write lock + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P. + * + * Authors: Waiman Long <waiman.long@hp.com> + */ +#ifndef __ASM_GENERIC_QRWLOCK_H +#define __ASM_GENERIC_QRWLOCK_H + +#include <linux/atomic.h> +#include <asm/barrier.h> +#include <asm/processor.h> + +#include <asm-generic/qrwlock_types.h> + +/* + * Writer states & reader shift and bias + */ +#define _QW_WAITING 1 /* A writer is waiting */ +#define _QW_LOCKED 0xff /* A writer holds the lock */ +#define _QW_WMASK 0xff /* Writer mask */ +#define _QR_SHIFT 8 /* Reader count shift */ +#define _QR_BIAS (1U << _QR_SHIFT) + +/* + * External function declarations + */ +extern void queue_read_lock_slowpath(struct qrwlock *lock); +extern void queue_write_lock_slowpath(struct qrwlock *lock); + +/** + * queue_read_can_lock- would read_trylock() succeed? + * @lock: Pointer to queue rwlock structure + */ +static inline int queue_read_can_lock(struct qrwlock *lock) +{ + return !(atomic_read(&lock->cnts) & _QW_WMASK); +} + +/** + * queue_write_can_lock- would write_trylock() succeed? + * @lock: Pointer to queue rwlock structure + */ +static inline int queue_write_can_lock(struct qrwlock *lock) +{ + return !atomic_read(&lock->cnts); +} + +/** + * queue_read_trylock - try to acquire read lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + * Return: 1 if lock acquired, 0 if failed + */ +static inline int queue_read_trylock(struct qrwlock *lock) +{ + u32 cnts; + + cnts = atomic_read(&lock->cnts); + if (likely(!(cnts & _QW_WMASK))) { + cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts); + if (likely(!(cnts & _QW_WMASK))) + return 1; + atomic_sub(_QR_BIAS, &lock->cnts); + } + return 0; +} + +/** + * queue_write_trylock - try to acquire write lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + * Return: 1 if lock acquired, 0 if failed + */ +static inline int queue_write_trylock(struct qrwlock *lock) +{ + u32 cnts; + + cnts = atomic_read(&lock->cnts); + if (unlikely(cnts)) + return 0; + + return likely(atomic_cmpxchg(&lock->cnts, + cnts, cnts | _QW_LOCKED) == cnts); +} +/** + * queue_read_lock - acquire read lock of a queue rwlock + * @lock: Pointer to queue rwlock structure + */ +static inline void queue_read_lock(struct qrwlock *lock) +{ + u32 cnts; + + cnts = atomic_add_return(_QR_BIAS, &lock->cnts); + if (likely(!(cnts & _QW_WMASK))) + return; + + /* The slowpath will decrement the reader count, if necessary. */ + queue_read_lock_slowpath(lock); +} + +/** + * queue_write_lock - acquire write lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + */ +static inline void queue_write_lock(struct qrwlock *lock) +{ + /* Optimize for the unfair lock case where the fair flag is 0. 
*/ + if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0) + return; + + queue_write_lock_slowpath(lock); +} + +/** + * queue_read_unlock - release read lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + */ +static inline void queue_read_unlock(struct qrwlock *lock) +{ + /* + * Atomically decrement the reader count + */ + smp_mb__before_atomic(); + atomic_sub(_QR_BIAS, &lock->cnts); +} + +#ifndef queue_write_unlock +/** + * queue_write_unlock - release write lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + */ +static inline void queue_write_unlock(struct qrwlock *lock) +{ + /* + * If the writer field is atomic, it can be cleared directly. + * Otherwise, an atomic subtraction will be used to clear it. + */ + smp_mb__before_atomic(); + atomic_sub(_QW_LOCKED, &lock->cnts); +} +#endif + +/* + * Remapping rwlock architecture specific functions to the corresponding + * queue rwlock functions. + */ +#define arch_read_can_lock(l) queue_read_can_lock(l) +#define arch_write_can_lock(l) queue_write_can_lock(l) +#define arch_read_lock(l) queue_read_lock(l) +#define arch_write_lock(l) queue_write_lock(l) +#define arch_read_trylock(l) queue_read_trylock(l) +#define arch_write_trylock(l) queue_write_trylock(l) +#define arch_read_unlock(l) queue_read_unlock(l) +#define arch_write_unlock(l) queue_write_unlock(l) + +#endif /* __ASM_GENERIC_QRWLOCK_H */ diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h new file mode 100644 index 00000000000..4d76f24df51 --- /dev/null +++ b/include/asm-generic/qrwlock_types.h @@ -0,0 +1,21 @@ +#ifndef __ASM_GENERIC_QRWLOCK_TYPES_H +#define __ASM_GENERIC_QRWLOCK_TYPES_H + +#include <linux/types.h> +#include <asm/spinlock_types.h> + +/* + * The queue read/write lock data structure + */ + +typedef struct qrwlock { + atomic_t cnts; + arch_spinlock_t lock; +} arch_rwlock_t; + +#define __ARCH_RW_LOCK_UNLOCKED { \ + .cnts = ATOMIC_INIT(0), \ + .lock = __ARCH_SPIN_LOCK_UNLOCKED, \ +} + +#endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */ diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h index b4ea8f50fc6..5e752b95905 100644 --- a/include/asm-generic/resource.h +++ b/include/asm-generic/resource.h @@ -12,7 +12,7 @@ [RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \ - [RLIMIT_STACK] = { _STK_LIM, _STK_LIM_MAX }, \ + [RLIMIT_STACK] = { _STK_LIM, RLIM_INFINITY }, \ [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \ [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_NPROC] = { 0, 0 }, \ diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h index bb1e2cdeb9b..d48bf5a95cc 100644 --- a/include/asm-generic/rwsem.h +++ b/include/asm-generic/rwsem.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_RWSEM_H -#define _ASM_POWERPC_RWSEM_H +#ifndef _ASM_GENERIC_RWSEM_H +#define _ASM_GENERIC_RWSEM_H #ifndef _LINUX_RWSEM_H #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead." @@ -8,7 +8,7 @@ #ifdef __KERNEL__ /* - * R/W semaphores for PPC using the stuff in lib/rwsem.c. + * R/W semaphores originally for PPC using the stuff in lib/rwsem.c. * Adapted largely from include/asm-i386/rwsem.h * by Paul Mackerras <paulus@samba.org>. 
*/ @@ -16,7 +16,7 @@ /* * the semaphore definition */ -#ifdef CONFIG_PPC64 +#ifdef CONFIG_64BIT # define RWSEM_ACTIVE_MASK 0xffffffffL #else # define RWSEM_ACTIVE_MASK 0x0000ffffL @@ -129,4 +129,4 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) } #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_RWSEM_H */ +#endif /* _ASM_GENERIC_RWSEM_H */ diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index c1a1216e29c..f1a24b5c3b9 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -3,6 +3,26 @@ /* References to section boundaries */ +/* + * Usage guidelines: + * _text, _data: architecture specific, don't use them in arch-independent code + * [_stext, _etext]: contains .text.* sections, may also contain .rodata.* + * and/or .init.* sections + * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* + * and/or .init.* sections. + * [__start_rodata, __end_rodata]: contains .rodata.* sections + * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* + * may be out of this range on some architectures. + * [_sinittext, _einittext]: contains .init.text.* sections + * [__bss_start, __bss_stop]: contains BSS sections + * + * Following global variables are optional and may be unavailable on some + * architectures and/or kernel configurations. + * _text, _data + * __kprobes_text_start, __kprobes_text_end + * __entry_text_start, __entry_text_end + * __ctors_start, __ctors_end + */ extern char _text[], _stext[], _etext[]; extern char _data[], _sdata[], _edata[]; extern char __bss_start[], __bss_stop[]; @@ -12,7 +32,6 @@ extern char _end[]; extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; extern char __kprobes_text_start[], __kprobes_text_end[]; extern char __entry_text_start[], __entry_text_end[]; -extern char __initdata_begin[], __initdata_end[]; extern char __start_rodata[], __end_rodata[]; /* Start and end of .ctors section - used for constructor calls. */ diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h index b685d3bd32e..3d1a3af5cf5 100644 --- a/include/asm-generic/siginfo.h +++ b/include/asm-generic/siginfo.h @@ -32,6 +32,6 @@ static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) #endif -extern int copy_siginfo_to_user(struct siginfo __user *to, struct siginfo *from); +extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); #endif diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h new file mode 100644 index 00000000000..f57eb7b5c23 --- /dev/null +++ b/include/asm-generic/simd.h @@ -0,0 +1,14 @@ + +#include <linux/hardirq.h> + +/* + * may_use_simd - whether it is allowable at this time to issue SIMD + * instructions or access the SIMD register file + * + * As architectures typically don't preserve the SIMD register file when + * taking an interrupt, !in_interrupt() should be a reasonable default. 
+ */ +static __must_check inline bool may_use_simd(void) +{ + return !in_interrupt(); +} diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index 5b09392db67..d401e5463fb 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h @@ -144,8 +144,6 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, /** * syscall_get_arch - return the AUDIT_ARCH for the current system call - * @task: task of interest, must be in system call entry tracing - * @regs: task_pt_regs() of @task * * Returns the AUDIT_ARCH_* based on the system call convention in use. * @@ -155,5 +153,5 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must * provide an implementation of this. */ -int syscall_get_arch(struct task_struct *task, struct pt_regs *regs); +int syscall_get_arch(void); #endif /* _ASM_SYSCALL_H */ diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h index 1db51b8524e..1f74be5113b 100644 --- a/include/asm-generic/syscalls.h +++ b/include/asm-generic/syscalls.h @@ -21,24 +21,8 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long fd, off_t pgoff); #endif -#ifndef CONFIG_GENERIC_SIGALTSTACK -#ifndef sys_sigaltstack -asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, - struct pt_regs *); -#endif -#endif - #ifndef sys_rt_sigreturn asmlinkage long sys_rt_sigreturn(struct pt_regs *regs); #endif -#ifndef sys_rt_sigsuspend -asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize); -#endif - -#ifndef sys_rt_sigaction -asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act, - struct sigaction __user *oact, size_t sigsetsize); -#endif - #endif /* __ASM_GENERIC_SYSCALLS_H */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 25f01d0bc14..5672d7ea1fa 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -97,9 +97,12 @@ struct mmu_gather { unsigned long start; unsigned long end; unsigned int need_flush : 1, /* Did free PTEs */ - fast_mode : 1; /* No batching */ - - unsigned int fullmm; + /* we are in the middle of an operation to clear + * a full mm and can make some optimizations */ + fullmm : 1, + /* we have performed an operation which + * requires a complete flush of the tlb */ + need_flush_all : 1; struct mmu_gather_batch *active; struct mmu_gather_batch local; @@ -109,20 +112,7 @@ struct mmu_gather { #define HAVE_GENERIC_MMU_GATHER -static inline int tlb_fast_mode(struct mmu_gather *tlb) -{ -#ifdef CONFIG_SMP - return tlb->fast_mode; -#else - /* - * For UP we don't need to worry about TLB flush - * and page free order so much.. - */ - return 1; -#endif -} - -void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm); +void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); void tlb_flush_mmu(struct mmu_gather *tlb); void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end); diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index 9788568f797..72d8803832f 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -3,11 +3,10 @@ /* * User space memory access functions, these should work - * on a ny machine that has kernel and user data in the same + * on any machine that has kernel and user data in the same * address space, e.g. all NOMMU machines. 
*/ #include <linux/sched.h> -#include <linux/mm.h> #include <linux/string.h> #include <asm/segment.h> @@ -32,7 +31,9 @@ static inline void set_fs(mm_segment_t fs) } #endif +#ifndef segment_eq #define segment_eq(a, b) ((a).seg == (b).seg) +#endif #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -162,18 +163,24 @@ static inline __must_check long __copy_to_user(void __user *to, #define put_user(x, ptr) \ ({ \ - might_sleep(); \ + might_fault(); \ access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ? \ __put_user(x, ptr) : \ -EFAULT; \ }) +#ifndef __put_user_fn + static inline int __put_user_fn(size_t size, void __user *ptr, void *x) { size = __copy_to_user(ptr, x, size); return size ? -EFAULT : size; } +#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k) + +#endif + extern int __put_user_bad(void) __attribute__((noreturn)); #define __get_user(x, ptr) \ @@ -218,18 +225,23 @@ extern int __put_user_bad(void) __attribute__((noreturn)); #define get_user(x, ptr) \ ({ \ - might_sleep(); \ + might_fault(); \ access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \ __get_user(x, ptr) : \ -EFAULT; \ }) +#ifndef __get_user_fn static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) { size = __copy_from_user(x, ptr, size); return size ? -EFAULT : size; } +#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) + +#endif + extern int __get_user_bad(void) __attribute__((noreturn)); #ifndef __copy_from_user_inatomic @@ -243,7 +255,7 @@ extern int __get_user_bad(void) __attribute__((noreturn)); static inline long copy_from_user(void *to, const void __user * from, unsigned long n) { - might_sleep(); + might_fault(); if (access_ok(VERIFY_READ, from, n)) return __copy_from_user(to, from, n); else @@ -253,7 +265,7 @@ static inline long copy_from_user(void *to, static inline long copy_to_user(void __user *to, const void *from, unsigned long n) { - might_sleep(); + might_fault(); if (access_ok(VERIFY_WRITE, to, n)) return __copy_to_user(to, from, n); else @@ -324,7 +336,7 @@ __clear_user(void __user *to, unsigned long n) static inline __must_check unsigned long clear_user(void __user *to, unsigned long n) { - might_sleep(); + might_fault(); if (!access_ok(VERIFY_WRITE, to, n)) return n; diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h index 03cf5936bad..1ac097279db 100644 --- a/include/asm-generic/unaligned.h +++ b/include/asm-generic/unaligned.h @@ -4,22 +4,27 @@ /* * This is the most generic implementation of unaligned accesses * and should work almost anywhere. - * - * If an architecture can handle unaligned accesses in hardware, - * it may want to use the linux/unaligned/access_ok.h implementation - * instead. */ #include <asm/byteorder.h> +/* Set by the arch if it can handle unaligned accesses in hardware. 
*/ +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +# include <linux/unaligned/access_ok.h> +#endif + #if defined(__LITTLE_ENDIAN) -# include <linux/unaligned/le_struct.h> -# include <linux/unaligned/be_byteshift.h> +# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +# include <linux/unaligned/le_struct.h> +# include <linux/unaligned/be_byteshift.h> +# endif # include <linux/unaligned/generic.h> # define get_unaligned __get_unaligned_le # define put_unaligned __put_unaligned_le #elif defined(__BIG_ENDIAN) -# include <linux/unaligned/be_struct.h> -# include <linux/unaligned/le_byteshift.h> +# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +# include <linux/unaligned/be_struct.h> +# include <linux/unaligned/le_byteshift.h> +# endif # include <linux/unaligned/generic.h> # define get_unaligned __get_unaligned_be # define put_unaligned __put_unaligned_be diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index a36991ab334..cccc86ecfea 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h @@ -1,4 +1,5 @@ #include <uapi/asm-generic/unistd.h> +#include <linux/export.h> /* * These are required system calls, we should @@ -9,16 +10,3 @@ #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_LLSEEK #endif -#define __ARCH_WANT_SYS_RT_SIGACTION -#define __ARCH_WANT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND - -/* - * "Conditional" syscalls - * - * What we want is __attribute__((weak,alias("sys_ni_syscall"))), - * but it doesn't work on all toolchains, so we just do it by hand - */ -#ifndef cond_syscall -#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") -#endif diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index d1ea7ce0b4c..c1c0b0cf39b 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -52,13 +52,7 @@ #define LOAD_OFFSET 0 #endif -#ifndef SYMBOL_PREFIX -#define VMLINUX_SYMBOL(sym) sym -#else -#define PASTE2(x,y) x##y -#define PASTE(x,y) PASTE2(x,y) -#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym) -#endif +#include <linux/export.h> /* Align . to a 8 byte boundary equals to maximum function alignment. */ #define ALIGN_FUNCTION() . = ALIGN(8) @@ -74,14 +68,6 @@ * are handled as text/data or they can be discarded (which * often happens at runtime) */ -#ifdef CONFIG_HOTPLUG -#define DEV_KEEP(sec) *(.dev##sec) -#define DEV_DISCARD(sec) -#else -#define DEV_KEEP(sec) -#define DEV_DISCARD(sec) *(.dev##sec) -#endif - #ifdef CONFIG_HOTPLUG_CPU #define CPU_KEEP(sec) *(.cpu##sec) #define CPU_DISCARD(sec) @@ -123,6 +109,15 @@ #define BRANCH_PROFILE() #endif +#ifdef CONFIG_KPROBES +#define KPROBE_BLACKLIST() . = ALIGN(8); \ + VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \ + *(_kprobe_blacklist) \ + VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .; +#else +#define KPROBE_BLACKLIST() +#endif + #ifdef CONFIG_EVENT_TRACING #define FTRACE_EVENTS() . 
= ALIGN(8); \ VMLINUX_SYMBOL(__start_ftrace_events) = .; \ @@ -136,8 +131,12 @@ #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \ *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \ VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .; +#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \ + *(__tracepoint_str) /* Trace_printk fmt' pointer */ \ + VMLINUX_SYMBOL(__stop___tracepoint_str) = .; #else #define TRACE_PRINTKS() +#define TRACEPOINT_STR() #endif #ifdef CONFIG_FTRACE_SYSCALLS @@ -150,6 +149,23 @@ #endif +#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name) +#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name) +#define OF_TABLE(cfg, name) __OF_TABLE(config_enabled(cfg), name) +#define _OF_TABLE_0(name) +#define _OF_TABLE_1(name) \ + . = ALIGN(8); \ + VMLINUX_SYMBOL(__##name##_of_table) = .; \ + *(__##name##_of_table) \ + *(__##name##_of_table_end) + +#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc) +#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) +#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) +#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) +#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) +#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon) + #define KERNEL_DTB() \ STRUCT_ALIGN(); \ VMLINUX_SYMBOL(__dtb_start) = .; \ @@ -161,10 +177,6 @@ *(.data) \ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ - DEV_KEEP(init.data) \ - DEV_KEEP(exit.data) \ - CPU_KEEP(init.data) \ - CPU_KEEP(exit.data) \ MEM_KEEP(init.data) \ MEM_KEEP(exit.data) \ *(.data.unlikely) \ @@ -181,7 +193,8 @@ VMLINUX_SYMBOL(__stop___verbose) = .; \ LIKELY_PROFILE() \ BRANCH_PROFILE() \ - TRACE_PRINTKS() + TRACE_PRINTKS() \ + TRACEPOINT_STR() /* * Data section helpers @@ -264,13 +277,6 @@ VMLINUX_SYMBOL(__end_builtin_fw) = .; \ } \ \ - /* RapidIO route ops */ \ - .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \ - VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \ - *(.rio_switch_ops) \ - VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \ - } \ - \ TRACEDATA \ \ /* Kernel symbol table: Normal symbols */ \ @@ -351,10 +357,6 @@ /* __*init sections */ \ __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ *(.ref.rodata) \ - DEV_KEEP(init.rodata) \ - DEV_KEEP(exit.rodata) \ - CPU_KEEP(init.rodata) \ - CPU_KEEP(exit.rodata) \ MEM_KEEP(init.rodata) \ MEM_KEEP(exit.rodata) \ } \ @@ -395,10 +397,6 @@ *(.text.hot) \ *(.text) \ *(.ref.text) \ - DEV_KEEP(init.text) \ - DEV_KEEP(exit.text) \ - CPU_KEEP(init.text) \ - CPU_KEEP(exit.text) \ MEM_KEEP(init.text) \ MEM_KEEP(exit.text) \ *(.text.unlikely) @@ -474,6 +472,7 @@ #define KERNEL_CTORS() . 
= ALIGN(8); \ VMLINUX_SYMBOL(__ctors_start) = .; \ *(.ctors) \ + *(.init_array) \ VMLINUX_SYMBOL(__ctors_end) = .; #else #define KERNEL_CTORS() @@ -482,38 +481,33 @@ /* init and exit section handling */ #define INIT_DATA \ *(.init.data) \ - DEV_DISCARD(init.data) \ - CPU_DISCARD(init.data) \ MEM_DISCARD(init.data) \ KERNEL_CTORS() \ MCOUNT_REC() \ *(.init.rodata) \ FTRACE_EVENTS() \ TRACE_SYSCALLS() \ - DEV_DISCARD(init.rodata) \ - CPU_DISCARD(init.rodata) \ + KPROBE_BLACKLIST() \ MEM_DISCARD(init.rodata) \ - KERNEL_DTB() + CLK_OF_TABLES() \ + RESERVEDMEM_OF_TABLES() \ + CLKSRC_OF_TABLES() \ + CPU_METHOD_OF_TABLES() \ + KERNEL_DTB() \ + IRQCHIP_OF_MATCH_TABLE() \ + EARLYCON_OF_TABLES() #define INIT_TEXT \ *(.init.text) \ - DEV_DISCARD(init.text) \ - CPU_DISCARD(init.text) \ MEM_DISCARD(init.text) #define EXIT_DATA \ *(.exit.data) \ - DEV_DISCARD(exit.data) \ - DEV_DISCARD(exit.rodata) \ - CPU_DISCARD(exit.data) \ - CPU_DISCARD(exit.rodata) \ MEM_DISCARD(exit.data) \ MEM_DISCARD(exit.rodata) #define EXIT_TEXT \ *(.exit.text) \ - DEV_DISCARD(exit.text) \ - CPU_DISCARD(exit.text) \ MEM_DISCARD(exit.text) #define EXIT_CALL \ @@ -699,7 +693,7 @@ . = ALIGN(PAGE_SIZE); \ *(.data..percpu..page_aligned) \ . = ALIGN(cacheline); \ - *(.data..percpu..readmostly) \ + *(.data..percpu..read_mostly) \ . = ALIGN(cacheline); \ *(.data..percpu) \ *(.data..percpu..shared_aligned) \ diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h new file mode 100644 index 00000000000..b1a49677fe2 --- /dev/null +++ b/include/asm-generic/vtime.h @@ -0,0 +1 @@ +/* no content, but patch(1) dislikes empty files */ diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 3f21f1b72e4..94f9ea8abca 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -49,4 +49,8 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct return (val + c->high_bits) & ~rhs; } +#ifndef zero_bytemask +#define zero_bytemask(mask) (~1ul << __fls(mask)) +#endif + #endif /* _ASM_WORD_AT_A_TIME_H */ |
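
A note on the free_pgtables() ceiling and the new tlb_gather_mmu() arguments: the sketch below is modelled on the full-address-space teardown path (exit_mmap()-style), with locking and VMA bookkeeping elided; it is illustrative rather than a verbatim call site.

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0, -1);	/* 0..-1 == whole address space */
	unmap_vmas(&tlb, vma, 0, -1);
	/* USER_PGTABLES_CEILING keeps kernel page tables out of reach here */
	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, 0, -1);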
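
ptep_set_numa() and pmdp_set_numa() fold the mknuma plus set_pte_at()/set_pmd_at() pair into a single helper. A hedged sketch of the caller shape for the pte case; the real NUMA-balancing code batches entries and handles many more cases:

	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (pte_present(*pte) && !pte_numa(*pte))
		ptep_set_numa(mm, addr, pte);	/* clears _PAGE_PRESENT, sets _PAGE_NUMA */
	pte_unmap_unlock(pte, ptl);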
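
The preempt_count accessors and __preempt_count_* operations in asm-generic/preempt.h are the building blocks for the preempt_disable()/preempt_enable() macros in <linux/preempt.h>. A simplified sketch of that layering, with the debug and tracing variants dropped (not the exact upstream macros):

#define preempt_disable() \
do { \
	__preempt_count_add(1); \
	barrier(); \
} while (0)

#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)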
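
The qrwlock counter packs the writer state into the low byte and the reader count above it. The userspace C11 model below mirrors only the trylock fast paths, to show how that packing works; the queued slowpaths (and the arch_spinlock_t in the lock type) are what provide fairness in the real lock:

#include <stdatomic.h>
#include <stdio.h>

#define QW_LOCKED 0xffu		/* a writer holds the lock */
#define QW_WMASK  0xffu		/* writer byte */
#define QR_BIAS   (1u << 8)	/* one reader */

static atomic_uint cnts;

static int read_trylock(void)
{
	unsigned int c = atomic_load(&cnts);

	if (!(c & QW_WMASK)) {
		c = atomic_fetch_add(&cnts, QR_BIAS) + QR_BIAS;
		if (!(c & QW_WMASK))
			return 1;
		atomic_fetch_sub(&cnts, QR_BIAS);	/* a writer slipped in, back out */
	}
	return 0;
}

static int write_trylock(void)
{
	unsigned int expected = 0;

	return atomic_compare_exchange_strong(&cnts, &expected, QW_LOCKED);
}

int main(void)
{
	/* prints "1 0 1": readers succeed, the writer fails while readers hold it */
	printf("%d %d %d\n", read_trylock(), write_trylock(), read_trylock());
	return 0;
}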
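
The usage guidelines added to <asm-generic/sections.h> are what make address-range checks like the one below safe in arch-independent code. This is a minimal, hypothetical helper; the in-tree core_kernel_text() does the complete job, including init text:

#include <linux/types.h>
#include <asm/sections.h>

static bool addr_in_kernel_text(unsigned long addr)
{
	return addr >= (unsigned long)_stext &&
	       addr <  (unsigned long)_etext;
}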
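
may_use_simd() is meant to gate any SIMD-assisted path, since most architectures do not preserve the SIMD register file across interrupts. A hedged usage sketch: kernel_fpu_begin()/kernel_fpu_end() are the x86 names and differ per architecture, and crc_simd()/crc_generic() are hypothetical helpers:

	if (may_use_simd()) {
		kernel_fpu_begin();
		crc = crc_simd(buf, len);	/* hypothetical SIMD implementation */
		kernel_fpu_end();
	} else {
		crc = crc_generic(buf, len);	/* hypothetical scalar fallback */
	}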
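
The might_sleep() to might_fault() change in uaccess.h is a debugging-annotation change; callers keep the usual convention of treating any nonzero return from the copy helpers as -EFAULT. A minimal sketch of that pattern, with struct my_args and do_something() made up for illustration:

	struct my_args karg;

	if (copy_from_user(&karg, uarg, sizeof(karg)))
		return -EFAULT;

	karg.result = do_something(karg.value);	/* hypothetical */

	if (copy_to_user(uarg, &karg, sizeof(karg)))
		return -EFAULT;
	return 0;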
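
With the CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS split, get_unaligned()/put_unaligned() become plain loads and stores on capable architectures and byte-wise accessors elsewhere; the caller pattern for packed wire formats is the same either way. A sketch with an invented header layout (hdr is assumed to be a u8 * into a received packet):

	#include <asm/unaligned.h>

	/* 16-bit length at byte offset 2, 32-bit sequence number at offset 4 */
	u16 len = get_unaligned_le16(hdr + 2);
	put_unaligned_le32(seq, hdr + 4);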
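
The OF_TABLE() macros only reserve named sections in the image; drivers fill them in through declaration macros. For example, IRQCHIP_DECLARE() from <linux/irqchip.h> places an entry in __irqchip_of_table, which IRQCHIP_OF_MATCH_TABLE() then gathers at link time. The driver name and compatible string below are hypothetical:

	static int __init my_intc_init(struct device_node *node,
				       struct device_node *parent)
	{
		/* map registers, create the irq domain, etc. */
		return 0;
	}
	IRQCHIP_DECLARE(my_intc, "vendor,my-intc", my_intc_init);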
