Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/barrier.h               | 55
-rw-r--r--  include/asm-generic/bitops/const_hweight.h  | 17
-rw-r--r--  include/asm-generic/bug.h                   | 60
-rw-r--r--  include/asm-generic/cputime_jiffies.h       |  4
-rw-r--r--  include/asm-generic/cputime_nsecs.h         |  5
-rw-r--r--  include/asm-generic/early_ioremap.h         | 42
-rw-r--r--  include/asm-generic/fixmap.h                | 97
-rw-r--r--  include/asm-generic/gpio.h                  |  2
-rw-r--r--  include/asm-generic/hash.h                  |  9
-rw-r--r--  include/asm-generic/int-l64.h               | 49
-rw-r--r--  include/asm-generic/io.h                    |  4
-rw-r--r--  include/asm-generic/iomap.h                 |  2
-rw-r--r--  include/asm-generic/mcs_spinlock.h          | 13
-rw-r--r--  include/asm-generic/percpu.h                | 13
-rw-r--r--  include/asm-generic/pgtable.h               | 64
-rw-r--r--  include/asm-generic/rwsem.h                 | 10
-rw-r--r--  include/asm-generic/uaccess.h               |  2
-rw-r--r--  include/asm-generic/vmlinux.lds.h           | 21
18 files changed, 355 insertions(+), 114 deletions(-)
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 639d7a4d033..6f692f8ac66 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -1,4 +1,5 @@
-/* Generic barrier definitions, based on MN10300 definitions.
+/*
+ * Generic barrier definitions, originally based on MN10300 definitions.
  *
  * It should be possible to use these on really simple architectures,
  * but it serves more as a starting point for new ports.
@@ -16,35 +17,65 @@
 #ifndef __ASSEMBLY__
 
-#define nop() asm volatile ("nop")
+#include <linux/compiler.h>
+
+#ifndef nop
+#define nop() asm volatile ("nop")
+#endif
 
 /*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
+ * Force strict CPU ordering. And yes, this is required on UP too when we're
+ * talking to devices.
  *
- * This implementation only contains a compiler barrier.
+ * Fall back to compiler barriers if nothing better is provided.
  */
-#define mb()	asm volatile ("": : :"memory")
+#ifndef mb
+#define mb()	barrier()
+#endif
+
+#ifndef rmb
 #define rmb()	mb()
-#define wmb()	asm volatile ("": : :"memory")
+#endif
+
+#ifndef wmb
+#define wmb()	mb()
+#endif
+
+#ifndef read_barrier_depends
+#define read_barrier_depends()		do { } while (0)
+#endif
 
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
 #define smp_wmb()	wmb()
+#define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#endif
+
+#ifndef set_mb
+#define set_mb(var, value)  do { (var) = (value); mb(); } while (0)
 #endif
 
-#define set_mb(var, value)  do { var = value;  mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define smp_store_release(p, v)					\
+do {								\
+	compiletime_assert_atomic_type(*p);			\
+	smp_mb();						\
+	ACCESS_ONCE(*p) = (v);					\
+} while (0)
 
-#define read_barrier_depends()		do {} while (0)
-#define smp_read_barrier_depends()	do {} while (0)
+#define smp_load_acquire(p)					\
+({								\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);			\
+	compiletime_assert_atomic_type(*p);			\
+	smp_mb();						\
+	___p1;							\
+})
 
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
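For context: the new smp_store_release()/smp_load_acquire() pair gives generic code one-way ordering, built here from a full smp_mb(). A minimal message-passing sketch of the intended usage (the payload/ready names are illustrative, not part of this patch):

    /* Writer publishes data, then sets the flag with release semantics. */
    static int payload;
    static int ready;

    static void producer(void)
    {
            payload = 42;                   /* plain store */
            smp_store_release(&ready, 1);   /* orders the store above before the flag */
    }

    /* Reader: acquire-load the flag; the payload read cannot move above it. */
    static int consumer(void)
    {
            if (smp_load_acquire(&ready))
                    return payload;         /* guaranteed to observe 42 */
            return -1;
    }

The compiletime_assert_atomic_type() check rejects types the CPU cannot load or store in a single access, which is what keeps the flag update itself atomic.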
diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h
index fa2a50b7ee6..0a7e0662347 100644
--- a/include/asm-generic/bitops/const_hweight.h
+++ b/include/asm-generic/bitops/const_hweight.h
@@ -5,14 +5,15 @@
  * Compile time versions of __arch_hweightN()
  */
 #define __const_hweight8(w)		\
-	( (!!((w) & (1ULL << 0))) +	\
-	  (!!((w) & (1ULL << 1))) +	\
-	  (!!((w) & (1ULL << 2))) +	\
-	  (!!((w) & (1ULL << 3))) +	\
-	  (!!((w) & (1ULL << 4))) +	\
-	  (!!((w) & (1ULL << 5))) +	\
-	  (!!((w) & (1ULL << 6))) +	\
-	  (!!((w) & (1ULL << 7))) )
+	((unsigned int)			\
+	 ((!!((w) & (1ULL << 0))) +	\
+	  (!!((w) & (1ULL << 1))) +	\
+	  (!!((w) & (1ULL << 2))) +	\
+	  (!!((w) & (1ULL << 3))) +	\
+	  (!!((w) & (1ULL << 4))) +	\
+	  (!!((w) & (1ULL << 5))) +	\
+	  (!!((w) & (1ULL << 6))) +	\
+	  (!!((w) & (1ULL << 7)))))
 
 #define __const_hweight16(w) (__const_hweight8(w)  + __const_hweight8((w)  >> 8 ))
 #define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7d10f962aa1..630dd237223 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -52,7 +52,7 @@ struct bug_entry {
 #endif
 
 #ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
 #endif
 
 /*
@@ -106,33 +106,6 @@ extern void warn_slowpath_null(const char *file, const int line);
 	unlikely(__ret_warn_on);					\
 })
 
-#else /* !CONFIG_BUG */
-#ifndef HAVE_ARCH_BUG
-#define BUG() do {} while(0)
-#endif
-
-#ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (condition) ; } while(0)
-#endif
-
-#ifndef HAVE_ARCH_WARN_ON
-#define WARN_ON(condition) ({						\
-	int __ret_warn_on = !!(condition);				\
-	unlikely(__ret_warn_on);					\
-})
-#endif
-
-#ifndef WARN
-#define WARN(condition, format...) ({					\
-	int __ret_warn_on = !!(condition);				\
-	unlikely(__ret_warn_on);					\
-})
-#endif
-
-#define WARN_TAINT(condition, taint, format...) WARN_ON(condition)
-
-#endif
-
 #define WARN_ON_ONCE(condition)	({				\
 	static bool __section(.data.unlikely) __warned;		\
 	int __ret_warn_once = !!(condition);			\
@@ -163,6 +136,37 @@ extern void warn_slowpath_null(const char *file, const int line);
 	unlikely(__ret_warn_once);				\
 })
 
+#else /* !CONFIG_BUG */
+#ifndef HAVE_ARCH_BUG
+#define BUG() do {} while (1)
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (condition) ; } while (0)
+#endif
+
+#ifndef HAVE_ARCH_WARN_ON
+#define WARN_ON(condition) ({						\
+	int __ret_warn_on = !!(condition);				\
+	unlikely(__ret_warn_on);					\
+})
+#endif
+
+#ifndef WARN
+#define WARN(condition, format...) ({					\
+	int __ret_warn_on = !!(condition);				\
+	no_printk(format);						\
+	unlikely(__ret_warn_on);					\
+})
+#endif
+
+#define WARN_ON_ONCE(condition) WARN_ON(condition)
+#define WARN_ONCE(condition, format...) WARN(condition, format)
+#define WARN_TAINT(condition, taint, format...) WARN(condition, format)
+#define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format)
+
+#endif
+
 /*
  * WARN_ON_SMP() is for cases that the warning is either
  * meaningless for !SMP or may even cause failures.
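The reworked !CONFIG_BUG stubs matter for build coverage: BUG() now loops forever instead of falling through, and WARN() feeds its arguments through no_printk(), so format strings stay type-checked and condition side effects still run even when the warning text is compiled out. A hedged sketch of a caller (do_probe() is a made-up helper):

    /* Compiles cleanly with or without CONFIG_BUG: the condition is still
     * evaluated and 'ret' is still consumed by the format arguments, so
     * there is no behaviour change and no unused-variable warning. */
    static int check_device(struct device *dev)
    {
            int ret = do_probe(dev);        /* hypothetical helper */

            WARN_ONCE(ret < 0, "probe failed: %d\n", ret);
            return ret;
    }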
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
index 272ecba9f58..d5cb78f5398 100644
--- a/include/asm-generic/cputime_jiffies.h
+++ b/include/asm-generic/cputime_jiffies.h
@@ -15,8 +15,10 @@ typedef u64 __nocast cputime64_t;
 
 
 /*
- * Convert nanoseconds to cputime
+ * Convert nanoseconds <-> cputime
  */
+#define cputime_to_nsecs(__ct)		\
+	jiffies_to_nsecs(cputime_to_jiffies(__ct))
 #define nsecs_to_cputime64(__nsec)	\
 	jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec))
 #define nsecs_to_cputime(__nsec)	\
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 2c9e62c2bfd..4e817606c54 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -44,7 +44,10 @@ typedef u64 __nocast cputime64_t;
 /*
  * Convert cputime <-> nanoseconds
  */
-#define nsecs_to_cputime(__nsecs)	((__force u64)(__nsecs))
+#define cputime_to_nsecs(__ct)		\
+	(__force u64)(__ct)
+#define nsecs_to_cputime(__nsecs)	\
+	(__force cputime_t)(__nsecs)
 
 
 /*
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
new file mode 100644
index 00000000000..a5de55c04fb
--- /dev/null
+++ b/include/asm-generic/early_ioremap.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_EARLY_IOREMAP_H_
+#define _ASM_EARLY_IOREMAP_H_
+
+#include <linux/types.h>
+
+/*
+ * early_ioremap() and early_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ */
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+				   unsigned long size);
+extern void *early_memremap(resource_size_t phys_addr,
+			    unsigned long size);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
+extern void early_memunmap(void *addr, unsigned long size);
+
+/*
+ * Weak function called by early_ioremap_reset(). It does nothing, but
+ * architectures may provide their own version to do any needed cleanups.
+ */
+extern void early_ioremap_shutdown(void);
+
+#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
+/* Arch-specific initialization */
+extern void early_ioremap_init(void);
+
+/* Generic initialization called by architecture code */
+extern void early_ioremap_setup(void);
+
+/*
+ * Called as last step in paging_init() so library can act
+ * accordingly for subsequent map/unmap requests.
+ */
+extern void early_ioremap_reset(void);
+
+#else
+static inline void early_ioremap_init(void) { }
+static inline void early_ioremap_setup(void) { }
+static inline void early_ioremap_reset(void) { }
+#endif
+
+#endif /* _ASM_EARLY_IOREMAP_H_ */
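early_ioremap()/early_iounmap() cover the window before the normal ioremap() machinery is up, and every early mapping must be torn down again before early_ioremap_reset() runs. A sketch of typical boot-time use (the physical address is made up):

    /* Peek at a firmware table during early boot, before ioremap() works. */
    static void __init inspect_fw_table(void)
    {
            void __iomem *p;

            p = early_ioremap(0xfeed0000, PAGE_SIZE);   /* invented address */
            if (!p)
                    return;
            pr_info("fw signature: 0x%08x\n", readl(p));
            early_iounmap(p, PAGE_SIZE);    /* must be balanced before boot finishes */
    }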
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
new file mode 100644
index 00000000000..5a64ca4621f
--- /dev/null
+++ b/include/asm-generic/fixmap.h
@@ -0,0 +1,97 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
+ * Break out common bits to asm-generic by Mark Salter, November 2013
+ */
+
+#ifndef __ASM_GENERIC_FIXMAP_H
+#define __ASM_GENERIC_FIXMAP_H
+
+#include <linux/bug.h>
+
+#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-deference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+	BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+	return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+	return __virt_to_fix(vaddr);
+}
+
+/*
+ * Provide some reasonable defaults for page flags.
+ * Not all architectures use all of these different types and some
+ * architectures use different names.
+ */
+#ifndef FIXMAP_PAGE_NORMAL
+#define FIXMAP_PAGE_NORMAL PAGE_KERNEL
+#endif
+#ifndef FIXMAP_PAGE_NOCACHE
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
+#endif
+#ifndef FIXMAP_PAGE_IO
+#define FIXMAP_PAGE_IO PAGE_KERNEL_IO
+#endif
+#ifndef FIXMAP_PAGE_CLEAR
+#define FIXMAP_PAGE_CLEAR __pgprot(0)
+#endif
+
+#ifndef set_fixmap
+#define set_fixmap(idx, phys)				\
+	__set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL)
+#endif
+
+#ifndef clear_fixmap
+#define clear_fixmap(idx)			\
+	__set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR)
+#endif
+
+/* Return a pointer with offset calculated */
+#define __set_fixmap_offset(idx, phys, flags)			\
+({								\
+	unsigned long addr;					\
+	__set_fixmap(idx, phys, flags);				\
+	addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1));	\
+	addr;							\
+})
+
+#define set_fixmap_offset(idx, phys) \
+	__set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL)
+
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+	__set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+#define set_fixmap_offset_nocache(idx, phys) \
+	__set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+/*
+ * Some fixmaps are for IO
+ */
+#define set_fixmap_io(idx, phys) \
+	__set_fixmap(idx, phys, FIXMAP_PAGE_IO)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_FIXMAP_H */
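Fixmap slots are compile-time virtual addresses, so __set_fixmap_offset() can hand back a usable pointer without any allocator: it maps the page and folds the sub-page offset of phys back into the returned address. A sketch, assuming the architecture defines a FIX_EXAMPLE slot in its fixed_addresses enum (the slot name is invented):

    /* Map the page containing 'phys' at a fixed slot and return the exact
     * byte address; the nocache variant suits device registers. */
    static void __iomem *map_cfg_reg(phys_addr_t phys)
    {
            unsigned long vaddr;

            vaddr = set_fixmap_offset_nocache(FIX_EXAMPLE, phys);
            return (void __iomem *)vaddr;
    }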
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index a5f56a0213a..23e364538ab 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -69,7 +69,7 @@ static inline int gpio_direction_input(unsigned gpio)
 }
 static inline int gpio_direction_output(unsigned gpio, int value)
 {
-	return gpiod_direction_output(gpio_to_desc(gpio), value);
+	return gpiod_direction_output_raw(gpio_to_desc(gpio), value);
 }
 
 static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h
new file mode 100644
index 00000000000..b6312843dbd
--- /dev/null
+++ b/include/asm-generic/hash.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+struct fast_hash_ops;
+static inline void setup_arch_fast_hash(struct fast_hash_ops *ops)
+{
+}
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
deleted file mode 100644
index 27d4ec0dfce..00000000000
--- a/include/asm-generic/int-l64.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * asm-generic/int-l64.h
- *
- * Integer declarations for architectures which use "long"
- * for 64-bit types.
- */
-#ifndef _ASM_GENERIC_INT_L64_H
-#define _ASM_GENERIC_INT_L64_H
-
-#include <uapi/asm-generic/int-l64.h>
-
-
-#ifndef __ASSEMBLY__
-
-typedef signed char s8;
-typedef unsigned char u8;
-
-typedef signed short s16;
-typedef unsigned short u16;
-
-typedef signed int s32;
-typedef unsigned int u32;
-
-typedef signed long s64;
-typedef unsigned long u64;
-
-#define S8_C(x)  x
-#define U8_C(x)  x ## U
-#define S16_C(x) x
-#define U16_C(x) x ## U
-#define S32_C(x) x
-#define U32_C(x) x ## U
-#define S64_C(x) x ## L
-#define U64_C(x) x ## UL
-
-#else /* __ASSEMBLY__ */
-
-#define S8_C(x)  x
-#define U8_C(x)  x
-#define S16_C(x) x
-#define U16_C(x) x
-#define S32_C(x) x
-#define U32_C(x) x
-#define S64_C(x) x
-#define U64_C(x) x
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_GENERIC_INT_L64_H */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index d5afe96adba..975e1cc75ed 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -327,7 +327,7 @@ static inline void iounmap(void __iomem *addr)
 }
 #endif /* CONFIG_MMU */
 
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 #ifndef CONFIG_GENERIC_IOMAP
 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
@@ -341,7 +341,7 @@ static inline void ioport_unmap(void __iomem *p)
 extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
 extern void ioport_unmap(void __iomem *p);
 #endif /* CONFIG_GENERIC_IOMAP */
-#endif /* CONFIG_HAS_IOPORT */
+#endif /* CONFIG_HAS_IOPORT_MAP */
 
 #ifndef xlate_dev_kmem_ptr
 #define xlate_dev_kmem_ptr(p)	p
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 6afd7d6a989..1b41011643a 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -56,7 +56,7 @@ extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long coun
 extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
 extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
 
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 /* Create a virtual mapping cookie for an IO port range */
 extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
 extern void ioport_unmap(void __iomem *);
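The CONFIG_HAS_IOPORT to CONFIG_HAS_IOPORT_MAP rename reflects what the symbol actually guards: only the map/unmap helpers, not port I/O in general. For reference, the guarded interface in a minimal sketch (the port range is invented):

    /* ioport_map() wraps an x86-style port range in an __iomem cookie so
     * the ioread/iowrite accessors work on it the same way as on MMIO. */
    static int probe_legacy_port(void)
    {
            void __iomem *base = ioport_map(0x3f8, 8);  /* made-up range */
            u8 id;

            if (!base)
                    return -ENODEV;
            id = ioread8(base + 1);
            ioport_unmap(base);
            return id;
    }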
diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h
new file mode 100644
index 00000000000..10cd4ffc6ba
--- /dev/null
+++ b/include/asm-generic/mcs_spinlock.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_MCS_SPINLOCK_H
+#define __ASM_MCS_SPINLOCK_H
+
+/*
+ * Architectures can define their own:
+ *
+ *   arch_mcs_spin_lock_contended(l)
+ *   arch_mcs_spin_unlock_contended(l)
+ *
+ * See kernel/locking/mcs_spinlock.c.
+ */
+
+#endif /* __ASM_MCS_SPINLOCK_H */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d17784ea37f..0703aa75b5e 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -56,17 +56,17 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define per_cpu(var, cpu) \
 	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
 
-#ifndef __this_cpu_ptr
-#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#ifndef raw_cpu_ptr
+#define raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
 #endif
 #ifdef CONFIG_DEBUG_PREEMPT
 #define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
 #else
-#define this_cpu_ptr(ptr) __this_cpu_ptr(ptr)
+#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
 #endif
 
 #define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
-#define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var)))
+#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
 
 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
@@ -83,7 +83,7 @@ extern void setup_per_cpu_areas(void);
 #define __get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
 #define __raw_get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
 #define this_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
-#define __this_cpu_ptr(ptr)	this_cpu_ptr(ptr)
+#define raw_cpu_ptr(ptr)	this_cpu_ptr(ptr)
 
 #endif	/* SMP */
 
@@ -122,4 +122,7 @@ extern void setup_per_cpu_areas(void);
 #define PER_CPU_DEF_ATTRIBUTES
 #endif
 
+/* Keep until we have removed all uses of __this_cpu_ptr */
+#define __this_cpu_ptr raw_cpu_ptr
+
 #endif /* _ASM_GENERIC_PERCPU_H_ */
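The __this_cpu_ptr to raw_cpu_ptr rename brings the pointer helper in line with the raw_cpu_* accessor family; the define kept at the bottom is purely transitional. The practical difference, sketched:

    DEFINE_PER_CPU(int, hit_count);

    static void count_hit(void)
    {
            /* this_cpu_ptr(): with CONFIG_DEBUG_PREEMPT this can verify the
             * caller cannot migrate while using the pointer. */
            int *p = this_cpu_ptr(&hit_count);

            (*p)++;
    }

    static void count_hit_raw(void)
    {
            /* raw_cpu_ptr(): no checking; the caller guarantees stability,
             * e.g. because preemption is already disabled. */
            int *p = raw_cpu_ptr(&hit_count);

            (*p)++;
    }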
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index db092345894..1ec08c198b6 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -193,6 +193,19 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
 }
 #endif
 
+#ifndef __HAVE_ARCH_PTE_UNUSED
+/*
+ * Some architectures provide facilities to virtualization guests
+ * so that they can flag allocated pages as unused. This allows the
+ * host to transparently reclaim unused pages. This function returns
+ * whether the pte's page is unused.
+ */
+static inline int pte_unused(pte_t pte)
+{
+	return 0;
+}
+#endif
+
 #ifndef __HAVE_ARCH_PMD_SAME
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
@@ -558,6 +571,18 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
 }
 #endif
 
+#ifndef pmd_move_must_withdraw
+static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
+					 spinlock_t *old_pmd_ptl)
+{
+	/*
+	 * With split pmd lock we also need to move preallocated
+	 * PTE page table if new_pmd is on different PMD page table.
+	 */
+	return new_pmd_ptl != old_pmd_ptl;
+}
+#endif
+
 /*
  * This function is meant to be used by sites walking pagetables with
  * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
@@ -689,6 +714,18 @@ static inline pte_t pte_mknuma(pte_t pte)
 }
 #endif
 
+#ifndef ptep_set_numa
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+				 pte_t *ptep)
+{
+	pte_t ptent = *ptep;
+
+	ptent = pte_mknuma(ptent);
+	set_pte_at(mm, addr, ptep, ptent);
+	return;
+}
+#endif
+
 #ifndef pmd_mknuma
 static inline pmd_t pmd_mknuma(pmd_t pmd)
 {
@@ -696,6 +733,18 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
 	return pmd_clear_flags(pmd, _PAGE_PRESENT);
 }
 #endif
+
+#ifndef pmdp_set_numa
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+				 pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+
+	pmd = pmd_mknuma(pmd);
+	set_pmd_at(mm, addr, pmdp, pmd);
+	return;
+}
+#endif
 #else
 extern int pte_numa(pte_t pte);
 extern int pmd_numa(pmd_t pmd);
@@ -703,6 +752,8 @@ extern pte_t pte_mknonnuma(pte_t pte);
 extern pmd_t pmd_mknonnuma(pmd_t pmd);
 extern pte_t pte_mknuma(pte_t pte);
 extern pmd_t pmd_mknuma(pmd_t pmd);
+extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp);
 #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
 #else
 static inline int pmd_numa(pmd_t pmd)
@@ -730,10 +781,23 @@ static inline pte_t pte_mknuma(pte_t pte)
 	return pte;
 }
 
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+				 pte_t *ptep)
+{
+	return;
+}
+
 static inline pmd_t pmd_mknuma(pmd_t pmd)
 {
 	return pmd;
 }
+
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+				 pmd_t *pmdp)
+{
+	return;
+}
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_MMU */
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index bb1e2cdeb9b..d48bf5a95cc 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_RWSEM_H
-#define _ASM_POWERPC_RWSEM_H
+#ifndef _ASM_GENERIC_RWSEM_H
+#define _ASM_GENERIC_RWSEM_H
 
 #ifndef _LINUX_RWSEM_H
 #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
@@ -8,7 +8,7 @@
 #ifdef __KERNEL__
 
 /*
- * R/W semaphores for PPC using the stuff in lib/rwsem.c.
+ * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
  * Adapted largely from include/asm-i386/rwsem.h
  * by Paul Mackerras <paulus@samba.org>.
  */
@@ -16,7 +16,7 @@
 /*
  * the semaphore definition
  */
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_64BIT
 # define RWSEM_ACTIVE_MASK		0xffffffffL
 #else
 # define RWSEM_ACTIVE_MASK		0x0000ffffL
@@ -129,4 +129,4 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 }
 
 #endif	/* __KERNEL__ */
-#endif	/* _ASM_POWERPC_RWSEM_H */
+#endif	/* _ASM_GENERIC_RWSEM_H */
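With the PowerPC guards replaced by CONFIG_64BIT, any architecture can adopt this header through <linux/rwsem.h>; the wider RWSEM_ACTIVE_MASK on 64-bit simply leaves room for more concurrent readers in the counter. Usage is unchanged, sketched here for completeness:

    static DECLARE_RWSEM(cfg_rwsem);
    static int cfg_value;

    static int read_cfg(void)
    {
            int v;

            down_read(&cfg_rwsem);          /* readers may run concurrently */
            v = cfg_value;
            up_read(&cfg_rwsem);
            return v;
    }

    static void write_cfg(int v)
    {
            down_write(&cfg_rwsem);         /* writers are exclusive */
            cfg_value = v;
            up_write(&cfg_rwsem);
    }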
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index dc1269c74a5..72d8803832f 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -3,7 +3,7 @@
 
 /*
  * User space memory access functions, these should work
- * on a ny machine that has kernel and user data in the same
+ * on any machine that has kernel and user data in the same
  * address space, e.g. all NOMMU machines.
  */
 #include <linux/sched.h>
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bc2121fa913..146e4fffd71 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -167,6 +167,25 @@
 #define CLK_OF_TABLES()
 #endif
 
+#ifdef CONFIG_OF_RESERVED_MEM
+#define RESERVEDMEM_OF_TABLES()				\
+	. = ALIGN(8);					\
+	VMLINUX_SYMBOL(__reservedmem_of_table) = .;	\
+	*(__reservedmem_of_table)			\
+	*(__reservedmem_of_table_end)
+#else
+#define RESERVEDMEM_OF_TABLES()
+#endif
+
+#ifdef CONFIG_SMP
+#define CPU_METHOD_OF_TABLES() . = ALIGN(8);				    \
+			   VMLINUX_SYMBOL(__cpu_method_of_table_begin) = .; \
+			   *(__cpu_method_of_table)			    \
+			   VMLINUX_SYMBOL(__cpu_method_of_table_end) = .;
+#else
+#define CPU_METHOD_OF_TABLES()
+#endif
+
 #define KERNEL_DTB()							\
 	STRUCT_ALIGN();							\
 	VMLINUX_SYMBOL(__dtb_start) = .;				\
@@ -490,7 +509,9 @@
 	TRACE_SYSCALLS()						\
 	MEM_DISCARD(init.rodata)					\
 	CLK_OF_TABLES()							\
+	RESERVEDMEM_OF_TABLES()						\
 	CLKSRC_OF_TABLES()						\
+	CPU_METHOD_OF_TABLES()						\
 	KERNEL_DTB()							\
 	IRQCHIP_OF_MATCH_TABLE()
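Both new vmlinux.lds.h macros follow the kernel's usual linker-table pattern: translation units drop entries into a named section, and the script surrounds the section with marker symbols that generic code walks at runtime. A hedged sketch of the consumer side (the struct layout and helper names are illustrative, not the real OF table definitions):

    /* Hypothetical entry type collected in the __cpu_method_of_table section. */
    struct cpu_method_example {
            const char *method;
            int (*init)(unsigned int cpu);
    };

    #define CPU_METHOD_EXAMPLE_DECLARE(name, m, i)                      \
            static const struct cpu_method_example                      \
            __cpu_method_of_table_##name                                \
            __used __section(__cpu_method_of_table) = { m, i }

    /* The linker-script symbols bound the array for iteration: */
    extern const struct cpu_method_example __cpu_method_of_table_begin[];
    extern const struct cpu_method_example __cpu_method_of_table_end[];

    static int init_cpu_by_method(const char *method, unsigned int cpu)
    {
            const struct cpu_method_example *m;

            for (m = __cpu_method_of_table_begin;
                 m < __cpu_method_of_table_end; m++)
                    if (!strcmp(m->method, method))
                            return m->init(cpu);
            return -ENOENT;
    }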