Diffstat (limited to 'arch/m32r/include')
-rw-r--r--  arch/m32r/include/asm/Kbuild          |  4
-rw-r--r--  arch/m32r/include/asm/atomic.h        |  7
-rw-r--r--  arch/m32r/include/asm/barrier.h       | 80
-rw-r--r--  arch/m32r/include/asm/bitops.h        |  6
-rw-r--r--  arch/m32r/include/asm/cputime.h       |  6
-rw-r--r--  arch/m32r/include/asm/hardirq.h       | 16
-rw-r--r--  arch/m32r/include/asm/mmu_context.h   |  2
-rw-r--r--  arch/m32r/include/asm/pgalloc.h       |  7
-rw-r--r--  arch/m32r/include/asm/thread_info.h   |  2
-rw-r--r--  arch/m32r/include/uapi/asm/socket.h   |  4
10 files changed, 19 insertions, 115 deletions
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index bebdc36ebb0..67779a74b62 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -1,5 +1,9 @@
 generic-y += clkdev.h
+generic-y += cputime.h
 generic-y += exec.h
+generic-y += hash.h
+generic-y += mcs_spinlock.h
 generic-y += module.h
+generic-y += preempt.h
 generic-y += trace_clock.h
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 0d81697c326..8ad0ed4182a 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -13,6 +13,7 @@
 #include <asm/assembler.h>
 #include <asm/cmpxchg.h>
 #include <asm/dcache_clear.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -308,10 +309,4 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
 	local_irq_restore(flags);
 }
 
-/* Atomic operations are already serializing on m32r */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif	/* _ASM_M32R_ATOMIC_H */
diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h
index 6976621efd3..1a40265e8d8 100644
--- a/arch/m32r/include/asm/barrier.h
+++ b/arch/m32r/include/asm/barrier.h
@@ -11,84 +11,6 @@
 
 #define nop()  __asm__ __volatile__ ("nop" : : )
 
-/*
- * Memory barrier.
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- */
-#define mb()   barrier()
-#define rmb()  mb()
-#define wmb()  mb()
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()	do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
+#include <asm-generic/barrier.h>
 
 #endif	/* _ASM_M32R_BARRIER_H */
diff --git a/arch/m32r/include/asm/bitops.h b/arch/m32r/include/asm/bitops.h
index d3dea9ac7d4..86ba2b42a6c 100644
--- a/arch/m32r/include/asm/bitops.h
+++ b/arch/m32r/include/asm/bitops.h
@@ -21,6 +21,7 @@
 #include <asm/byteorder.h>
 #include <asm/dcache_clear.h>
 #include <asm/types.h>
+#include <asm/barrier.h>
 
 /*
  * These have to be done with inline assembly: that way the bit-setting
@@ -73,7 +74,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
  *
  * clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void clear_bit(int nr, volatile void * addr)
@@ -103,9 +104,6 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
 	local_irq_restore(flags);
 }
 
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
 /**
  * change_bit - Toggle a bit in memory
  * @nr: Bit to clear
diff --git a/arch/m32r/include/asm/cputime.h b/arch/m32r/include/asm/cputime.h
deleted file mode 100644
index 0a47550df2b..00000000000
--- a/arch/m32r/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __M32R_CPUTIME_H
-#define __M32R_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __M32R_CPUTIME_H */
diff --git a/arch/m32r/include/asm/hardirq.h b/arch/m32r/include/asm/hardirq.h
index 4c31c0ae215..5f2ac4f64dd 100644
--- a/arch/m32r/include/asm/hardirq.h
+++ b/arch/m32r/include/asm/hardirq.h
@@ -3,22 +3,6 @@
 #define __ASM_HARDIRQ_H
 
 #include <asm/irq.h>
-
-#if NR_IRQS > 256
-#define HARDIRQ_BITS	9
-#else
-#define HARDIRQ_BITS	8
-#endif
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 #include <asm-generic/hardirq.h>
 
 #endif /* __ASM_HARDIRQ_H */
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index a979a419816..9fc78fc4444 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -45,7 +45,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 		   Flush all TLB and start new cycle. */
 		local_flush_tlb_all();
 		/* Fix version if needed.
-		   Note that we avoid version #0 to distingush NO_CONTEXT. */
+		   Note that we avoid version #0 to distinguish NO_CONTEXT. */
 		if (!mc)
 			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
 	}
diff --git a/arch/m32r/include/asm/pgalloc.h b/arch/m32r/include/asm/pgalloc.h
index 0fc73619897..2d55a064cca 100644
--- a/arch/m32r/include/asm/pgalloc.h
+++ b/arch/m32r/include/asm/pgalloc.h
@@ -43,7 +43,12 @@ static __inline__ pgtable_t pte_alloc_one(struct mm_struct *mm,
 {
 	struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
 
-	pgtable_page_ctor(pte);
+	if (!pte)
+		return NULL;
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
 
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index c074f4c2e85..00171703402 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -53,8 +53,6 @@ struct thread_info {
 
 #endif
 
-#define PREEMPT_ACTIVE	0x10000000
-
 #define THREAD_SIZE (PAGE_SIZE << 1)
 #define THREAD_SIZE_ORDER 1
 /*
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 24be7c8da86..6c9a24b3aef 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -76,4 +76,8 @@
 
 #define SO_BUSY_POLL	46
 
+#define SO_MAX_PACING_RATE	47
+
+#define SO_BPF_EXTENSIONS	48
+
 #endif	/* _ASM_M32R_SOCKET_H */
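A note on the barrier.h hunk above: the hand-rolled m32r barrier macros are replaced by <asm-generic/barrier.h>. As a simplified sketch (for illustration only, not a verbatim copy of the generic header), the fallbacks it supplies collapse to the same compiler barriers the deleted code defined, so the conversion changes nothing on a CPU with no hardware reordering to fence:

/*
 * Simplified sketch of the <asm-generic/barrier.h> fallbacks; not a
 * verbatim copy of the generic header. With no architecture overrides,
 * every barrier degenerates to a compiler barrier, matching the m32r
 * definitions deleted above.
 */
#ifndef mb
#define mb()	barrier()
#endif
#ifndef rmb
#define rmb()	mb()
#endif
#ifndef wmb
#define wmb()	mb()
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif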
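The bitops.h comment change tracks the kernel-wide rename of smp_mb__before_clear_bit()/smp_mb__after_clear_bit() to the generic smp_mb__before_atomic()/smp_mb__after_atomic() pair, mirroring the smp_mb__{before,after}_atomic_{dec,inc}() removal in atomic.h. A hypothetical sketch of the locking pattern that comment describes (my_flags, MY_LOCK_BIT, and my_bit_unlock are made-up names, not part of this patch):

#include <linux/bitops.h>

#define MY_LOCK_BIT	0		/* hypothetical bit index */

static unsigned long my_flags;		/* hypothetical flag word */

static void my_bit_unlock(void)
{
	/*
	 * clear_bit() is atomic but carries no memory barrier, so
	 * order all prior stores before the releasing clear.
	 */
	smp_mb__before_atomic();
	clear_bit(MY_LOCK_BIT, &my_flags);
}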
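On the uapi side, the two new socket options match the values used by the other architectures' socket.h: SO_MAX_PACING_RATE (47) caps a socket's transmit pacing rate, and SO_BPF_EXTENSIONS (48) lets userspace query which BPF extensions the kernel supports. A minimal userspace sketch of setting a pacing cap, assuming the rate is expressed in bytes per second (cap_pacing_rate is an illustrative helper, not part of this patch):

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE	47	/* value added by this patch */
#endif

/* Illustrative helper: cap a socket's pacing rate; returns 0 on success. */
int cap_pacing_rate(int fd, unsigned int bytes_per_sec)
{
	if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
		       &bytes_per_sec, sizeof(bytes_per_sec)) < 0) {
		perror("setsockopt(SO_MAX_PACING_RATE)");
		return -1;
	}
	return 0;
}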
