Diffstat (limited to 'arch/m32r')
-rw-r--r--  arch/m32r/Kconfig                    |  14
-rw-r--r--  arch/m32r/Kconfig.debug              |   7
-rw-r--r--  arch/m32r/include/asm/Kbuild         |   4
-rw-r--r--  arch/m32r/include/asm/atomic.h       |   7
-rw-r--r--  arch/m32r/include/asm/barrier.h      |  80
-rw-r--r--  arch/m32r/include/asm/bitops.h       |   6
-rw-r--r--  arch/m32r/include/asm/cputime.h      |   6
-rw-r--r--  arch/m32r/include/asm/elf.h          |   3
-rw-r--r--  arch/m32r/include/asm/hardirq.h      |  16
-rw-r--r--  arch/m32r/include/asm/mmu_context.h  |   2
-rw-r--r--  arch/m32r/include/asm/pgalloc.h      |   7
-rw-r--r--  arch/m32r/include/asm/pgtable.h      |   3
-rw-r--r--  arch/m32r/include/asm/signal.h       |  11
-rw-r--r--  arch/m32r/include/asm/thread_info.h  |   2
-rw-r--r--  arch/m32r/include/asm/uaccess.h      |  12
-rw-r--r--  arch/m32r/include/asm/unistd.h       |  12
-rw-r--r--  arch/m32r/include/uapi/asm/socket.h  |  12
-rw-r--r--  arch/m32r/include/uapi/asm/stat.h    |   4
-rw-r--r--  arch/m32r/kernel/entry.S             |   8
-rw-r--r--  arch/m32r/kernel/process.c           |  67
-rw-r--r--  arch/m32r/kernel/signal.c            |  16
-rw-r--r--  arch/m32r/kernel/smpboot.c           |   4
-rw-r--r--  arch/m32r/kernel/time.c              |   4
-rw-r--r--  arch/m32r/kernel/traps.c             |  15
-rw-r--r--  arch/m32r/mm/discontig.c             |   6
-rw-r--r--  arch/m32r/mm/fault.c                 |  10
-rw-r--r--  arch/m32r/mm/init.c                  |  90
27 files changed, 72 insertions, 356 deletions
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index f807721e19a..9e44bbd8051 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -9,12 +9,13 @@ config M32R
select HAVE_KERNEL_LZMA
select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_DEBUG_BUGVERBOSE
- select HAVE_GENERIC_HARDIRQS
+ select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_ATOMIC64
select ARCH_USES_GETTIMEOFFSET
select MODULES_USE_ELF_RELA
+ select HAVE_DEBUG_STACKOVERFLOW
config SBUS
bool
@@ -27,7 +28,7 @@ config ZONE_DMA
bool
default y
-config NO_IOPORT
+config NO_IOPORT_MAP
def_bool y
config NO_DMA
@@ -274,16 +275,15 @@ source "kernel/Kconfig.preempt"
config SMP
bool "Symmetric multi-processing support"
- select USE_GENERIC_SMP_HELPERS
---help---
This enables support for systems with more than one CPU. If you have
- a system with only one CPU, like most personal computers, say N. If
- you have a system with more than one CPU, say Y.
+ a system with only one CPU, say N. If you have a system with more
+ than one CPU, say Y.
- If you say N here, the kernel will run on single and multiprocessor
+ If you say N here, the kernel will run on uni- and multiprocessor
machines, but will use only one CPU of a multiprocessor machine. If
you say Y here, the kernel will run on many, but not all,
- singleprocessor machines. On a singleprocessor machine, the kernel
+ uniprocessor machines. On a uniprocessor machine, the kernel
will run faster if you say N here.
People using multiprocessor machines who say Y here should also say
diff --git a/arch/m32r/Kconfig.debug b/arch/m32r/Kconfig.debug
index bb1afc1a31c..6c612b7691b 100644
--- a/arch/m32r/Kconfig.debug
+++ b/arch/m32r/Kconfig.debug
@@ -2,13 +2,6 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
-config DEBUG_STACKOVERFLOW
- bool "Check for stack overflows"
- depends on DEBUG_KERNEL
- help
- This option will cause messages to be printed if free stack space
- drops below a certain limit.
-
config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
depends on DEBUG_KERNEL && BROKEN
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index bebdc36ebb0..67779a74b62 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -1,5 +1,9 @@
generic-y += clkdev.h
+generic-y += cputime.h
generic-y += exec.h
+generic-y += hash.h
+generic-y += mcs_spinlock.h
generic-y += module.h
+generic-y += preempt.h
generic-y += trace_clock.h
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 0d81697c326..8ad0ed4182a 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -13,6 +13,7 @@
#include <asm/assembler.h>
#include <asm/cmpxchg.h>
#include <asm/dcache_clear.h>
+#include <asm/barrier.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -308,10 +309,4 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
local_irq_restore(flags);
}
-/* Atomic operations are already serializing on m32r */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
#endif /* _ASM_M32R_ATOMIC_H */
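The four per-operation barrier macros removed above are superseded by the generic smp_mb__before_atomic()/smp_mb__after_atomic() pair, made available via the newly included <asm/barrier.h>; since m32r's atomics already serialize, all of these reduce to a compiler barrier. Caller-side, the rename looks roughly like this (an illustrative sketch, not code from this diff):

	atomic_dec(&v);
	smp_mb__after_atomic();		/* was: smp_mb__after_atomic_dec() */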
diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h
index 6976621efd3..1a40265e8d8 100644
--- a/arch/m32r/include/asm/barrier.h
+++ b/arch/m32r/include/asm/barrier.h
@@ -11,84 +11,6 @@
#define nop() __asm__ __volatile__ ("nop" : : )
-/*
- * Memory barrier.
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- */
-#define mb() barrier()
-#define rmb() mb()
-#define wmb() mb()
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * b = 2;
- * memory_barrier();
- * p = &b; q = p;
- * read_barrier_depends();
- * d = *q;
- * </programlisting>
- *
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * a = 2;
- * memory_barrier();
- * b = 3; y = b;
- * read_barrier_depends();
- * x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
+#include <asm-generic/barrier.h>
#endif /* _ASM_M32R_BARRIER_H */
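With the open-coded definitions gone, <asm-generic/barrier.h> supplies the defaults. For an arch like m32r that overrides nothing, the generic header amounts to roughly the following (a condensed paraphrase, not the verbatim header):

	#ifndef mb
	#define mb()	barrier()
	#endif
	#ifndef rmb
	#define rmb()	mb()
	#endif
	#ifndef wmb
	#define wmb()	mb()
	#endif

	#ifdef CONFIG_SMP
	#define smp_mb()	mb()
	#define smp_rmb()	rmb()
	#define smp_wmb()	wmb()
	#else
	#define smp_mb()	barrier()
	#define smp_rmb()	barrier()
	#define smp_wmb()	barrier()
	#endif

The net effect is identical semantics with the per-arch boilerplate deleted.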
diff --git a/arch/m32r/include/asm/bitops.h b/arch/m32r/include/asm/bitops.h
index d3dea9ac7d4..86ba2b42a6c 100644
--- a/arch/m32r/include/asm/bitops.h
+++ b/arch/m32r/include/asm/bitops.h
@@ -21,6 +21,7 @@
#include <asm/byteorder.h>
#include <asm/dcache_clear.h>
#include <asm/types.h>
+#include <asm/barrier.h>
/*
* These have to be done with inline assembly: that way the bit-setting
@@ -73,7 +74,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
static __inline__ void clear_bit(int nr, volatile void * addr)
@@ -103,9 +104,6 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
local_irq_restore(flags);
}
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to clear
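The comment fix above tracks the kernel-wide rename; the bit-lock release sequence it describes now reads roughly as follows (illustrative sketch; LOCK_BIT and word are hypothetical names):

	/* release: order the critical section before the bit clears */
	smp_mb__before_atomic();
	clear_bit(LOCK_BIT, &word);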
diff --git a/arch/m32r/include/asm/cputime.h b/arch/m32r/include/asm/cputime.h
deleted file mode 100644
index 0a47550df2b..00000000000
--- a/arch/m32r/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __M32R_CPUTIME_H
-#define __M32R_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __M32R_CPUTIME_H */
diff --git a/arch/m32r/include/asm/elf.h b/arch/m32r/include/asm/elf.h
index 70896161c63..8acc9da9a15 100644
--- a/arch/m32r/include/asm/elf.h
+++ b/arch/m32r/include/asm/elf.h
@@ -128,7 +128,4 @@ typedef elf_fpreg_t elf_fpregset_t;
intent than poking at uname or /proc/cpuinfo. */
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#endif /* _ASM_M32R__ELF_H */
diff --git a/arch/m32r/include/asm/hardirq.h b/arch/m32r/include/asm/hardirq.h
index 4c31c0ae215..5f2ac4f64dd 100644
--- a/arch/m32r/include/asm/hardirq.h
+++ b/arch/m32r/include/asm/hardirq.h
@@ -3,22 +3,6 @@
#define __ASM_HARDIRQ_H
#include <asm/irq.h>
-
-#if NR_IRQS > 256
-#define HARDIRQ_BITS 9
-#else
-#define HARDIRQ_BITS 8
-#endif
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
#include <asm-generic/hardirq.h>
#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index a979a419816..9fc78fc4444 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -45,7 +45,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
Flush all TLB and start new cycle. */
local_flush_tlb_all();
/* Fix version if needed.
- Note that we avoid version #0 to distingush NO_CONTEXT. */
+ Note that we avoid version #0 to distinguish NO_CONTEXT. */
if (!mc)
mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
}
diff --git a/arch/m32r/include/asm/pgalloc.h b/arch/m32r/include/asm/pgalloc.h
index 0fc73619897..2d55a064cca 100644
--- a/arch/m32r/include/asm/pgalloc.h
+++ b/arch/m32r/include/asm/pgalloc.h
@@ -43,7 +43,12 @@ static __inline__ pgtable_t pte_alloc_one(struct mm_struct *mm,
{
struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
- pgtable_page_ctor(pte);
+ if (!pte)
+ return NULL;
+ if (!pgtable_page_ctor(pte)) {
+ __free_page(pte);
+ return NULL;
+ }
return pte;
}
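pgtable_page_ctor() stopped being infallible once split page-table locks could require allocating a separate lock object, so both the page allocation and the constructor now get checked, and a failed constructor hands the page straight back. The free side keeps the usual ctor/dtor pairing (a sketch of the conventional pattern that mirrors this header's pte_free()):

	static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
	{
		pgtable_page_dtor(pte);	/* undo the ctor before freeing */
		__free_page(pte);
	}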
diff --git a/arch/m32r/include/asm/pgtable.h b/arch/m32r/include/asm/pgtable.h
index 8a28cfea272..103ce6710f0 100644
--- a/arch/m32r/include/asm/pgtable.h
+++ b/arch/m32r/include/asm/pgtable.h
@@ -347,9 +347,6 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) (1)
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h
index a5ba4a217fb..ed3ded6601e 100644
--- a/arch/m32r/include/asm/signal.h
+++ b/arch/m32r/include/asm/signal.h
@@ -16,16 +16,7 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
+#define __ARCH_HAS_SA_RESTORER
#include <asm/sigcontext.h>
#undef __HAVE_ARCH_SIG_BITOPS
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index c074f4c2e85..00171703402 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -53,8 +53,6 @@ struct thread_info {
#endif
-#define PREEMPT_ACTIVE 0x10000000
-
#define THREAD_SIZE (PAGE_SIZE << 1)
#define THREAD_SIZE_ORDER 1
/*
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index 1c7047bea20..84fe7ba5303 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -216,7 +216,7 @@ extern int fixup_exception(struct pt_regs *regs);
({ \
long __gu_err = 0; \
unsigned long __gu_val; \
- might_sleep(); \
+ might_fault(); \
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -227,7 +227,7 @@ extern int fixup_exception(struct pt_regs *regs);
long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- might_sleep(); \
+ might_fault(); \
if (access_ok(VERIFY_READ,__gu_addr,size)) \
__get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
@@ -295,7 +295,7 @@ do { \
#define __put_user_nocheck(x,ptr,size) \
({ \
long __pu_err; \
- might_sleep(); \
+ might_fault(); \
__put_user_size((x),(ptr),(size),__pu_err); \
__pu_err; \
})
@@ -305,7 +305,7 @@ do { \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- might_sleep(); \
+ might_fault(); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
@@ -597,7 +597,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
*/
#define copy_to_user(to,from,n) \
({ \
- might_sleep(); \
+ might_fault(); \
__generic_copy_to_user((to),(from),(n)); \
})
@@ -638,7 +638,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
*/
#define copy_from_user(to,from,n) \
({ \
- might_sleep(); \
+ might_fault(); \
__generic_copy_from_user((to),(from),(n)); \
})
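might_fault() is the better annotation here because a user access does more than potentially sleep: it can take a page fault and hence mmap_sem, which lockdep should hear about. A condensed paraphrase of the generic implementation from this era (mm/memory.c; not verbatim):

	void might_fault(void)
	{
		/* accesses via KERNEL_DS cannot fault */
		if (segment_eq(get_fs(), KERNEL_DS))
			return;

		might_sleep();
		if (!in_atomic() && current->mm)
			might_lock_read(&current->mm->mmap_sem);
	}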
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index 79b063caec8..59db8019345 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -20,8 +20,6 @@
#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
@@ -50,14 +48,4 @@
#define __IGNORE_getresgid
#define __IGNORE_chown
-/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#ifndef cond_syscall
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-#endif
-
#endif /* _ASM_M32R_UNISTD_H */
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 5e7088a2672..6c9a24b3aef 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -22,7 +22,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
@@ -70,4 +70,14 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
+#define SO_SELECT_ERR_QUEUE 45
+
+#define SO_BUSY_POLL 46
+
+#define SO_MAX_PACING_RATE 47
+
+#define SO_BPF_EXTENSIONS 48
+
#endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/m32r/include/uapi/asm/stat.h b/arch/m32r/include/uapi/asm/stat.h
index da4518f82d6..98470fe483b 100644
--- a/arch/m32r/include/uapi/asm/stat.h
+++ b/arch/m32r/include/uapi/asm/stat.h
@@ -63,10 +63,10 @@ struct stat64 {
long long st_size;
unsigned long st_blksize;
-#if defined(__BIG_ENDIAN)
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
unsigned long __pad4; /* future possible st_blocks high bits */
unsigned long st_blocks; /* Number 512-byte blocks allocated. */
-#elif defined(__LITTLE_ENDIAN)
+#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
unsigned long st_blocks; /* Number 512-byte blocks allocated. */
unsigned long __pad4; /* future possible st_blocks high bits */
#else
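The conditional-in-#if idiom lets one guard serve both worlds: in the kernel only one of __BIG_ENDIAN/__LITTLE_ENDIAN is defined, while userspace <endian.h> defines both unconditionally and distinguishes them via __BYTE_ORDER. Written long-hand, the new test is equivalent to (illustrative expansion):

	#if defined(__BYTE_ORDER)		/* userspace headers */
	# if __BYTE_ORDER == __BIG_ENDIAN
	  /* big-endian layout */
	# endif
	#else					/* kernel: exactly one macro set */
	# if defined(__BIG_ENDIAN)
	  /* big-endian layout */
	# endif
	#endif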
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 0c01543f10c..7c3db9940ce 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -182,13 +182,7 @@ need_resched:
ld r4, PSW(sp) ; interrupts off (exception path) ?
and3 r4, r4, #0x4000
beqz r4, restore_all
- LDIMM (r4, PREEMPT_ACTIVE)
- st r4, @(TI_PRE_COUNT, r8)
- ENABLE_INTERRUPTS(r4)
- bl schedule
- ldi r4, #0
- st r4, @(TI_PRE_COUNT, r8)
- DISABLE_INTERRUPTS(r4)
+ bl preempt_schedule_irq
bra need_resched
#endif
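The removed instructions open-coded what preempt_schedule_irq() now does once in C for every architecture: mark the task PREEMPT_ACTIVE, reenable interrupts, schedule, then undo both. A condensed paraphrase of the scheduler-side helper from this era (kernel/sched/core.c; not verbatim):

	/* entered from the interrupt-return path with irqs disabled */
	asmlinkage void __sched preempt_schedule_irq(void)
	{
		do {
			add_preempt_count(PREEMPT_ACTIVE);
			local_irq_enable();
			__schedule();
			local_irq_disable();
			sub_preempt_count(PREEMPT_ACTIVE);
		} while (need_resched());
	}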
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index 765d0f57c78..e69221d581d 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -44,59 +44,9 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return tsk->thread.lr;
}
-/*
- * Powermanagement idle function, if any..
- */
-static void (*pm_idle)(void) = NULL;
-
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
-/*
- * We use this is we don't have any better
- * idle routine..
- */
-static void default_idle(void)
-{
- /* M32R_FIXME: Please use "cpu_sleep" mode. */
- cpu_relax();
-}
-
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle (void)
-{
- /* M32R_FIXME */
- cpu_relax();
-}
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle (void)
-{
- /* endless idle loop with no priority at all */
- while (1) {
- rcu_idle_enter();
- while (!need_resched()) {
- void (*idle)(void) = pm_idle;
-
- if (!idle)
- idle = default_idle;
-
- idle();
- }
- rcu_idle_exit();
- schedule_preempt_disabled();
- }
-}
-
void machine_restart(char *__unused)
{
#if defined(CONFIG_PLAT_MAPPI3)
@@ -120,24 +70,11 @@ void machine_power_off(void)
/* M32R_FIXME */
}
-static int __init idle_setup (char *str)
-{
- if (!strncmp(str, "poll", 4)) {
- printk("using poll in idle threads.\n");
- pm_idle = poll_idle;
- } else if (!strncmp(str, "sleep", 4)) {
- printk("using sleep in idle threads.\n");
- pm_idle = default_idle;
- }
-
- return 1;
-}
-
-__setup("idle=", idle_setup);
-
void show_regs(struct pt_regs * regs)
{
printk("\n");
+ show_regs_print_info(KERN_DEFAULT);
+
printk("BPC[%08lx]:PSW[%08lx]:LR [%08lx]:FP [%08lx]\n", \
regs->bpc, regs->psw, regs->lr, regs->fp);
printk("BBPC[%08lx]:BBPSW[%08lx]:SPU[%08lx]:SPI[%08lx]\n", \
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 6e3c26a1607..d503568cb75 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -27,15 +27,6 @@
#define DEBUG_SIG 0
-asmlinkage int
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
- unsigned long r2, unsigned long r3, unsigned long r4,
- unsigned long r5, unsigned long r6, struct pt_regs *regs)
-{
- return do_sigaltstack(uss, uoss, regs->spu);
-}
-
-
/*
* Do a signal return; undo the signal stack.
*/
@@ -113,7 +104,7 @@ sys_rt_sigreturn(unsigned long r0, unsigned long r1,
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result))
goto badframe;
- if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->spu) == -EFAULT)
+ if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return result;
@@ -213,10 +204,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->spu),
- &frame->uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->spu);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
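restore_altstack() and __save_altstack() are the generic helpers that absorbed this boilerplate; they perform, against current, exactly the field-by-field transfers deleted above. The save side is roughly this macro from <linux/signal.h> (paraphrased, not verbatim):

	#define __save_altstack(uss, sp) ({					\
		stack_t __user *__uss = uss;					\
		struct task_struct *t = current;				\
		__put_user((void __user *)t->sas_ss_sp, &__uss->ss_sp) |	\
		__put_user(sas_ss_flags(sp), &__uss->ss_flags) |		\
		__put_user(t->sas_ss_size, &__uss->ss_size);			\
	})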
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 13168a769f8..bb21f4f6317 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -343,7 +343,7 @@ static void __init do_boot_cpu(int phys_id)
}
}
-int __cpuinit __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
{
int timeout;
@@ -432,7 +432,7 @@ int __init start_secondary(void *unused)
*/
local_flush_tlb_all();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
return 0;
}
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 84dd04048db..1a15f81ea1b 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -57,7 +57,7 @@ extern void smp_local_timer_interrupt(void);
static unsigned long latch;
-u32 arch_gettimeoffset(void)
+static u32 m32r_gettimeoffset(void)
{
unsigned long elapsed_time = 0; /* [us] */
@@ -165,6 +165,8 @@ void read_persistent_clock(struct timespec *ts)
void __init time_init(void)
{
+ arch_gettimeoffset = m32r_gettimeoffset;
+
#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
|| defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
|| defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
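arch_gettimeoffset used to be a strong function that every ARCH_USES_GETTIMEOFFSET platform had to provide; it is now a function pointer owned by the timekeeping core, so the arch registers its routine at boot instead of overriding a symbol. The pattern, reduced to its essentials (sketch):

	/* timekeeping core: the hook is now a pointer (declaration paraphrased) */
	extern u32 (*arch_gettimeoffset)(void);

	/* arch side: install the implementation once, at time_init() */
	arch_gettimeoffset = m32r_gettimeoffset;

Making m32r_gettimeoffset static also lets the compiler see all of its callers.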
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 3bcb207e5b6..a7a424f852e 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -132,10 +132,8 @@ static void show_trace(struct task_struct *task, unsigned long *stack)
printk("Call Trace: ");
while (!kstack_end(stack)) {
addr = *stack++;
- if (__kernel_text_address(addr)) {
- printk("[<%08lx>] ", addr);
- print_symbol("%s\n", addr);
- }
+ if (__kernel_text_address(addr))
+ printk("[<%08lx>] %pSR\n", addr, (void *)addr);
}
printk("\n");
}
@@ -169,15 +167,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
show_trace(task, sp);
}
-void dump_stack(void)
-{
- unsigned long stack;
-
- show_trace(current, &stack);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
static void show_registers(struct pt_regs *regs)
{
int i = 0;
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index 2c468e8b585..27196303ce3 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -129,11 +129,10 @@ unsigned long __init setup_memory(void)
#define START_PFN(nid) (NODE_DATA(nid)->bdata->node_min_pfn)
#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn)
-unsigned long __init zone_sizes_init(void)
+void __init zone_sizes_init(void)
{
unsigned long zones_size[MAX_NR_ZONES], zholes_size[MAX_NR_ZONES];
unsigned long low, start_pfn;
- unsigned long holes = 0;
int nid, i;
mem_prof_t *mp;
@@ -147,7 +146,6 @@ unsigned long __init zone_sizes_init(void)
low = MAX_LOW_PFN(nid);
zones_size[ZONE_DMA] = low - start_pfn;
zholes_size[ZONE_DMA] = mp->holes;
- holes += zholes_size[ZONE_DMA];
node_set_state(nid, N_NORMAL_MEMORY);
free_area_init_node(nid, zones_size, start_pfn, zholes_size);
@@ -161,6 +159,4 @@ unsigned long __init zone_sizes_init(void)
NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0;
NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0;
NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0;
-
- return holes;
}
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 3cdfa9c1d09..e9c6a8014bd 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
struct mm_struct *mm;
struct vm_area_struct * vma;
unsigned long page, addr;
- int write;
+ unsigned long flags = 0;
int fault;
siginfo_t info;
@@ -117,6 +117,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
if (in_atomic() || !mm)
goto bad_area_nosemaphore;
+ if (error_code & ACE_USERMODE)
+ flags |= FAULT_FLAG_USER;
+
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
@@ -166,14 +169,13 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
*/
good_area:
info.si_code = SEGV_ACCERR;
- write = 0;
switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
default: /* 3: write, present */
/* fall through */
case ACE_WRITE: /* write, not present */
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
- write++;
+ flags |= FAULT_FLAG_WRITE;
break;
case ACE_PROTECTION: /* read, present */
case 0: /* read, not present */
@@ -194,7 +196,7 @@ good_area:
*/
addr = (address & PAGE_MASK);
set_thread_fault_code(error_code);
- fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
+ fault = handle_mm_fault(mm, vma, addr, flags);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
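FAULT_FLAG_USER tells the mm core the fault originated in user mode, which matters for memcg charging and OOM policy, and the write intent moves from a local int into the same flags word. The whole hunk boils down to this flag-building pattern (condensed restatement):

	unsigned long flags = 0;

	if (error_code & ACE_USERMODE)
		flags |= FAULT_FLAG_USER;
	if (error_code & ACE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	fault = handle_mm_fault(mm, vma, addr, flags);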
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 78b660e903d..0d4146f644d 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -28,10 +28,7 @@
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tlb.h>
-
-/* References to section boundaries */
-extern char _text, _etext, _edata;
-extern char __init_begin, __init_end;
+#include <asm/sections.h>
pgd_t swapper_pg_dir[1024];
@@ -43,7 +40,6 @@ unsigned long mmu_context_cache_dat;
#else
unsigned long mmu_context_cache_dat[NR_CPUS];
#endif
-static unsigned long hole_pages;
/*
* function prototype
@@ -60,7 +56,7 @@ void free_initrd_mem(unsigned long, unsigned long);
#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn)
#ifndef CONFIG_DISCONTIGMEM
-unsigned long __init zone_sizes_init(void)
+void __init zone_sizes_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = {0, };
unsigned long max_dma;
@@ -86,11 +82,9 @@ unsigned long __init zone_sizes_init(void)
#endif /* CONFIG_MMU */
free_area_init_node(0, zones_size, start_pfn, 0);
-
- return 0;
}
#else /* CONFIG_DISCONTIGMEM */
-extern unsigned long zone_sizes_init(void);
+extern void zone_sizes_init(void);
#endif /* CONFIG_DISCONTIGMEM */
/*======================================================================*
@@ -108,24 +102,7 @@ void __init paging_init(void)
for (i = 0 ; i < USER_PTRS_PER_PGD * 2 ; i++)
pgd_val(pg_dir[i]) = 0;
#endif /* CONFIG_MMU */
- hole_pages = zone_sizes_init();
-}
-
-int __init reservedpages_count(void)
-{
- int reservedpages, nid, i;
-
- reservedpages = 0;
- for_each_online_node(nid) {
- unsigned long flags;
- pgdat_resize_lock(NODE_DATA(nid), &flags);
- for (i = 0 ; i < MAX_LOW_PFN(nid) - START_PFN(nid) ; i++)
- if (PageReserved(nid_page_nr(nid, i)))
- reservedpages++;
- pgdat_resize_unlock(NODE_DATA(nid), &flags);
- }
-
- return reservedpages;
+ zone_sizes_init();
}
/*======================================================================*
@@ -134,48 +111,20 @@ int __init reservedpages_count(void)
*======================================================================*/
void __init mem_init(void)
{
- int codesize, reservedpages, datasize, initsize;
- int nid;
#ifndef CONFIG_MMU
extern unsigned long memory_end;
-#endif
-
- num_physpages = 0;
- for_each_online_node(nid)
- num_physpages += MAX_LOW_PFN(nid) - START_PFN(nid) + 1;
-
- num_physpages -= hole_pages;
-#ifndef CONFIG_DISCONTIGMEM
- max_mapnr = num_physpages;
-#endif /* CONFIG_DISCONTIGMEM */
-
-#ifdef CONFIG_MMU
- high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0)));
-#else
high_memory = (void *)(memory_end & PAGE_MASK);
+#else
+ high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0)));
#endif /* CONFIG_MMU */
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
- /* this will put all low memory onto the freelists */
- for_each_online_node(nid)
- totalram_pages += free_all_bootmem_node(NODE_DATA(nid));
-
- reservedpages = reservedpages_count() - hole_pages;
- codesize = (unsigned long) &_etext - (unsigned long)&_text;
- datasize = (unsigned long) &_edata - (unsigned long)&_etext;
- initsize = (unsigned long) &__init_end - (unsigned long)&__init_begin;
-
- printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
- "%dk reserved, %dk data, %dk init)\n",
- nr_free_pages() << (PAGE_SHIFT-10),
- num_physpages << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >> 10,
- initsize >> 10);
+ set_max_mapnr(get_num_physpages());
+ free_all_bootmem();
+ mem_init_print_info(NULL);
}
/*======================================================================*
@@ -184,17 +133,7 @@ void __init mem_init(void)
*======================================================================*/
void free_initmem(void)
{
- unsigned long addr;
-
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", \
- (int)(&__init_end - &__init_begin) >> 10);
+ free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -204,13 +143,6 @@ void free_initmem(void)
*======================================================================*/
void free_initrd_mem(unsigned long start, unsigned long end)
{
- unsigned long p;
- for (p = start; p < end; p += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(p));
- init_page_count(virt_to_page(p));
- free_page(p);
- totalram_pages++;
- }
- printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
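free_initmem_default() and free_reserved_area() subsume these hand-rolled loops: clear PageReserved, reset the page count, free each page, bump totalram_pages, and print the familiar "Freeing ... memory" line, with optional poisoning of the range first. The helper's contract, roughly (paraphrased from <linux/mm.h> of this era):

	/* poison < 0 disables poisoning; s names the range in the log line */
	extern unsigned long free_reserved_area(void *start, void *end,
						int poison, char *s);

free_initmem_default(-1) is simply this applied to [__init_begin, __init_end) with poisoning off.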