Diffstat (limited to 'arch/m32r/kernel')
-rw-r--r--  arch/m32r/kernel/.gitignore       |    1
-rw-r--r--  arch/m32r/kernel/Makefile         |    6
-rw-r--r--  arch/m32r/kernel/entry.S          |   31
-rw-r--r--  arch/m32r/kernel/head.S           |    9
-rw-r--r--  arch/m32r/kernel/init_task.c      |   41
-rw-r--r--  arch/m32r/kernel/irq.c            |   51
-rw-r--r--  arch/m32r/kernel/m32r_ksyms.c     |   17
-rw-r--r--  arch/m32r/kernel/module.c         |   55
-rw-r--r--  arch/m32r/kernel/process.c        |  230
-rw-r--r--  arch/m32r/kernel/ptrace.c         |  119
-rw-r--r--  arch/m32r/kernel/semaphore.c      |  185
-rw-r--r--  arch/m32r/kernel/signal.c         |  140
-rw-r--r--  arch/m32r/kernel/smp.c            |  231
-rw-r--r--  arch/m32r/kernel/smpboot.c        |   71
-rw-r--r--  arch/m32r/kernel/sys_m32r.c       |  155
-rw-r--r--  arch/m32r/kernel/syscall_table.S  |    5
-rw-r--r--  arch/m32r/kernel/time.c           |  147
-rw-r--r--  arch/m32r/kernel/traps.c          |   31
-rw-r--r--  arch/m32r/kernel/vmlinux.lds.S    |   93
19 files changed, 278 insertions, 1340 deletions
diff --git a/arch/m32r/kernel/.gitignore b/arch/m32r/kernel/.gitignore
new file mode 100644
index 00000000000..c5f676c3c22
--- /dev/null
+++ b/arch/m32r/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/m32r/kernel/Makefile b/arch/m32r/kernel/Makefile
index e97e26e87c9..0c09dad8b1f 100644
--- a/arch/m32r/kernel/Makefile
+++ b/arch/m32r/kernel/Makefile
@@ -2,12 +2,10 @@
# Makefile for the Linux/M32R kernel.
#
-extra-y := head.o init_task.o vmlinux.lds
+extra-y := head.o vmlinux.lds
obj-y := process.o entry.o traps.o align.o irq.o setup.o time.o \
- m32r_ksyms.o sys_m32r.o semaphore.o signal.o ptrace.o
+ m32r_ksyms.o sys_m32r.o signal.o ptrace.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_MODULES) += module.o
-
-EXTRA_AFLAGS := -traditional
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index d4eaa2fd181..7c3db9940ce 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -118,6 +118,22 @@
#define resume_kernel restore_all
#endif
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg) GET_THREAD_INFO reg
+ .macro GET_THREAD_INFO reg
+ ldi \reg, #-THREAD_SIZE
+ and \reg, sp
+ .endm
+
+ENTRY(ret_from_kernel_thread)
+ pop r0
+ bl schedule_tail
+ GET_THREAD_INFO(r8)
+ ld r0, R0(r8)
+ ld r1, R1(r8)
+ jl r1
+ bra syscall_exit
+
ENTRY(ret_from_fork)
pop r0
bl schedule_tail
@@ -143,7 +159,7 @@ ret_from_intr:
and3 r4, r4, #0x8000 ; check BSM bit
#endif
beqz r4, resume_kernel
-ENTRY(resume_userspace)
+resume_userspace:
DISABLE_INTERRUPTS(r4) ; make sure we don't miss an interrupt
; setting need_resched or sigpending
; between sampling and the iret
@@ -166,13 +182,7 @@ need_resched:
ld r4, PSW(sp) ; interrupts off (exception path) ?
and3 r4, r4, #0x4000
beqz r4, restore_all
- LDIMM (r4, PREEMPT_ACTIVE)
- st r4, @(TI_PRE_COUNT, r8)
- ENABLE_INTERRUPTS(r4)
- bl schedule
- ldi r4, #0
- st r4, @(TI_PRE_COUNT, r8)
- DISABLE_INTERRUPTS(r4)
+ bl preempt_schedule_irq
bra need_resched
#endif
@@ -228,10 +238,9 @@ work_resched:
work_notifysig: ; deal with pending signals and
; notify-resume requests
mv r0, sp ; arg1 : struct pt_regs *regs
- ldi r1, #0 ; arg2 : sigset_t *oldset
- mv r2, r9 ; arg3 : __u32 thread_info_flags
+ mv r1, r9 ; arg2 : __u32 thread_info_flags
bl do_notify_resume
- bra restore_all
+ bra resume_userspace
; perform syscall exit tracing
ALIGN
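
[annotation] The new ret_from_kernel_thread entry above pairs with the PF_KTHREAD branch added to copy_thread() in process.c further down: copy_thread() parks the thread function in the saved r1 slot and its argument in r0, and this stub picks them up after the first context switch. A rough C rendering, for illustration only (the authoritative code is the assembly above):

/* illustrative sketch, not code from this patch */
void ret_from_kernel_thread_c(struct task_struct *prev)
{
	struct pt_regs *regs = task_pt_regs(current);
	int (*fn)(void *) = (int (*)(void *))regs->r1;	/* stored by copy_thread() */
	void *arg = (void *)regs->r0;

	schedule_tail(prev);	/* switch bookkeeping; prev is the "pop r0" above */
	fn(arg);
	/* the assembly then falls through to syscall_exit */
}
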
diff --git a/arch/m32r/kernel/head.S b/arch/m32r/kernel/head.S
index dab7436d7bb..a46652dd83e 100644
--- a/arch/m32r/kernel/head.S
+++ b/arch/m32r/kernel/head.S
@@ -23,13 +23,12 @@ __INITDATA
/*
* References to members of the boot_cpu_data structure.
*/
- .text
+__HEAD
.global start_kernel
.global __bss_start
.global _end
ENTRY(stext)
ENTRY(_stext)
-ENTRY(startup_32)
/* Setup up the stack pointer */
LDIMM (r0, spi_stack_top)
LDIMM (r1, spu_stack_top)
@@ -134,7 +133,6 @@ loop1:
/*
* AP startup routine
*/
- .text
.global eit_vector
ENTRY(startup_AP)
;; setup EVB
@@ -231,6 +229,7 @@ ENTRY(startup_AP)
nop
#endif /* CONFIG_SMP */
+ .text
ENTRY(stack_start)
.long init_thread_union+8192
.long __KERNEL_DS
@@ -269,13 +268,13 @@ ENTRY(empty_zero_page)
/*------------------------------------------------------------------------
* Stack area
*/
- .section .spi
+ .section .init.data, "aw"
ALIGN
.global spi_stack_top
.zero 1024
spi_stack_top:
- .section .spu
+ .section .init.data, "aw"
ALIGN
.global spu_stack_top
.zero 1024
diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c
deleted file mode 100644
index 9e508fd9d97..00000000000
--- a/arch/m32r/kernel/init_task.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/* orig : i386 init_task.c */
-
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
-/*
- * Initial thread structure.
- *
- * We need to make sure that this is 8192-byte aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
-
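
[annotation] Deleting this file works because the architecture-independent copy in init/init_task.c now provides the same objects; in outline it contains roughly the following (abridged sketch of the generic file of this era, quoted from memory):

/* init/init_task.c (generic, abridged sketch) */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/* the 8192-byte alignment still comes from a dedicated linker section */
union thread_union init_thread_union __init_task_data =
	{ INIT_THREAD_INFO(init_task) };
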
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index d0c5b0b7da2..c7272b89428 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -18,58 +18,9 @@
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
-#include <linux/seq_file.h>
#include <linux/module.h>
#include <asm/uaccess.h>
-atomic_t irq_err_count;
-atomic_t irq_mis_count;
-
-/*
- * Generic, controller-independent functions:
- */
-
-int show_interrupts(struct seq_file *p, void *v)
-{
- int i = *(loff_t *) v, j;
- struct irqaction * action;
- unsigned long flags;
-
- if (i == 0) {
- seq_printf(p, " ");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
- seq_putc(p, '\n');
- }
-
- if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
- if (!action)
- goto skip;
- seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
- seq_printf(p, " %14s", irq_desc[i].chip->typename);
- seq_printf(p, " %s", action->name);
-
- for (action=action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
-
- seq_putc(p, '\n');
-skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- } else if (i == NR_IRQS) {
- seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
- seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
- }
- return 0;
-}
-
/*
* do_IRQ handles all normal device IRQs (the special
* SMP cross-CPU interrupts have their own specific
@@ -84,7 +35,7 @@ asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* FIXME M32R */
#endif
- __do_IRQ(irq);
+ generic_handle_irq(irq);
irq_exit();
set_irq_regs(old_regs);
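
[annotation] With show_interrupts() deleted (the generic /proc/interrupts code supplies it) and __do_IRQ() replaced, the remaining do_IRQ() is a thin wrapper; pieced together from the hunk above, it is essentially:

asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	generic_handle_irq(irq);	/* dispatch through irq_desc's flow
					   handler instead of the old __do_IRQ() */
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}
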
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index 41a4c95e06d..b727e693c80 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -7,7 +7,6 @@
#include <linux/interrupt.h>
#include <linux/string.h>
-#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
@@ -15,23 +14,13 @@
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
/* platform dependent support */
EXPORT_SYMBOL(boot_cpu_data);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_trylock);
-
-/* Networking helper routines. */
-/* Delay loops */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);
@@ -48,9 +37,6 @@ EXPORT_SYMBOL(dcache_dummy);
#endif
EXPORT_SYMBOL(cpu_data);
-/* Global SMP stuff */
-EXPORT_SYMBOL(smp_call_function);
-
/* TLB flushing */
EXPORT_SYMBOL(smp_flush_tlb_page);
#endif
@@ -73,6 +59,7 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(_inb);
EXPORT_SYMBOL(_inw);
diff --git a/arch/m32r/kernel/module.c b/arch/m32r/kernel/module.c
index 8d420579438..38233b6596b 100644
--- a/arch/m32r/kernel/module.c
+++ b/arch/m32r/kernel/module.c
@@ -28,35 +28,6 @@
#define DEBUGP(fmt...)
#endif
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
-#ifdef CONFIG_MMU
- return vmalloc_exec(size);
-#else
- return vmalloc(size);
-#endif
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
- /* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
#define COPY_UNALIGNED_WORD(sw, tw, align) \
{ \
void *__s = &(sw), *__t = &(tw); \
@@ -230,29 +201,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
return 0;
}
-
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
-#if 0
- printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-#endif
- return 0;
-
-}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
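
[annotation] The deleted hooks fall back to the __weak defaults in kernel/module.c, leaving only apply_relocate_add() as m32r-specific here. The generic fallbacks look approximately like this (sketch; exact bodies may differ by kernel version):

/* kernel/module.c, __weak default hooks (abridged sketch) */
void * __weak module_alloc(unsigned long size)
{
	return vmalloc_exec(size);
}

void __weak module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}

int __weak module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			   struct module *me)
{
	return 0;
}
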
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index a689e2978b6..e69221d581d 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -11,7 +11,7 @@
#undef DEBUG_PROCESS
#ifdef DEBUG_PROCESS
#define DPRINTK(fmt, args...) printk("%s:%d:%s: " fmt, __FILE__, __LINE__, \
- __FUNCTION__, ##args)
+ __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif
@@ -21,11 +21,12 @@
*/
#include <linux/fs.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
-#include <linux/slab.h>
#include <linux/hardirq.h>
+#include <linux/rcupdate.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -35,8 +36,6 @@
#include <linux/err.h>
-static int hlt_counter=0;
-
/*
* Return saved PC of a blocked thread.
*/
@@ -45,74 +44,9 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return tsk->thread.lr;
}
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void) = NULL;
-EXPORT_SYMBOL(pm_idle);
-
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
-void disable_hlt(void)
-{
- hlt_counter++;
-}
-
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
- hlt_counter--;
-}
-
-EXPORT_SYMBOL(enable_hlt);
-
-/*
- * We use this is we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
- /* M32R_FIXME: Please use "cpu_sleep" mode. */
- cpu_relax();
-}
-
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle (void)
-{
- /* M32R_FIXME */
- cpu_relax();
-}
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle (void)
-{
- /* endless idle loop with no priority at all */
- while (1) {
- while (!need_resched()) {
- void (*idle)(void) = pm_idle;
-
- if (!idle)
- idle = default_idle;
-
- idle();
- }
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
-}
-
void machine_restart(char *__unused)
{
#if defined(CONFIG_PLAT_MAPPI3)
@@ -136,24 +70,11 @@ void machine_power_off(void)
/* M32R_FIXME */
}
-static int __init idle_setup (char *str)
-{
- if (!strncmp(str, "poll", 4)) {
- printk("using poll in idle threads.\n");
- pm_idle = poll_idle;
- } else if (!strncmp(str, "sleep", 4)) {
- printk("using sleep in idle threads.\n");
- pm_idle = default_idle;
- }
-
- return 1;
-}
-
-__setup("idle=", idle_setup);
-
void show_regs(struct pt_regs * regs)
{
printk("\n");
+ show_regs_print_info(KERN_DEFAULT);
+
printk("BPC[%08lx]:PSW[%08lx]:LR [%08lx]:FP [%08lx]\n", \
regs->bpc, regs->psw, regs->lr, regs->fp);
printk("BBPC[%08lx]:BBPSW[%08lx]:SPU[%08lx]:SPI[%08lx]\n", \
@@ -181,41 +102,6 @@ void show_regs(struct pt_regs * regs)
}
/*
- * Create a kernel thread
- */
-
-/*
- * This is the mechanism for creating a new kernel thread.
- *
- * NOTE! Only a kernel-only process(ie the swapper or direct descendants
- * who haven't done an "execve()") should use this: it will work within
- * a system call from a "real" process, but the process memory space will
- * not be free'd until both the parent and the child have exited.
- */
-static void kernel_thread_helper(void *nouse, int (*fn)(void *), void *arg)
-{
- fn(arg);
- do_exit(-1);
-}
-
-int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
- struct pt_regs regs;
-
- memset(&regs, 0, sizeof (regs));
- regs.r1 = (unsigned long)fn;
- regs.r2 = (unsigned long)arg;
-
- regs.bpc = (unsigned long)kernel_thread_helper;
-
- regs.psw = M32R_PSW_BIE;
-
- /* Ok, create the new process. */
- return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
- NULL);
-}
-
-/*
* Free current thread data structures etc..
*/
void exit_thread(void)
@@ -242,103 +128,33 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
return 0; /* Task didn't use the fpu at all. */
}
-int copy_thread(int nr, unsigned long clone_flags, unsigned long spu,
- unsigned long unused, struct task_struct *tsk, struct pt_regs *regs)
+int copy_thread(unsigned long clone_flags, unsigned long spu,
+ unsigned long arg, struct task_struct *tsk)
{
struct pt_regs *childregs = task_pt_regs(tsk);
extern void ret_from_fork(void);
-
- /* Copy registers */
- *childregs = *regs;
-
- childregs->spu = spu;
- childregs->r0 = 0; /* Child gets zero as return value */
- regs->r0 = tsk->pid;
+ extern void ret_from_kernel_thread(void);
+
+ if (unlikely(tsk->flags & PF_KTHREAD)) {
+ memset(childregs, 0, sizeof(struct pt_regs));
+ childregs->psw = M32R_PSW_BIE;
+ childregs->r1 = spu; /* fn */
+ childregs->r0 = arg;
+ tsk->thread.lr = (unsigned long)ret_from_kernel_thread;
+ } else {
+ /* Copy registers */
+ *childregs = *current_pt_regs();
+ if (spu)
+ childregs->spu = spu;
+ childregs->r0 = 0; /* Child gets zero as return value */
+ tsk->thread.lr = (unsigned long)ret_from_fork;
+ }
tsk->thread.sp = (unsigned long)childregs;
- tsk->thread.lr = (unsigned long)ret_from_fork;
return 0;
}
/*
- * Capture the user space registers if the task is not running (in user space)
- */
-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-{
- /* M32R_FIXME */
- return 1;
-}
-
-asmlinkage int sys_fork(unsigned long r0, unsigned long r1, unsigned long r2,
- unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6,
- struct pt_regs regs)
-{
-#ifdef CONFIG_MMU
- return do_fork(SIGCHLD, regs.spu, &regs, 0, NULL, NULL);
-#else
- return -EINVAL;
-#endif /* CONFIG_MMU */
-}
-
-asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
- unsigned long parent_tidptr,
- unsigned long child_tidptr,
- unsigned long r4, unsigned long r5, unsigned long r6,
- struct pt_regs regs)
-{
- if (!newsp)
- newsp = regs.spu;
-
- return do_fork(clone_flags, newsp, &regs, 0,
- (int __user *)parent_tidptr, (int __user *)child_tidptr);
-}
-
-/*
- * This is trivial, and on the face of it looks like it
- * could equally well be done in user mode.
- *
- * Not so, for quite unobvious reasons - register pressure.
- * In user mode vfork() cannot have a stack frame, and if
- * done by calling the "clone()" system call directly, you
- * do not have enough call-clobbered registers to hold all
- * the information you need.
- */
-asmlinkage int sys_vfork(unsigned long r0, unsigned long r1, unsigned long r2,
- unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6,
- struct pt_regs regs)
-{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.spu, &regs, 0,
- NULL, NULL);
-}
-
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
- char __user * __user *uenvp,
- unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, struct pt_regs regs)
-{
- int error;
- char *filename;
-
- filename = getname(ufilename);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
-
- error = do_execve(filename, uargv, uenvp, &regs);
- if (error == 0) {
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
- }
- putname(filename);
-out:
- return error;
-}
-
-/*
* These bracket the sleeping functions..
*/
#define first_sched ((unsigned long) scheduling_functions_start_here)
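
[annotation] With the private kernel_thread(), sys_fork(), sys_clone(), sys_vfork() and sys_execve() gone, the generic code in kernel/fork.c and fs/exec.c drives the two-branch copy_thread() above instead. The generic kernel_thread() of this era amounts to (abridged sketch; argument plumbing from memory):

/* kernel/fork.c (generic, abridged sketch) */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	/* the stack/arg slots carry fn and arg down to copy_thread(),
	   which sees PF_KTHREAD and stores them in childregs->r1/r0 */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
		       (unsigned long)fn, (unsigned long)arg, NULL, NULL);
}
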
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 9aa615d3a5b..51f5e9aa490 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -19,7 +19,6 @@
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
@@ -30,7 +29,6 @@
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
-#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
@@ -78,7 +76,7 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
struct user * dummy = NULL;
#endif
- if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3))
+ if ((off & 3) || off > sizeof(struct user) - 3)
return -EIO;
off >>= 2;
@@ -140,8 +138,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
struct user * dummy = NULL;
#endif
- if ((off & 3) || off < 0 ||
- off > sizeof(struct user) - 3)
+ if ((off & 3) || off > sizeof(struct user) - 3)
return -EIO;
off >>= 2;
@@ -582,6 +579,35 @@ init_debug_traps(struct task_struct *child)
}
}
+void user_enable_single_step(struct task_struct *child)
+{
+ unsigned long next_pc;
+ unsigned long pc, insn;
+
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+
+ /* Compute next pc. */
+ pc = get_stack_long(child, PT_BPC);
+
+ if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
+ != sizeof(insn))
+ return;
+
+ compute_next_pc(insn, pc, &next_pc, child);
+ if (next_pc & 0x80000000)
+ return;
+
+ if (embed_debug_trap(child, next_pc))
+ return;
+
+ invalidate_cache();
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ unregister_all_debug_traps(child);
+ invalidate_cache();
+}
/*
* Called by kernel/ptrace.c when detaching..
@@ -594,9 +620,11 @@ void ptrace_disable(struct task_struct *child)
}
long
-arch_ptrace(struct task_struct *child, long request, long addr, long data)
+arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/*
@@ -611,8 +639,7 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
* read the word at location addr in the USER area.
*/
case PTRACE_PEEKUSR:
- ret = ptrace_read_user(child, addr,
- (unsigned long __user *)data);
+ ret = ptrace_read_user(child, addr, datap);
break;
/*
@@ -632,84 +659,12 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = ptrace_write_user(child, addr, data);
break;
- /*
- * continue/restart and stop at next (return from) syscall
- */
- case PTRACE_SYSCALL:
- case PTRACE_CONT:
- ret = -EIO;
- if (!valid_signal(data))
- break;
- if (request == PTRACE_SYSCALL)
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- else
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- child->exit_code = data;
- wake_up_process(child);
- ret = 0;
- break;
-
- /*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL: {
- ret = 0;
- unregister_all_debug_traps(child);
- invalidate_cache();
- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
- break;
- child->exit_code = SIGKILL;
- wake_up_process(child);
- break;
- }
-
- /*
- * execute single instruction.
- */
- case PTRACE_SINGLESTEP: {
- unsigned long next_pc;
- unsigned long pc, insn;
-
- ret = -EIO;
- if (!valid_signal(data))
- break;
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- if ((child->ptrace & PT_DTRACE) == 0) {
- /* Spurious delayed TF traps may occur */
- child->ptrace |= PT_DTRACE;
- }
-
- /* Compute next pc. */
- pc = get_stack_long(child, PT_BPC);
-
- if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
- != sizeof(insn))
- break;
-
- compute_next_pc(insn, pc, &next_pc, child);
- if (next_pc & 0x80000000)
- break;
-
- if (embed_debug_trap(child, next_pc))
- break;
-
- invalidate_cache();
- child->exit_code = data;
-
- /* give it a chance to run. */
- wake_up_process(child);
- ret = 0;
- break;
- }
-
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *)data);
+ ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (void __user *)data);
+ ret = ptrace_setregs(child, datap);
break;
default:
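
[annotation] The deleted PTRACE_SYSCALL/CONT/KILL/SINGLESTEP cases are now handled once in generic kernel/ptrace.c, which calls back through the user_enable_single_step()/user_disable_single_step() hooks added above; its resume path is approximately (abridged sketch):

/* kernel/ptrace.c, ptrace_resume() (abridged sketch) */
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	if (request == PTRACE_SINGLESTEP)
		user_enable_single_step(child);		/* arch hook, above */
	else
		user_disable_single_step(child);

	child->exit_code = data;
	wake_up_process(child);
	return 0;
}
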
diff --git a/arch/m32r/kernel/semaphore.c b/arch/m32r/kernel/semaphore.c
deleted file mode 100644
index 940c2d37cfd..00000000000
--- a/arch/m32r/kernel/semaphore.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * linux/arch/m32r/semaphore.c
- * orig : i386 2.6.4
- *
- * M32R semaphore implementation.
- *
- * Copyright (c) 2002 - 2004 Hitoshi Yamamoto
- */
-
-/*
- * i386 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- * - only on a boundary condition do we need to care. When we go
- * from a negative count to a non-negative, we wake people up.
- * - when we go from a non-negative count to a negative do we
- * (a) synchronize with the "sleeper" count and (b) make sure
- * that we're on the wakeup list before we synchronize so that
- * we cannot lose wakeup events.
- */
-
-asmlinkage void __up(struct semaphore *sem)
-{
- wake_up(&sem->wait);
-}
-
-asmlinkage void __sched __down(struct semaphore * sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- unsigned long flags;
-
- tsk->state = TASK_UNINTERRUPTIBLE;
- spin_lock_irqsave(&sem->wait.lock, flags);
- add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock in
- * the wait_queue_head.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- schedule();
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- tsk->state = TASK_UNINTERRUPTIBLE;
- }
- remove_wait_queue_locked(&sem->wait, &wait);
- wake_up_locked(&sem->wait);
- spin_unlock_irqrestore(&sem->wait.lock, flags);
- tsk->state = TASK_RUNNING;
-}
-
-asmlinkage int __sched __down_interruptible(struct semaphore * sem)
-{
- int retval = 0;
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- unsigned long flags;
-
- tsk->state = TASK_INTERRUPTIBLE;
- spin_lock_irqsave(&sem->wait.lock, flags);
- add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * With signals pending, this turns into
- * the trylock failure case - we won't be
- * sleeping, and we* can't get the lock as
- * it has contention. Just correct the count
- * and exit.
- */
- if (signal_pending(current)) {
- retval = -EINTR;
- sem->sleepers = 0;
- atomic_add(sleepers, &sem->count);
- break;
- }
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock in
- * wait_queue_head. The "-1" is because we're
- * still hoping to get the semaphore.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- schedule();
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- tsk->state = TASK_INTERRUPTIBLE;
- }
- remove_wait_queue_locked(&sem->wait, &wait);
- wake_up_locked(&sem->wait);
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- tsk->state = TASK_RUNNING;
- return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-asmlinkage int __down_trylock(struct semaphore * sem)
-{
- int sleepers;
- unsigned long flags;
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- sleepers = sem->sleepers + 1;
- sem->sleepers = 0;
-
- /*
- * Add "everybody else" and us into it. They aren't
- * playing, because we own the spinlock in the
- * wait_queue_head.
- */
- if (!atomic_add_negative(sleepers, &sem->count)) {
- wake_up_locked(&sem->wait);
- }
-
- spin_unlock_irqrestore(&sem->wait.lock, flags);
- return 1;
-}
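
[annotation] The whole file can go because kernel/semaphore.c now provides one spinlock-based implementation for every architecture, replacing the atomic count-and-sleepers protocol above; its down() fast path is roughly (abridged sketch):

/* kernel/semaphore.c (generic, abridged sketch) */
void down(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;		/* uncontended fast path */
	else
		__down(sem);		/* queue on sem->wait_list and sleep */
	spin_unlock_irqrestore(&sem->lock, flags);
}
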
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 18124542a6e..d503568cb75 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -20,53 +20,13 @@
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
-#include <linux/freezer.h>
+#include <linux/tracehook.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#define DEBUG_SIG 0
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-int do_signal(struct pt_regs *, sigset_t *);
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
- unsigned long r2, unsigned long r3, unsigned long r4,
- unsigned long r5, unsigned long r6, struct pt_regs *regs)
-{
- sigset_t newset;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- if (copy_from_user(&newset, unewset, sizeof(newset)))
- return -EFAULT;
- sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
-
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
- return -ERESTARTNOHAND;
-}
-
-asmlinkage int
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
- unsigned long r2, unsigned long r3, unsigned long r4,
- unsigned long r5, unsigned long r6, struct pt_regs *regs)
-{
- return do_sigaltstack(uss, uoss, regs->spu);
-}
-
-
/*
* Do a signal return; undo the signal stack.
*/
@@ -139,16 +99,12 @@ sys_rt_sigreturn(unsigned long r0, unsigned long r1,
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result))
goto badframe;
- if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->spu) == -EFAULT)
+ if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return result;
@@ -217,7 +173,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
return (void __user *)((sp - frame_size) & -8ul);
}
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
@@ -248,10 +204,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->spu),
- &frame->uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->spu);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
@@ -274,10 +227,24 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
current->comm, current->pid, frame, regs->pc);
#endif
- return;
+ return 0;
give_sigsegv:
force_sigsegv(sig, current);
+ return -EFAULT;
+}
+
+static int prev_insn(struct pt_regs *regs)
+{
+ u16 inst;
+ if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
+ return -EFAULT;
+ if ((inst & 0xfff0) == 0x10f0) /* trap ? */
+ regs->bpc -= 2;
+ else
+ regs->bpc -= 4;
+ regs->syscall_nr = -1;
+ return 0;
}
/*
@@ -286,10 +253,8 @@ give_sigsegv:
static void
handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *oldset, struct pt_regs *regs)
+ struct pt_regs *regs)
{
- unsigned short inst;
-
/* Are we from a system call? */
if (regs->syscall_nr >= 0) {
/* If so, check system call restarting.. */
@@ -307,23 +272,16 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
/* fallthrough */
case -ERESTARTNOINTR:
regs->r0 = regs->orig_r0;
- inst = *(unsigned short *)(regs->bpc - 2);
- if ((inst & 0xfff0) == 0x10f0) /* trap ? */
- regs->bpc -= 2;
- else
- regs->bpc -= 4;
+ if (prev_insn(regs) < 0)
+ return;
}
}
/* Set up the stack frame */
- setup_rt_frame(sig, ka, info, oldset, regs);
-
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs))
+ return;
+
+ signal_delivered(sig, info, ka, regs, 0);
}
/*
@@ -331,12 +289,11 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void do_signal(struct pt_regs *regs)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
- unsigned short inst;
/*
* We want the common case to go fast, which
@@ -345,13 +302,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
* if so.
*/
if (!user_mode(regs))
- return 1;
-
- if (try_to_freeze())
- goto no_signal;
-
- if (!oldset)
- oldset = &current->blocked;
+ return;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
@@ -362,11 +313,11 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
*/
/* Whee! Actually deliver the signal. */
- handle_signal(signr, &ka, &info, oldset, regs);
- return 1;
+ handle_signal(signr, &ka, &info, regs);
+
+ return;
}
- no_signal:
/* Did we come from a system call? */
if (regs->syscall_nr >= 0) {
/* Restart the system call - no handlers present */
@@ -374,31 +325,21 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
regs->r0 == -ERESTARTSYS ||
regs->r0 == -ERESTARTNOINTR) {
regs->r0 = regs->orig_r0;
- inst = *(unsigned short *)(regs->bpc - 2);
- if ((inst & 0xfff0) == 0x10f0) /* trap ? */
- regs->bpc -= 2;
- else
- regs->bpc -= 4;
- }
- if (regs->r0 == -ERESTART_RESTARTBLOCK){
+ prev_insn(regs);
+ } else if (regs->r0 == -ERESTART_RESTARTBLOCK){
regs->r0 = regs->orig_r0;
regs->r7 = __NR_restart_syscall;
- inst = *(unsigned short *)(regs->bpc - 2);
- if ((inst & 0xfff0) == 0x10f0) /* trap ? */
- regs->bpc -= 2;
- else
- regs->bpc -= 4;
+ prev_insn(regs);
}
}
- return 0;
+ restore_saved_sigmask();
}
/*
* notification of userspace execution resumption
* - triggered by current->work.notify_resume
*/
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
- __u32 thread_info_flags)
+void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
{
/* Pending single-step? */
if (thread_info_flags & _TIF_SINGLESTEP)
@@ -406,7 +347,10 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(regs,oldset);
+ do_signal(regs);
- clear_thread_flag(TIF_IRET);
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ }
}
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c837bc13b01..ce7aea34fdf 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -17,6 +17,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/smp.h>
@@ -25,32 +26,17 @@
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/m32r.h>
+#include <asm/tlbflush.h>
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Data structures and variables */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- atomic_t started;
- atomic_t finished;
- int wait;
-} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
-
-static struct call_data_struct *call_data;
-
-/*
* For flush_cache_all()
*/
static DEFINE_SPINLOCK(flushcache_lock);
@@ -76,36 +62,22 @@ extern spinlock_t ipi_lock[];
/* Function Prototypes */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-void smp_send_reschedule(int);
void smp_reschedule_interrupt(void);
-
-void smp_flush_cache_all(void);
void smp_flush_cache_all_interrupt(void);
-void smp_flush_tlb_all(void);
static void flush_tlb_all_ipi(void *);
-
-void smp_flush_tlb_mm(struct mm_struct *);
-void smp_flush_tlb_range(struct vm_area_struct *, unsigned long, \
- unsigned long);
-void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
static void flush_tlb_others(cpumask_t, struct mm_struct *,
struct vm_area_struct *, unsigned long);
+
void smp_invalidate_interrupt(void);
-void smp_send_stop(void);
static void stop_this_cpu(void *);
-int smp_call_function(void (*) (void *), void *, int, int);
-void smp_call_function_interrupt(void);
-
-void smp_send_timer(void);
void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);
-void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
-unsigned long send_IPI_mask_phys(cpumask_t, int, int);
+static void send_IPI_allbutself(int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Rescheduling request Routines */
@@ -132,7 +104,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
void smp_send_reschedule(int cpu_id)
{
WARN_ON(cpu_is_offline(cpu_id));
- send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+ send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
}
/*==========================================================================*
@@ -140,8 +112,6 @@ void smp_send_reschedule(int cpu_id)
*
* Description: This routine executes on CPU which received
* 'RESCHEDULE_IPI'.
- * Rescheduling is processed at the exit of interrupt
- * operation.
*
* Born on Date: 2002.02.05
*
@@ -156,7 +126,7 @@ void smp_send_reschedule(int cpu_id)
*==========================================================================*/
void smp_reschedule_interrupt(void)
{
- /* nothing to do */
+ scheduler_ipi();
}
/*==========================================================================*
@@ -182,12 +152,12 @@ void smp_flush_cache_all(void)
unsigned long *mask;
preempt_disable();
- cpumask = cpu_online_map;
- cpu_clear(smp_processor_id(), cpumask);
+ cpumask_copy(&cpumask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &cpumask);
spin_lock(&flushcache_lock);
- mask=cpus_addr(cpumask);
+ mask=cpumask_bits(&cpumask);
atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
- send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+ send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
_flush_cache_copyback_all();
while (flushcache_cpumask)
mb();
@@ -231,7 +201,7 @@ void smp_flush_tlb_all(void)
local_irq_save(flags);
__flush_tlb_all();
local_irq_restore(flags);
- smp_call_function(flush_tlb_all_ipi, NULL, 1, 1);
+ smp_call_function(flush_tlb_all_ipi, NULL, 1);
preempt_enable();
}
@@ -283,8 +253,8 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
preempt_disable();
cpu_id = smp_processor_id();
mmc = &mm->context[cpu_id];
- cpu_mask = mm->cpu_vm_mask;
- cpu_clear(cpu_id, cpu_mask);
+ cpumask_copy(&cpu_mask, mm_cpumask(mm));
+ cpumask_clear_cpu(cpu_id, &cpu_mask);
if (*mmc != NO_CONTEXT) {
local_irq_save(flags);
@@ -292,10 +262,10 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
if (mm == current->mm)
activate_context(mm);
else
- cpu_clear(cpu_id, mm->cpu_vm_mask);
+ cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
local_irq_restore(flags);
}
- if (!cpus_empty(cpu_mask))
+ if (!cpumask_empty(&cpu_mask))
flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);
preempt_enable();
@@ -353,8 +323,8 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
preempt_disable();
cpu_id = smp_processor_id();
mmc = &mm->context[cpu_id];
- cpu_mask = mm->cpu_vm_mask;
- cpu_clear(cpu_id, cpu_mask);
+ cpumask_copy(&cpu_mask, mm_cpumask(mm));
+ cpumask_clear_cpu(cpu_id, &cpu_mask);
#ifdef DEBUG_SMP
if (!mm)
@@ -368,7 +338,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
__flush_tlb_page(va);
local_irq_restore(flags);
}
- if (!cpus_empty(cpu_mask))
+ if (!cpumask_empty(&cpu_mask))
flush_tlb_others(cpu_mask, mm, vma, va);
preempt_enable();
@@ -415,14 +385,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
* - current CPU must not be in mask
* - mask must exist :)
*/
- BUG_ON(cpus_empty(cpumask));
+ BUG_ON(cpumask_empty(&cpumask));
- BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+ BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
BUG_ON(!mm);
/* If a CPU which we ran on has gone down, OK. */
- cpus_and(cpumask, cpumask, cpu_online_map);
- if (cpus_empty(cpumask))
+ cpumask_and(&cpumask, &cpumask, cpu_online_mask);
+ if (cpumask_empty(&cpumask))
return;
/*
@@ -436,16 +406,16 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
flush_mm = mm;
flush_vma = vma;
flush_va = va;
- mask=cpus_addr(cpumask);
+ mask=cpumask_bits(&cpumask);
atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
/*
* We have to send the IPI only to
* CPUs affected.
*/
- send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+ send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
- while (!cpus_empty(flush_cpumask)) {
+ while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
/* nothing. lockup detection does not belong here */
mb();
}
@@ -480,7 +450,7 @@ void smp_invalidate_interrupt(void)
int cpu_id = smp_processor_id();
unsigned long *mmc = &flush_mm->context[cpu_id];
- if (!cpu_isset(cpu_id, flush_cpumask))
+ if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
return;
if (flush_va == FLUSH_ALL) {
@@ -488,7 +458,7 @@ void smp_invalidate_interrupt(void)
if (flush_mm == current->active_mm)
activate_context(flush_mm);
else
- cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+ cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
} else {
unsigned long va = flush_va;
@@ -498,7 +468,7 @@ void smp_invalidate_interrupt(void)
__flush_tlb_page(va);
}
}
- cpu_clear(cpu_id, flush_cpumask);
+ cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask);
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -524,7 +494,7 @@ void smp_invalidate_interrupt(void)
*==========================================================================*/
void smp_send_stop(void)
{
- smp_call_function(stop_this_cpu, NULL, 1, 0);
+ smp_call_function(stop_this_cpu, NULL, 0);
}
/*==========================================================================*
@@ -550,7 +520,7 @@ static void stop_this_cpu(void *dummy)
/*
* Remove this CPU:
*/
- cpu_clear(cpu_id, cpu_online_map);
+ set_cpu_online(cpu_id, false);
/*
* PSW IE = 1;
@@ -565,86 +535,14 @@ static void stop_this_cpu(void *dummy)
for ( ; ; );
}
-/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-/* Call function Routines */
-/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-
-/*==========================================================================*
- * Name: smp_call_function
- *
- * Description: This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs
- * in the system.
- *
- * Born on Date: 2002.02.05
- *
- * Arguments: *func - The function to run. This must be fast and
- * non-blocking.
- * *info - An arbitrary pointer to pass to the function.
- * nonatomic - currently unused.
- * wait - If true, wait (atomically) until function has
- * completed on other CPUs.
- *
- * Returns: 0 on success, else a negative status code. Does not return
- * until remote CPUs are nearly ready to execute <<func>> or
- * are or have executed.
- *
- * Cautions: You must not call this function with disabled interrupts or
- * from a hardware interrupt handler, you may call it from a
- * bottom half handler.
- *
- * Modification log:
- * Date Who Description
- * ---------- --- --------------------------------------------------------
- *
- *==========================================================================*/
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
- int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- struct call_data_struct data;
- int cpus;
-
-#ifdef DEBUG_SMP
- unsigned long flags;
- __save_flags(flags);
- if (!(flags & 0x0040)) /* Interrupt Disable NONONO */
- BUG();
-#endif /* DEBUG_SMP */
-
- /* Holding any lock stops cpus from going down. */
- spin_lock(&call_lock);
- cpus = num_online_cpus() - 1;
-
- if (!cpus) {
- spin_unlock(&call_lock);
- return 0;
- }
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- mb();
-
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_allbutself(CALL_FUNCTION_IPI, 0);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- barrier();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- barrier();
- spin_unlock(&call_lock);
+ send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
+}
- return 0;
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
}
/*==========================================================================*
@@ -666,27 +564,16 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
*==========================================================================*/
void smp_call_function_interrupt(void)
{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- int wait = call_data->wait;
-
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function
- */
- mb();
- atomic_inc(&call_data->started);
- /*
- * At this point the info structure may be out of scope unless wait==1
- */
irq_enter();
- (*func)(info);
+ generic_smp_call_function_interrupt();
irq_exit();
+}
- if (wait) {
- mb();
- atomic_inc(&call_data->finished);
- }
+void smp_call_function_single_interrupt(void)
+{
+ irq_enter();
+ generic_smp_call_function_single_interrupt();
+ irq_exit();
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -824,14 +711,14 @@ void smp_local_timer_interrupt(void)
* ---------- --- --------------------------------------------------------
*
*==========================================================================*/
-void send_IPI_allbutself(int ipi_num, int try)
+static void send_IPI_allbutself(int ipi_num, int try)
{
cpumask_t cpumask;
- cpumask = cpu_online_map;
- cpu_clear(smp_processor_id(), cpumask);
+ cpumask_copy(&cpumask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &cpumask);
- send_IPI_mask(cpumask, ipi_num, try);
+ send_IPI_mask(&cpumask, ipi_num, try);
}
/*==========================================================================*
@@ -854,7 +741,7 @@ void send_IPI_allbutself(int ipi_num, int try)
* ---------- --- --------------------------------------------------------
*
*==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
{
cpumask_t physid_mask, tmp;
int cpu_id, phys_id;
@@ -863,16 +750,16 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
if (num_cpus <= 1) /* NO MP */
return;
- cpus_and(tmp, cpumask, cpu_online_map);
- BUG_ON(!cpus_equal(cpumask, tmp));
+ cpumask_and(&tmp, cpumask, cpu_online_mask);
+ BUG_ON(!cpumask_equal(cpumask, &tmp));
- physid_mask = CPU_MASK_NONE;
- for_each_cpu_mask(cpu_id, cpumask){
+ cpumask_clear(&physid_mask);
+ for_each_cpu(cpu_id, cpumask) {
if ((phys_id = cpu_to_physid(cpu_id)) != -1)
- cpu_set(phys_id, physid_mask);
+ cpumask_set_cpu(phys_id, &physid_mask);
}
- send_IPI_mask_phys(physid_mask, ipi_num, try);
+ send_IPI_mask_phys(&physid_mask, ipi_num, try);
}
/*==========================================================================*
@@ -895,19 +782,19 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
* ---------- --- --------------------------------------------------------
*
*==========================================================================*/
-unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
+unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num,
int try)
{
spinlock_t *ipilock;
volatile unsigned long *ipicr_addr;
unsigned long ipicr_val;
unsigned long my_physid_mask;
- unsigned long mask = cpus_addr(physid_mask)[0];
+ unsigned long mask = cpumask_bits(physid_mask)[0];
if (mask & ~physids_coerce(phys_cpu_present_map))
BUG();
- if (ipi_num >= NR_IPIS)
+ if (ipi_num >= NR_IPIS || ipi_num < 0)
BUG();
mask <<= IPI_SHIFT;
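
[annotation] Most of the churn in this file is the mechanical move from the old value-based cpumask API to the const-pointer one; the recurring idiom, extracted for clarity (illustrative, mirroring the hunks above):

cpumask_t tmp;

cpumask_copy(&tmp, cpu_online_mask);		/* was: tmp = cpu_online_map;  */
cpumask_clear_cpu(smp_processor_id(), &tmp);	/* was: cpu_clear(id, tmp);    */
if (!cpumask_empty(&tmp))			/* was: !cpus_empty(tmp)       */
	send_IPI_mask(&tmp, RESCHEDULE_IPI, 1);	/* now takes struct cpumask *  */
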
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 2c03ac1d005..bb21f4f6317 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -40,6 +40,7 @@
*/
#include <linux/module.h>
+#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -72,17 +73,11 @@ static unsigned int bsp_phys_id = -1;
/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
-/* Bitmask of currently online CPUs */
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
cpumask_t cpu_bootout_map;
cpumask_t cpu_bootin_map;
static cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_possible_map);
/* Per CPU bogomips and other parameters */
struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;
@@ -114,12 +109,8 @@ static unsigned int calibration_result;
/* Function Prototypes */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-void smp_prepare_boot_cpu(void);
-void smp_prepare_cpus(unsigned int);
static void init_ipi_lock(void);
static void do_boot_cpu(int);
-int __cpu_up(unsigned int);
-void smp_cpus_done(unsigned int);
int start_secondary(void *);
static void smp_callin(void);
@@ -136,13 +127,13 @@ static void unmap_cpu_to_physid(int, int);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Boot up APs Routines : BSP */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-void __devinit smp_prepare_boot_cpu(void)
+void smp_prepare_boot_cpu(void)
{
bsp_phys_id = hard_smp_processor_id();
physid_set(bsp_phys_id, phys_cpu_present_map);
- cpu_set(0, cpu_online_map); /* BSP's cpu_id == 0 */
- cpu_set(0, cpu_callout_map);
- cpu_set(0, cpu_callin_map);
+ set_cpu_online(0, true); /* BSP's cpu_id == 0 */
+ cpumask_set_cpu(0, &cpu_callout_map);
+ cpumask_set_cpu(0, &cpu_callin_map);
/*
* Initialize the logical to physical CPU number mapping
@@ -183,7 +174,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
physid_set(phys_id, phys_cpu_present_map);
#ifndef CONFIG_HOTPLUG_CPU
- cpu_present_map = cpu_possible_map;
+ init_cpu_present(cpu_possible_mask);
#endif
show_mp_info(nr_cpu);
@@ -218,7 +209,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (!physid_isset(phys_id, phys_cpu_present_map))
continue;
- if ((max_cpus >= 0) && (max_cpus <= cpucount + 1))
+ if (max_cpus <= cpucount + 1)
continue;
do_boot_cpu(phys_id);
@@ -299,10 +290,10 @@ static void __init do_boot_cpu(int phys_id)
send_status = 0;
boot_status = 0;
- cpu_set(phys_id, cpu_bootout_map);
+ cpumask_set_cpu(phys_id, &cpu_bootout_map);
/* Send Startup IPI */
- send_IPI_mask_phys(cpumask_of_cpu(phys_id), CPU_BOOT_IPI, 0);
+ send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0);
Dprintk("Waiting for send to finish...\n");
timeout = 0;
@@ -311,7 +302,7 @@ static void __init do_boot_cpu(int phys_id)
do {
Dprintk("+");
udelay(1000);
- send_status = !cpu_isset(phys_id, cpu_bootin_map);
+ send_status = !cpumask_test_cpu(phys_id, &cpu_bootin_map);
} while (send_status && (timeout++ < 100));
Dprintk("After Startup.\n");
@@ -321,19 +312,19 @@ static void __init do_boot_cpu(int phys_id)
* allow APs to start initializing.
*/
Dprintk("Before Callout %d.\n", cpu_id);
- cpu_set(cpu_id, cpu_callout_map);
+ cpumask_set_cpu(cpu_id, &cpu_callout_map);
Dprintk("After Callout %d.\n", cpu_id);
/*
* Wait 5s total for a response
*/
for (timeout = 0; timeout < 5000; timeout++) {
- if (cpu_isset(cpu_id, cpu_callin_map))
+ if (cpumask_test_cpu(cpu_id, &cpu_callin_map))
break; /* It has booted */
udelay(1000);
}
- if (cpu_isset(cpu_id, cpu_callin_map)) {
+ if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
/* number CPUs logically, starting from 1 (BSP is 0) */
Dprintk("OK.\n");
} else {
@@ -345,28 +336,28 @@ static void __init do_boot_cpu(int phys_id)
if (send_status || boot_status) {
unmap_cpu_to_physid(cpu_id, phys_id);
- cpu_clear(cpu_id, cpu_callout_map);
- cpu_clear(cpu_id, cpu_callin_map);
- cpu_clear(cpu_id, cpu_initialized);
+ cpumask_clear_cpu(cpu_id, &cpu_callout_map);
+ cpumask_clear_cpu(cpu_id, &cpu_callin_map);
+ cpumask_clear_cpu(cpu_id, &cpu_initialized);
cpucount--;
}
}
-int __cpuinit __cpu_up(unsigned int cpu_id)
+int __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
{
int timeout;
- cpu_set(cpu_id, smp_commenced_mask);
+ cpumask_set_cpu(cpu_id, &smp_commenced_mask);
/*
* Wait 5s total for a response
*/
for (timeout = 0; timeout < 5000; timeout++) {
- if (cpu_isset(cpu_id, cpu_online_map))
+ if (cpu_online(cpu_id))
break;
udelay(1000);
}
- if (!cpu_isset(cpu_id, cpu_online_map))
+ if (!cpu_online(cpu_id))
BUG();
return 0;
@@ -378,11 +369,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
unsigned long bogosum = 0;
for (timeout = 0; timeout < 5000; timeout++) {
- if (cpus_equal(cpu_callin_map, cpu_online_map))
+ if (cpumask_equal(&cpu_callin_map, cpu_online_mask))
break;
udelay(1000);
}
- if (!cpus_equal(cpu_callin_map, cpu_online_map))
+ if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
BUG();
for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++)
@@ -393,7 +384,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
*/
Dprintk("Before bogomips.\n");
if (cpucount) {
- for_each_cpu_mask(cpu_id, cpu_online_map)
+ for_each_cpu(cpu_id,cpu_online_mask)
bogosum += cpu_data[cpu_id].loops_per_jiffy;
printk(KERN_INFO "Total of %d processors activated " \
@@ -430,7 +421,7 @@ int __init start_secondary(void *unused)
cpu_init();
preempt_disable();
smp_callin();
- while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+ while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
cpu_relax();
smp_online();
@@ -441,7 +432,7 @@ int __init start_secondary(void *unused)
*/
local_flush_tlb_all();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
return 0;
}
@@ -468,7 +459,7 @@ static void __init smp_callin(void)
int cpu_id = smp_processor_id();
unsigned long timeout;
- if (cpu_isset(cpu_id, cpu_callin_map)) {
+ if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
printk("huh, phys CPU#%d, CPU#%d already present??\n",
phys_id, cpu_id);
BUG();
@@ -479,7 +470,7 @@ static void __init smp_callin(void)
timeout = jiffies + (2 * HZ);
while (time_before(jiffies, timeout)) {
/* Has the boot CPU finished it's STARTUP sequence ? */
- if (cpu_isset(cpu_id, cpu_callout_map))
+ if (cpumask_test_cpu(cpu_id, &cpu_callout_map))
break;
cpu_relax();
}
@@ -491,13 +482,15 @@ static void __init smp_callin(void)
}
/* Allow the master to continue. */
- cpu_set(cpu_id, cpu_callin_map);
+ cpumask_set_cpu(cpu_id, &cpu_callin_map);
}
static void __init smp_online(void)
{
int cpu_id = smp_processor_id();
+ notify_cpu_starting(cpu_id);
+
local_irq_enable();
/* Get our bogomips. */
@@ -506,7 +499,7 @@ static void __init smp_online(void)
/* Save our processor parameters */
smp_store_cpu_info(cpu_id);
- cpu_set(cpu_id, cpu_online_map);
+ set_cpu_online(cpu_id, true);
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -595,7 +588,7 @@ int setup_profiling_timer(unsigned int multiplier)
* accounting. At that time they also adjust their APIC timers
* accordingly.
*/
- for (i = 0; i < NR_CPUS; ++i)
+ for_each_possible_cpu(i)
per_cpu(prof_multiplier, i) = multiplier;
return 0;
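
[annotation] The cpu_idle() call at the end of start_secondary() becomes cpu_startup_entry(CPUHP_ONLINE), whose generic loop has the same shape as the m32r cpu_idle() deleted from process.c in this patch (abridged sketch; the generic code adds tick and RCU hooks omitted here):

/* generic idle loop entered via cpu_startup_entry() (abridged sketch) */
while (1) {
	while (!need_resched())
		arch_cpu_idle();	/* default is a cpu_relax()-style wait */
	schedule_preempt_disabled();
}
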
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index 6d7a80fdad4..c3fdd632fba 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -76,142 +76,6 @@ asmlinkage int sys_tas(int __user *addr)
return oldval;
}
-/*
- * sys_pipe() is the normal C calling standard for creating
- * a pipe. It's not the way Unix traditionally does this, though.
- */
-asmlinkage int
-sys_pipe(unsigned long r0, unsigned long r1, unsigned long r2,
- unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, struct pt_regs regs)
-{
- int fd[2];
- int error;
-
- error = do_pipe(fd);
- if (!error) {
- if (copy_to_user((void __user *)r0, fd, 2*sizeof(int)))
- error = -EFAULT;
- }
- return error;
-}
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file *file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
-/*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly.
- */
-asmlinkage int sys_ipc(uint call, int first, int second,
- int third, void __user *ptr, long fifth)
-{
- int version, ret;
-
- version = call >> 16; /* hack for backward compatibility */
- call &= 0xffff;
-
- switch (call) {
- case SEMOP:
- return sys_semtimedop(first, (struct sembuf __user *)ptr,
- second, NULL);
- case SEMTIMEDOP:
- return sys_semtimedop(first, (struct sembuf __user *)ptr,
- second, (const struct timespec __user *)fifth);
- case SEMGET:
- return sys_semget (first, second, third);
- case SEMCTL: {
- union semun fourth;
- if (!ptr)
- return -EINVAL;
- if (get_user(fourth.__pad, (void __user * __user *) ptr))
- return -EFAULT;
- return sys_semctl (first, second, third, fourth);
- }
-
- case MSGSND:
- return sys_msgsnd (first, (struct msgbuf __user *) ptr,
- second, third);
- case MSGRCV:
- switch (version) {
- case 0: {
- struct ipc_kludge tmp;
- if (!ptr)
- return -EINVAL;
-
- if (copy_from_user(&tmp,
- (struct ipc_kludge __user *) ptr,
- sizeof (tmp)))
- return -EFAULT;
- return sys_msgrcv (first, tmp.msgp, second,
- tmp.msgtyp, third);
- }
- default:
- return sys_msgrcv (first,
- (struct msgbuf __user *) ptr,
- second, fifth, third);
- }
- case MSGGET:
- return sys_msgget ((key_t) first, second);
- case MSGCTL:
- return sys_msgctl (first, second,
- (struct msqid_ds __user *) ptr);
- case SHMAT: {
- ulong raddr;
-
- if (!access_ok(VERIFY_WRITE, (ulong __user *) third,
- sizeof(ulong)))
- return -EFAULT;
- ret = do_shmat (first, (char __user *) ptr, second, &raddr);
- if (ret)
- return ret;
- return put_user (raddr, (ulong __user *) third);
- }
- case SHMDT:
- return sys_shmdt ((char __user *)ptr);
- case SHMGET:
- return sys_shmget (first, second, third);
- case SHMCTL:
- return sys_shmctl (first, second,
- (struct shmid_ds __user *) ptr);
- default:
- return -ENOSYS;
- }
-}
-
-asmlinkage int sys_uname(struct old_utsname __user * name)
-{
- int err;
- if (!name)
- return -EFAULT;
- down_read(&uts_sem);
- err = copy_to_user(name, utsname(), sizeof (*name));
- up_read(&uts_sem);
- return err?-EFAULT:0;
-}
-
asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
{
/* This should flush more selectively ... */
@@ -224,22 +88,3 @@ asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
/* Not implemented yet. */
return -ENOSYS;
}
-
-/*
- * Do a system call from kernel instead of calling sys_execve so we
- * end up with proper pt_regs.
- */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
-{
- register long __scno __asm__ ("r7") = __NR_execve;
- register long __arg3 __asm__ ("r2") = (long)(envp);
- register long __arg2 __asm__ ("r1") = (long)(argv);
- register long __res __asm__ ("r0") = (long)(filename);
- __asm__ __volatile__ (
- "trap #" SYSCALL_VECTOR "|| nop"
- : "=r" (__res)
- : "r" (__scno), "0" (__res), "r" (__arg2),
- "r" (__arg3)
- : "memory");
- return __res;
-}
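
The functions deleted above all have generic replacements in the core kernel: sys_pipe() and sys_ipc() are provided by common code, the old-style uname handler comes with __ARCH_WANT_SYS_OLD_UNAME, and kernel_execve() no longer needs a hand-rolled trap stub. sys_mmap2() can go because m32r already passed its offset in page units, so the syscall table simply points at the generic sys_mmap_pgoff() (see the syscall_table.S hunk below). Were a wrapper still wanted, it would reduce to a tail call; a sketch with the hypothetical name my_mmap2:

	asmlinkage long my_mmap2(unsigned long addr, unsigned long len,
				 unsigned long prot, unsigned long flags,
				 unsigned long fd, unsigned long pgoff)
	{
		/* flag filtering and the fget/fput dance now live in generic code */
		return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
	}
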
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
index aa3bf4cfab3..f365c19795e 100644
--- a/arch/m32r/kernel/syscall_table.S
+++ b/arch/m32r/kernel/syscall_table.S
@@ -168,7 +168,7 @@ ENTRY(sys_call_table)
.long sys_tas /* vm86 syscall holder */
.long sys_ni_syscall /* query_module syscall holder */
.long sys_poll
- .long sys_nfsservctl
+ .long sys_ni_syscall /* was nfsservctl */
.long sys_setresgid /* 170 */
.long sys_getresgid
.long sys_prctl
@@ -191,7 +191,7 @@ ENTRY(sys_call_table)
.long sys_ni_syscall /* streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
- .long sys_mmap2
+ .long sys_mmap_pgoff
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
@@ -324,3 +324,4 @@ ENTRY(sys_call_table)
.long sys_ni_syscall
.long sys_eventfd
.long sys_fallocate
+ .long sys_setns /* 325 */
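
Two table conventions are at work here: a retired syscall (nfsservctl was removed tree-wide) must keep its slot pointing at sys_ni_syscall so every later syscall number stays stable, and new syscalls such as sys_setns are only ever appended at the end. The stub itself is the stock definition from kernel/sys_ni.c:

	asmlinkage long sys_ni_syscall(void)
	{
		return -ENOSYS;		/* "Function not implemented" to userspace */
	}
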
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 994cc155635..1a15f81ea1b 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -33,8 +33,16 @@
#include <asm/hw_irq.h>
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
+/* this needs a better home */
+DEFINE_SPINLOCK(rtc_lock);
+
+#ifdef CONFIG_RTC_DRV_CMOS_MODULE
+EXPORT_SYMBOL(rtc_lock);
+#endif
+#endif /* pc-style 'CMOS' RTC support */
+
#ifdef CONFIG_SMP
-extern void send_IPI_allbutself(int, int);
extern void smp_local_timer_interrupt(void);
#endif
@@ -49,7 +57,7 @@ extern void smp_local_timer_interrupt(void);
static unsigned long latch;
-static unsigned long do_gettimeoffset(void)
+static u32 m32r_gettimeoffset(void)
{
unsigned long elapsed_time = 0; /* [us] */
@@ -67,7 +75,7 @@ static unsigned long do_gettimeoffset(void)
count = 0;
count = (latch - count) * TICK_SIZE;
- elapsed_time = (count + latch / 2) / latch;
+ elapsed_time = DIV_ROUND_CLOSEST(count, latch);
/* NOTE: LATCH is equal to the "interval" value (= reload count). */
#else /* CONFIG_SMP */
@@ -85,7 +93,7 @@ static unsigned long do_gettimeoffset(void)
p_count = count;
count = (latch - count) * TICK_SIZE;
- elapsed_time = (count + latch / 2) / latch;
+ elapsed_time = DIV_ROUND_CLOSEST(count, latch);
/* NOTE: LATCH is equal to the "interval" value (= reload count). */
#endif /* CONFIG_SMP */
#elif defined(CONFIG_CHIP_M32310)
@@ -94,127 +102,23 @@ static unsigned long do_gettimeoffset(void)
#error no chip configuration
#endif
- return elapsed_time;
-}
-
-/*
- * This version of gettimeofday has near microsecond resolution.
- */
-void do_gettimeofday(struct timeval *tv)
-{
- unsigned long seq;
- unsigned long usec, sec;
- unsigned long max_ntp_tick = tick_usec - tickadj;
-
- do {
- seq = read_seqbegin(&xtime_lock);
-
- usec = do_gettimeoffset();
-
- /*
- * If time_adjust is negative then NTP is slowing the clock
- * so make sure not to go into next possible interval.
- * Better to lose some accuracy than have time go backwards..
- */
- if (unlikely(time_adjust < 0))
- usec = min(usec, max_ntp_tick);
-
- sec = xtime.tv_sec;
- usec += (xtime.tv_nsec / 1000);
- } while (read_seqretry(&xtime_lock, seq));
-
- while (usec >= 1000000) {
- usec -= 1000000;
- sec++;
- }
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
- /*
- * This is revolting. We need to set "xtime" correctly. However, the
- * value in this location is the value at the most recent update of
- * wall time. Discover what correction gettimeofday() would have
- * made, and then undo it!
- */
- nsec -= do_gettimeoffset() * NSEC_PER_USEC;
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- ntp_clear();
- write_sequnlock_irq(&xtime_lock);
- clock_was_set();
-
- return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
-/*
- * In order to set the CMOS clock precisely, set_rtc_mmss has to be
- * called 500 ms after the second nowtime has started, because when
- * nowtime is written into the registers of the CMOS clock, it will
- * jump to the next second precisely 500 ms later. Check the Motorola
- * MC146818A or Dallas DS12887 data sheet for details.
- *
- * BUG: This routine does not handle hour overflow properly; it just
- * sets the minutes. Usually you won't notice until after reboot!
- */
-static inline int set_rtc_mmss(unsigned long nowtime)
-{
- return 0;
+ return elapsed_time * 1000;
}
-/* last time the cmos clock got updated */
-static long last_rtc_update = 0;
-
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
*/
-irqreturn_t timer_interrupt(int irq, void *dev_id)
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
#ifndef CONFIG_SMP
profile_tick(CPU_PROFILING);
#endif
- do_timer(1);
+ xtime_update(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- write_seqlock(&xtime_lock);
- if (ntp_synced()
- && xtime.tv_sec > last_rtc_update + 660
- && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2
- && (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2)
- {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else /* do it again in 60 s */
- last_rtc_update = xtime.tv_sec - 600;
- }
- write_sequnlock(&xtime_lock);
/* As we return to user mode fire off the other CPU schedulers..
this is basically because we don't yet share IRQ's around.
This message is rigged to be safe on the 386 - basically it's
@@ -228,14 +132,13 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-struct irqaction irq0 = {
+static struct irqaction irq0 = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
.name = "MFT2",
};
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
{
unsigned int epoch, year, mon, day, hour, min, sec;
@@ -255,10 +158,14 @@ void __init time_init(void)
epoch = 1952;
year += epoch;
- xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
- xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
+ ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+ ts->tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+}
+
+void __init time_init(void)
+{
+ arch_gettimeoffset = m32r_gettimeoffset;
#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
|| defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
@@ -272,7 +179,7 @@ void __init time_init(void)
bus_clock = boot_cpu_data.bus_clock;
divide = boot_cpu_data.timer_divide;
- latch = (bus_clock/divide + HZ / 2) / HZ;
+ latch = DIV_ROUND_CLOSEST(bus_clock/divide, HZ);
printk("Timer start : latch = %ld\n", latch);
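
The time.c rewrite drops the architecture-private do_gettimeofday()/do_settimeofday() and the stubbed-out RTC write-back in favor of two generic timekeeping hooks: read_persistent_clock() hands the boot-time wall clock to the core, and arch_gettimeoffset (a function pointer returning nanoseconds) supplies sub-tick resolution. That is why m32r_gettimeoffset() now ends in "elapsed_time * 1000" (microseconds to nanoseconds), and why DIV_ROUND_CLOSEST(), which for these unsigned operands expands to ((x) + (d)/2) / (d), replaces the open-coded rounding. A minimal sketch of the hookup shape, with hw_read_counter_us() standing in for the real MFT2 register reads:

	static u32 my_gettimeoffset(void)
	{
		u32 usec = hw_read_counter_us();	/* hypothetical hardware read */

		return usec * 1000;			/* the core expects nanoseconds */
	}

	void __init time_init(void)
	{
		arch_gettimeoffset = my_gettimeoffset;	/* register the hook */
	}
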
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 89ba4a0b5d5..a7a424f852e 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -18,10 +18,9 @@
#include <asm/page.h>
#include <asm/processor.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/smp.h>
@@ -40,6 +39,7 @@ extern void smp_invalidate_interrupt(void);
extern void smp_call_function_interrupt(void);
extern void smp_ipi_timer_interrupt(void);
extern void smp_flush_cache_all_interrupt(void);
+extern void smp_call_function_single_interrupt(void);
/*
* for Boot AP function
@@ -60,7 +60,7 @@ extern unsigned long eit_vector[];
((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
+ 0xff000000UL
-void set_eit_vector_entries(void)
+static void set_eit_vector_entries(void)
{
extern void default_eit_handler(void);
extern void system_call(void);
@@ -103,8 +103,8 @@ void set_eit_vector_entries(void)
eit_vector[186] = (unsigned long)smp_call_function_interrupt;
eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
- eit_vector[189] = 0;
- eit_vector[190] = 0;
+ eit_vector[189] = 0; /* CPU_BOOT_IPI */
+ eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
eit_vector[191] = 0;
#endif
_flush_cache_copyback_all();
@@ -120,9 +120,9 @@ void __init trap_init(void)
cpu_init();
}
-int kstack_depth_to_print = 24;
+static int kstack_depth_to_print = 24;
-void show_trace(struct task_struct *task, unsigned long *stack)
+static void show_trace(struct task_struct *task, unsigned long *stack)
{
unsigned long addr;
@@ -132,10 +132,8 @@ void show_trace(struct task_struct *task, unsigned long *stack)
printk("Call Trace: ");
while (!kstack_end(stack)) {
addr = *stack++;
- if (__kernel_text_address(addr)) {
- printk("[<%08lx>] ", addr);
- print_symbol("%s\n", addr);
- }
+ if (__kernel_text_address(addr))
+ printk("[<%08lx>] %pSR\n", addr, (void *)addr);
}
printk("\n");
}
@@ -169,15 +167,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
show_trace(task, sp);
}
-void dump_stack(void)
-{
- unsigned long stack;
-
- show_trace(current, &stack);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
static void show_registers(struct pt_regs *regs)
{
int i = 0;
@@ -223,7 +212,7 @@ bad:
printk("\n");
}
-DEFINE_SPINLOCK(die_lock);
+static DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index 41b07854fcc..018e4a711d7 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -4,6 +4,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/addrspace.h>
#include <asm/page.h>
+#include <asm/thread_info.h>
OUTPUT_ARCH(m32r)
#if defined(__LITTLE_ENDIAN__)
@@ -27,6 +28,7 @@ SECTIONS
_text = .; /* Text and read-only data */
.boot : { *(.boot) } = 0
.text : {
+ HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
@@ -39,96 +41,28 @@ SECTIONS
#endif
_etext = .; /* End of text section */
- . = ALIGN(16); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
+ EXCEPTION_TABLE(16)
+ NOTES
+ _sdata = .; /* Start of data section */
RODATA
-
- /* writeable */
- .data : { /* Data */
- *(.spu)
- *(.spi)
- DATA_DATA
- CONSTRUCTORS
- }
-
- . = ALIGN(4096);
- __nosave_begin = .;
- .data_nosave : { *(.data.nosave) }
- . = ALIGN(4096);
- __nosave_end = .;
-
- . = ALIGN(4096);
- .data.page_aligned : { *(.data.idt) }
-
- . = ALIGN(32);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
+ RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
_edata = .; /* End of data section */
- . = ALIGN(8192); /* init_task */
- .data.init_task : { *(.data.init_task) }
-
/* will be freed after init */
- . = ALIGN(4096); /* Init code and data */
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
- .init.data : { INIT_DATA }
- . = ALIGN(16);
- __setup_start = .;
- .init.setup : { *(.init.setup) }
- __setup_end = .;
- __initcall_start = .;
- .initcall.init : {
- INITCALLS
- }
- __initcall_end = .;
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
- SECURITY_INIT
- . = ALIGN(4);
- __alt_instructions = .;
- .altinstructions : { *(.altinstructions) }
- __alt_instructions_end = .;
- .altinstr_replacement : { *(.altinstr_replacement) }
- /* .exit.text is discard at runtime, not link time, to deal with references
- from .altinstructions and .eh_frame */
- .exit.text : { EXIT_TEXT }
- .exit.data : { EXIT_DATA }
-
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(4096);
- __initramfs_start = .;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
-#endif
-
- PERCPU(4096)
- . = ALIGN(4096);
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
+ PERCPU_SECTION(32)
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
- __bss_start = .; /* BSS */
- .bss : { *(.bss) }
- . = ALIGN(4);
- __bss_stop = .;
+ BSS_SECTION(0, 0, 4)
_end = . ;
- /* Sections to be discarded */
- /DISCARD/ : {
- EXIT_TEXT
- EXIT_DATA
- *(.exitcall.exit)
- }
-
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
@@ -137,4 +71,7 @@ SECTIONS
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
+
+ /* Sections to be discarded */
+ DISCARDS
}
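
The linker script shrinks to a fraction of its size because asm-generic/vmlinux.lds.h now supplies the boilerplate: EXCEPTION_TABLE(16), RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) (cacheline-aligned data, page-aligned data and the init task/stack in one macro, hence the new asm/thread_info.h include), INIT_TEXT_SECTION()/INIT_DATA_SECTION(), PERCPU_SECTION(), BSS_SECTION() and DISCARDS. The macros still emit the traditional boundary symbols, so C code keeps consuming them unchanged; a sketch along the lines of kernel/extable.c, with my_search as an invented wrapper and the search_extable() signature as of this kernel era:

	extern struct exception_table_entry __start___ex_table[];
	extern struct exception_table_entry __stop___ex_table[];

	static const struct exception_table_entry *my_search(unsigned long addr)
	{
		/* binary-search the fixup table that EXCEPTION_TABLE(16) emitted */
		return search_extable(__start___ex_table,
				      __stop___ex_table - 1, addr);
	}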