aboutsummaryrefslogtreecommitdiff
path: root/arch/m32r/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/m32r/kernel')
-rw-r--r--arch/m32r/kernel/Makefile20
-rw-r--r--arch/m32r/kernel/align.c585
-rw-r--r--arch/m32r/kernel/entry.S1000
-rw-r--r--arch/m32r/kernel/head.S287
-rw-r--r--arch/m32r/kernel/init_task.c41
-rw-r--r--arch/m32r/kernel/io_m32700ut.c472
-rw-r--r--arch/m32r/kernel/io_mappi.c384
-rw-r--r--arch/m32r/kernel/io_mappi2.c461
-rw-r--r--arch/m32r/kernel/io_oaks32r.c251
-rw-r--r--arch/m32r/kernel/io_opsput.c390
-rw-r--r--arch/m32r/kernel/io_usrv.c249
-rw-r--r--arch/m32r/kernel/irq.c91
-rw-r--r--arch/m32r/kernel/m32r_ksyms.c140
-rw-r--r--arch/m32r/kernel/module.c259
-rw-r--r--arch/m32r/kernel/process.c359
-rw-r--r--arch/m32r/kernel/ptrace.c829
-rw-r--r--arch/m32r/kernel/semaphore.c186
-rw-r--r--arch/m32r/kernel/setup.c420
-rw-r--r--arch/m32r/kernel/setup_m32700ut.c478
-rw-r--r--arch/m32r/kernel/setup_mappi.c160
-rw-r--r--arch/m32r/kernel/setup_mappi2.c212
-rw-r--r--arch/m32r/kernel/setup_oaks32r.c143
-rw-r--r--arch/m32r/kernel/setup_opsput.c482
-rw-r--r--arch/m32r/kernel/setup_usrv.c256
-rw-r--r--arch/m32r/kernel/signal.c438
-rw-r--r--arch/m32r/kernel/smp.c965
-rw-r--r--arch/m32r/kernel/smpboot.c630
-rw-r--r--arch/m32r/kernel/sys_m32r.c217
-rw-r--r--arch/m32r/kernel/time.c318
-rw-r--r--arch/m32r/kernel/traps.c332
-rw-r--r--arch/m32r/kernel/vmlinux.lds.S143
31 files changed, 11198 insertions, 0 deletions
diff --git a/arch/m32r/kernel/Makefile b/arch/m32r/kernel/Makefile
new file mode 100644
index 00000000000..cfd690bf6d8
--- /dev/null
+++ b/arch/m32r/kernel/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the Linux/M32R kernel.
+#
+
+# Objects built specially rather than via obj-y: the boot entry code,
+# the initial task structure, and the generated linker script.
+extra-y := head.o init_task.o vmlinux.lds
+
+# Core objects always linked into the kernel image.
+obj-y := process.o entry.o traps.o align.o irq.o setup.o time.o \
+ m32r_ksyms.o sys_m32r.o semaphore.o signal.o ptrace.o
+
+# Configuration-dependent objects: SMP support, per-platform
+# setup/io code, and loadable-module support.
+obj-$(CONFIG_SMP) += smp.o smpboot.o
+obj-$(CONFIG_PLAT_MAPPI) += setup_mappi.o io_mappi.o
+obj-$(CONFIG_PLAT_MAPPI2) += setup_mappi2.o io_mappi2.o
+obj-$(CONFIG_PLAT_USRV) += setup_usrv.o io_usrv.o
+obj-$(CONFIG_PLAT_M32700UT) += setup_m32700ut.o io_m32700ut.o
+obj-$(CONFIG_PLAT_OPSPUT) += setup_opsput.o io_opsput.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_PLAT_OAKS32R) += setup_oaks32r.o io_oaks32r.o
+
+# Preprocess assembler sources with traditional cpp semantics.
+EXTRA_AFLAGS := -traditional
+
diff --git a/arch/m32r/kernel/align.c b/arch/m32r/kernel/align.c
new file mode 100644
index 00000000000..48ec2971423
--- /dev/null
+++ b/arch/m32r/kernel/align.c
@@ -0,0 +1,585 @@
+/*
+ * align.c - address exception handler for M32R
+ *
+ * Copyright (c) 2003 Hitoshi Yamamoto
+ */
+
+#include <linux/config.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+/*
+ * get_reg - read saved general register `nr` from the exception frame.
+ *
+ * The saved registers are not one contiguous array inside struct
+ * pt_regs, so the register number is mapped onto one of four
+ * contiguous runs of saved slots, anchored at r0, r4, r7 and fp.
+ * NOTE(review): no bounds check — assumes 0 <= nr <= 15, which holds
+ * because callers pass 4-bit instruction fields (REG1/REG2).
+ */
+static int get_reg(struct pt_regs *regs, int nr)
+{
+ int val;
+
+ if (nr < 4)
+ val = *(unsigned long *)(&regs->r0 + nr);
+ else if (nr < 7)
+ val = *(unsigned long *)(&regs->r4 + (nr - 4));
+ else if (nr < 13)
+ val = *(unsigned long *)(&regs->r7 + (nr - 7));
+ else
+ val = *(unsigned long *)(&regs->fp + (nr - 13)); /* 13..15 land in the fp region of the frame */
+
+ return val;
+}
+
+/*
+ * set_reg - write `val` into saved general register `nr` of the
+ * exception frame.  Mirror of get_reg(): the same four contiguous
+ * runs of pt_regs slots (r0, r4, r7, fp) cover register numbers
+ * 0..3, 4..6, 7..12 and 13..15 respectively.
+ * NOTE(review): like get_reg(), assumes 0 <= nr <= 15 (4-bit field).
+ */
+static void set_reg(struct pt_regs *regs, int nr, int val)
+{
+ if (nr < 4)
+ *(unsigned long *)(&regs->r0 + nr) = val;
+ else if (nr < 7)
+ *(unsigned long *)(&regs->r4 + (nr - 4)) = val;
+ else if (nr < 13)
+ *(unsigned long *)(&regs->r7 + (nr - 7)) = val;
+ else
+ *(unsigned long *)(&regs->fp + (nr - 13)) = val;
+}
+
+/*
+ * Instruction-field helpers: REG1/REG2 extract the two 4-bit register
+ * number fields from a 16-bit instruction halfword; PSW_BC is the
+ * condition (C) bit in the saved PSW, used below both as the compare
+ * result and as carry/borrow for addx/subx.
+ */
+#define REG1(insn) (((insn) & 0x0f00) >> 8)
+#define REG2(insn) ((insn) & 0x000f)
+#define PSW_BC 0x100
+
+/* O- instruction */
+#define ISA_LD1 0x20c0 /* ld Rdest, @Rsrc */
+#define ISA_LD2 0x20e0 /* ld Rdest, @Rsrc+ */
+#define ISA_LDH 0x20a0 /* ldh Rdest, @Rsrc */
+#define ISA_LDUH 0x20b0 /* lduh Rdest, @Rsrc */
+#define ISA_ST1 0x2040 /* st Rsrc1, @Rsrc2 */
+#define ISA_ST2 0x2060 /* st Rsrc1, @+Rsrc2 */
+#define ISA_ST3 0x2070 /* st Rsrc1, @-Rsrc2 */
+#define ISA_STH1 0x2020 /* sth Rsrc1, @Rsrc2 */
+#define ISA_STH2 0x2030 /* sth Rsrc1, @Rsrc2+ */
+
+#ifdef CONFIG_ISA_DUAL_ISSUE
+
+/* OS instruction */
+#define ISA_ADD 0x00a0 /* add Rdest, Rsrc */
+#define ISA_ADDI 0x4000 /* addi Rdest, #imm8 */
+#define ISA_ADDX 0x0090 /* addx Rdest, Rsrc */
+#define ISA_AND 0x00c0 /* and Rdest, Rsrc */
+#define ISA_CMP 0x0040 /* cmp Rsrc1, Rsrc2 */
+#define ISA_CMPEQ 0x0060 /* cmpeq Rsrc1, Rsrc2 */
+#define ISA_CMPU 0x0050 /* cmpu Rsrc1, Rsrc2 */
+#define ISA_CMPZ 0x0070 /* cmpz Rsrc */
+#define ISA_LDI 0x6000 /* ldi Rdest, #imm8 */
+#define ISA_MV 0x1080 /* mv Rdest, Rsrc */
+#define ISA_NEG 0x0030 /* neg Rdest, Rsrc */
+#define ISA_NOP 0x7000 /* nop */
+#define ISA_NOT 0x00b0 /* not Rdest, Rsrc */
+#define ISA_OR 0x00e0 /* or Rdest, Rsrc */
+#define ISA_SUB 0x0020 /* sub Rdest, Rsrc */
+#define ISA_SUBX 0x0010 /* subx Rdest, Rsrc */
+#define ISA_XOR 0x00d0 /* xor Rdest, Rsrc */
+
+/* -S instruction */
+#define ISA_MUL 0x1060 /* mul Rdest, Rsrc */
+#define ISA_MULLO_A0 0x3010 /* mullo Rsrc1, Rsrc2, A0 */
+#define ISA_MULLO_A1 0x3090 /* mullo Rsrc1, Rsrc2, A1 */
+#define ISA_MVFACMI_A0 0x50f2 /* mvfacmi Rdest, A0 */
+#define ISA_MVFACMI_A1 0x50f6 /* mvfacmi Rdest, A1 */
+
+/*
+ * emu_addi - emulate "addi Rdest, #imm8": Rdest += sign-extended imm8.
+ * NOTE(review): sign extension relies on plain `char` being signed on
+ * this compiler/target — confirm for the m32r toolchain.
+ */
+static int emu_addi(unsigned short insn, struct pt_regs *regs)
+{
+ char imm = (char)(insn & 0xff);
+ int dest = REG1(insn);
+ int val;
+
+ val = get_reg(regs, dest);
+ val += imm;
+ set_reg(regs, dest, val);
+
+ return 0;
+}
+
+/*
+ * emu_ldi - emulate "ldi Rdest, #imm8": Rdest = sign-extended imm8.
+ * Same `char` signedness assumption as emu_addi() above.
+ */
+static int emu_ldi(unsigned short insn, struct pt_regs *regs)
+{
+ char imm = (char)(insn & 0xff);
+
+ set_reg(regs, REG1(insn), (int)imm);
+
+ return 0;
+}
+
+/* emu_add - emulate "add Rdest, Rsrc": Rdest += Rsrc (flags untouched). */
+static int emu_add(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ int src = REG2(insn);
+ int val;
+
+ val = get_reg(regs, dest);
+ val += get_reg(regs, src);
+ set_reg(regs, dest, val);
+
+ return 0;
+}
+
+/*
+ * emu_addx - emulate "addx Rdest, Rsrc": Rdest += Rsrc + C, then set
+ * the C bit from unsigned overflow of the sum.
+ * NOTE(review): overflow is detected by (result < old Rdest).  With
+ * carry-in 1 and Rsrc == 0xffffffff the sum wraps to exactly the old
+ * value, which this test misses — verify against the M32R manual.
+ */
+static int emu_addx(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ unsigned int val, tmp;
+
+ val = regs->psw & PSW_BC ? 1 : 0;
+ tmp = get_reg(regs, dest);
+ val += tmp;
+ val += (unsigned int)get_reg(regs, REG2(insn));
+ set_reg(regs, dest, val);
+
+ /* C bit set */
+ if (val < tmp)
+ regs->psw |= PSW_BC;
+ else
+ regs->psw &= ~(PSW_BC);
+
+ return 0;
+}
+
+/* emu_and - emulate "and Rdest, Rsrc": Rdest &= Rsrc. */
+static int emu_and(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ int val;
+
+ val = get_reg(regs, dest);
+ val &= get_reg(regs, REG2(insn));
+ set_reg(regs, dest, val);
+
+ return 0;
+}
+
+/* emu_cmp - emulate "cmp Rsrc1, Rsrc2": C = (Rsrc1 < Rsrc2), signed. */
+static int emu_cmp(unsigned short insn, struct pt_regs *regs)
+{
+ if (get_reg(regs, REG1(insn)) < get_reg(regs, REG2(insn)))
+ regs->psw |= PSW_BC;
+ else
+ regs->psw &= ~(PSW_BC);
+
+ return 0;
+}
+
+/* emu_cmpeq - emulate "cmpeq Rsrc1, Rsrc2": C = (Rsrc1 == Rsrc2). */
+static int emu_cmpeq(unsigned short insn, struct pt_regs *regs)
+{
+ if (get_reg(regs, REG1(insn)) == get_reg(regs, REG2(insn)))
+ regs->psw |= PSW_BC;
+ else
+ regs->psw &= ~(PSW_BC);
+
+ return 0;
+}
+
+/* emu_cmpu - emulate "cmpu Rsrc1, Rsrc2": C = (Rsrc1 < Rsrc2), unsigned. */
+static int emu_cmpu(unsigned short insn, struct pt_regs *regs)
+{
+ if ((unsigned int)get_reg(regs, REG1(insn))
+ < (unsigned int)get_reg(regs, REG2(insn)))
+ regs->psw |= PSW_BC;
+ else
+ regs->psw &= ~(PSW_BC);
+
+ return 0;
+}
+
+/*
+ * emu_cmpz - emulate "cmpz Rsrc": C = (Rsrc == 0).
+ * Note: cmpz encodes its single operand in the REG2 field.
+ */
+static int emu_cmpz(unsigned short insn, struct pt_regs *regs)
+{
+ if (!get_reg(regs, REG2(insn)))
+ regs->psw |= PSW_BC;
+ else
+ regs->psw &= ~(PSW_BC);
+
+ return 0;
+}
+
+/* emu_mv - emulate "mv Rdest, Rsrc": Rdest = Rsrc. */
+static int emu_mv(unsigned short insn, struct pt_regs *regs)
+{
+ int val;
+
+ val = get_reg(regs, REG2(insn));
+ set_reg(regs, REG1(insn), val);
+
+ return 0;
+}
+
+/* emu_neg - emulate "neg Rdest, Rsrc": Rdest = -Rsrc. */
+static int emu_neg(unsigned short insn, struct pt_regs *regs)
+{
+ int val;
+
+ val = get_reg(regs, REG2(insn));
+ set_reg(regs, REG1(insn), 0 - val);
+
+ return 0;
+}
+
+/* emu_not - emulate "not Rdest, Rsrc": Rdest = ~Rsrc (bitwise). */
+static int emu_not(unsigned short insn, struct pt_regs *regs)
+{
+ int val;
+
+ val = get_reg(regs, REG2(insn));
+ set_reg(regs, REG1(insn), ~val);
+
+ return 0;
+}
+
+/* emu_or - emulate "or Rdest, Rsrc": Rdest |= Rsrc. */
+static int emu_or(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ int val;
+
+ val = get_reg(regs, dest);
+ val |= get_reg(regs, REG2(insn));
+ set_reg(regs, dest, val);
+
+ return 0;
+}
+
+/* emu_sub - emulate "sub Rdest, Rsrc": Rdest -= Rsrc (flags untouched). */
+static int emu_sub(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ int val;
+
+ val = get_reg(regs, dest);
+ val -= get_reg(regs, REG2(insn));
+ set_reg(regs, dest, val);
+
+ return 0;
+}
+
+/*
+ * emu_subx - emulate "subx Rdest, Rsrc": Rdest -= Rsrc + C, then set
+ * the C bit from unsigned underflow of the result.
+ * NOTE(review): borrow is detected by (result > old Rdest).  With
+ * borrow-in 1 and Rsrc == 0xffffffff the result wraps to exactly the
+ * old value, which this test misses — verify against the M32R manual
+ * (same edge case as emu_addx).
+ */
+static int emu_subx(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ unsigned int val, tmp;
+
+ val = tmp = get_reg(regs, dest);
+ val -= (unsigned int)get_reg(regs, REG2(insn));
+ val -= regs->psw & PSW_BC ? 1 : 0;
+ set_reg(regs, dest, val);
+
+ /* C bit set */
+ if (val > tmp)
+ regs->psw |= PSW_BC;
+ else
+ regs->psw &= ~(PSW_BC);
+
+ return 0;
+}
+
+/* emu_xor - emulate "xor Rdest, Rsrc": Rdest ^= Rsrc. */
+static int emu_xor(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ unsigned int val;
+
+ val = (unsigned int)get_reg(regs, dest);
+ val ^= (unsigned int)get_reg(regs, REG2(insn));
+ set_reg(regs, dest, val);
+
+ return 0;
+}
+
+/*
+ * emu_mul - emulate "mul Rdest, Rsrc" by executing a real mul on the
+ * host CPU with the operands fetched from the saved register frame.
+ */
+static int emu_mul(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ int reg1, reg2;
+
+ reg1 = get_reg(regs, dest);
+ reg2 = get_reg(regs, REG2(insn));
+
+ __asm__ __volatile__ (
+ "mul %0, %1; \n\t"
+ : "+r" (reg1) : "r" (reg2)
+ );
+
+ set_reg(regs, dest, reg1);
+
+ return 0;
+}
+
+/*
+ * emu_mullo_a0 - emulate "mullo Rsrc1, Rsrc2, A0": compute the 64-bit
+ * product on the hardware a0 accumulator, then store its high/low
+ * words into the saved acc0 in the trap frame.
+ * NOTE(review): the live a0 is clobbered as scratch; presumably safe
+ * because the task's accumulators are reloaded from pt_regs on return
+ * from the exception — verify against RESTORE_ALL in entry.S.
+ */
+static int emu_mullo_a0(unsigned short insn, struct pt_regs *regs)
+{
+ int reg1, reg2;
+
+ reg1 = get_reg(regs, REG1(insn));
+ reg2 = get_reg(regs, REG2(insn));
+
+ __asm__ __volatile__ (
+ "mullo %0, %1, a0; \n\t"
+ "mvfachi %0, a0; \n\t"
+ "mvfaclo %1, a0; \n\t"
+ : "+r" (reg1), "+r" (reg2)
+ );
+
+ regs->acc0h = reg1;
+ regs->acc0l = reg2;
+
+ return 0;
+}
+
+/*
+ * emu_mullo_a1 - emulate "mullo Rsrc1, Rsrc2, A1".  Like the A0
+ * variant it uses the hardware a0 accumulator purely as scratch; the
+ * result is written into the saved acc1 slots of the trap frame, so
+ * using a0 here (not a1) is intentional, not a typo.
+ */
+static int emu_mullo_a1(unsigned short insn, struct pt_regs *regs)
+{
+ int reg1, reg2;
+
+ reg1 = get_reg(regs, REG1(insn));
+ reg2 = get_reg(regs, REG2(insn));
+
+ __asm__ __volatile__ (
+ "mullo %0, %1, a0; \n\t"
+ "mvfachi %0, a0; \n\t"
+ "mvfaclo %1, a0; \n\t"
+ : "+r" (reg1), "+r" (reg2)
+ );
+
+ regs->acc1h = reg1;
+ regs->acc1l = reg2;
+
+ return 0;
+}
+
+/*
+ * emu_mvfacmi_a0 - emulate "mvfacmi Rdest, A0": extract the middle
+ * 32 bits of the saved accumulator 0, i.e. low half of acc0h joined
+ * with high half of acc0l.
+ */
+static int emu_mvfacmi_a0(unsigned short insn, struct pt_regs *regs)
+{
+ unsigned long val;
+
+ val = (regs->acc0h << 16) | (regs->acc0l >> 16);
+ set_reg(regs, REG1(insn), (int)val);
+
+ return 0;
+}
+
+/*
+ * emu_mvfacmi_a1 - emulate "mvfacmi Rdest, A1": same extraction as
+ * above, from the saved accumulator 1.
+ */
+static int emu_mvfacmi_a1(unsigned short insn, struct pt_regs *regs)
+{
+ unsigned long val;
+
+ val = (regs->acc1h << 16) | (regs->acc1l >> 16);
+ set_reg(regs, REG1(insn), (int)val);
+
+ return 0;
+}
+
+/*
+ * emu_m32r2 - decode and emulate the 16-bit sub-instruction that was
+ * issued in parallel with a faulting ld/st (dual-issue ISA only).
+ *
+ * Decoding is a three-stage cascade of progressively wider opcode
+ * masks: first the imm8 formats (mask 0x7000), then the reg-reg
+ * formats (mask 0x70f0), finally mvfacmi (mask 0x70ff).  Bit 15 (the
+ * parallel-issue bit) is masked off throughout.
+ *
+ * Returns 0 if the instruction was recognized and emulated, -1 if no
+ * emulator matched (the emu_* helpers themselves always return 0).
+ */
+static int emu_m32r2(unsigned short insn, struct pt_regs *regs)
+{
+ int res = -1;
+
+ if ((insn & 0x7fff) == ISA_NOP) /* nop */
+ return 0;
+
+ /* stage 1: imm8 formats */
+ switch(insn & 0x7000) {
+ case ISA_ADDI: /* addi Rdest, #imm8 */
+ res = emu_addi(insn, regs);
+ break;
+ case ISA_LDI: /* ldi Rdest, #imm8 */
+ res = emu_ldi(insn, regs);
+ break;
+ default:
+ break;
+ }
+
+ if (!res)
+ return 0;
+
+ /* stage 2: reg-reg formats */
+ switch(insn & 0x70f0) {
+ case ISA_ADD: /* add Rdest, Rsrc */
+ res = emu_add(insn, regs);
+ break;
+ case ISA_ADDX: /* addx Rdest, Rsrc */
+ res = emu_addx(insn, regs);
+ break;
+ case ISA_AND: /* and Rdest, Rsrc */
+ res = emu_and(insn, regs);
+ break;
+ case ISA_CMP: /* cmp Rsrc1, Rsrc2 */
+ res = emu_cmp(insn, regs);
+ break;
+ case ISA_CMPEQ: /* cmpeq Rsrc1, Rsrc2 */
+ res = emu_cmpeq(insn, regs);
+ break;
+ case ISA_CMPU: /* cmpu Rsrc1, Rsrc2 */
+ res = emu_cmpu(insn, regs);
+ break;
+ case ISA_CMPZ: /* cmpz Rsrc */
+ res = emu_cmpz(insn, regs);
+ break;
+ case ISA_MV: /* mv Rdest, Rsrc */
+ res = emu_mv(insn, regs);
+ break;
+ case ISA_NEG: /* neg Rdest, Rsrc */
+ res = emu_neg(insn, regs);
+ break;
+ case ISA_NOT: /* not Rdest, Rsrc */
+ res = emu_not(insn, regs);
+ break;
+ case ISA_OR: /* or Rdest, Rsrc */
+ res = emu_or(insn, regs);
+ break;
+ case ISA_SUB: /* sub Rdest, Rsrc */
+ res = emu_sub(insn, regs);
+ break;
+ case ISA_SUBX: /* subx Rdest, Rsrc */
+ res = emu_subx(insn, regs);
+ break;
+ case ISA_XOR: /* xor Rdest, Rsrc */
+ res = emu_xor(insn, regs);
+ break;
+ case ISA_MUL: /* mul Rdest, Rsrc */
+ res = emu_mul(insn, regs);
+ break;
+ case ISA_MULLO_A0: /* mullo Rsrc1, Rsrc2 */
+ res = emu_mullo_a0(insn, regs);
+ break;
+ case ISA_MULLO_A1: /* mullo Rsrc1, Rsrc2 */
+ res = emu_mullo_a1(insn, regs);
+ break;
+ default:
+ break;
+ }
+
+ if (!res)
+ return 0;
+
+ /* stage 3: mvfacmi (full low-byte opcode) */
+ switch(insn & 0x70ff) {
+ case ISA_MVFACMI_A0: /* mvfacmi Rdest */
+ res = emu_mvfacmi_a0(insn, regs);
+ break;
+ case ISA_MVFACMI_A1: /* mvfacmi Rdest */
+ res = emu_mvfacmi_a1(insn, regs);
+ break;
+ default:
+ break;
+ }
+
+ return res;
+}
+
+#endif /* CONFIG_ISA_DUAL_ISSUE */
+
+/*
+ * ld : ?010 dest 1100 src
+ * 0010 dest 1110 src : ld Rdest, @Rsrc+
+ * ldh : ?010 dest 1010 src
+ * lduh : ?010 dest 1011 src
+ * st : ?010 src1 0100 src2
+ * 0010 src1 0110 src2 : st Rsrc1, @+Rsrc2
+ * 0010 src1 0111 src2 : st Rsrc1, @-Rsrc2
+ * sth : ?010 src1 0010 src2
+ */
+
+/*
+ * insn_check - adjust the effective address and the saved PC for the
+ * faulting instruction, and (on dual-issue ISA) emulate a companion
+ * sub-instruction.
+ *
+ * For a 32-bit ld/st the sign-extended disp16 in the low halfword is
+ * added to *ucp and bpc advances by 4.  For a 16-bit insn: if bpc is
+ * 4-byte aligned and bit 15 is set, the faulting ld/st was issued in
+ * parallel with another 16-bit insn, which must be emulated too and
+ * bpc advances over the whole 32-bit pair; otherwise bpc advances 2.
+ *
+ * Returns 0 on success, nonzero if the parallel insn could not be
+ * emulated (emu_m32r2 returned -1).
+ */
+static int insn_check(unsigned long insn, struct pt_regs *regs,
+ unsigned char **ucp)
+{
+ int res = 0;
+
+ /*
+ * 32bit insn
+ * ld Rdest, @(disp16, Rsrc)
+ * st Rdest, @(disp16, Rsrc)
+ */
+ if (insn & 0x80000000) { /* 32bit insn */
+ *ucp += (short)(insn & 0x0000ffff);
+ regs->bpc += 4;
+ } else { /* 16bit insn */
+#ifdef CONFIG_ISA_DUAL_ISSUE
+ /* parallel exec check */
+ if (!(regs->bpc & 0x2) && insn & 0x8000) {
+ res = emu_m32r2((unsigned short)insn, regs);
+ regs->bpc += 4;
+ } else
+#endif /* CONFIG_ISA_DUAL_ISSUE */
+ regs->bpc += 2;
+ }
+
+ return res;
+}
+
+/*
+ * emu_ld - emulate an unaligned ld/ldh/lduh from user memory.
+ *
+ * The effective address starts from the base register (REG2) and is
+ * adjusted by insn_check() for the disp16 form.  Bit 6 of the insn
+ * distinguishes word (4-byte) from halfword (2-byte) access.
+ * NOTE(review): for the 2-byte case the copied bytes occupy the high
+ * half of `val`, hence the >> 16 — this assumes big-endian layout
+ * (M32R); confirm if ever reused elsewhere.
+ *
+ * Returns 0 on success, -1 on decode or user-copy failure.
+ */
+static int emu_ld(unsigned long insn32, struct pt_regs *regs)
+{
+ unsigned char *ucp;
+ unsigned long val;
+ unsigned short insn16;
+ int size, src;
+
+ insn16 = insn32 >> 16;
+ src = REG2(insn16);
+ ucp = (unsigned char *)get_reg(regs, src);
+
+ if (insn_check(insn32, regs, &ucp))
+ return -1;
+
+ size = insn16 & 0x0040 ? 4 : 2;
+ if (copy_from_user(&val, ucp, size))
+ return -1;
+
+ if (size == 2)
+ val >>= 16;
+
+ /* ldh sign check: opcode 0xa0 is the signed halfword load */
+ if ((insn16 & 0x00f0) == 0x00a0 && (val & 0x8000))
+ val |= 0xffff0000;
+
+ set_reg(regs, REG1(insn16), val);
+
+ /* ld increment check: post-increment writes base+4 back */
+ if ((insn16 & 0xf0f0) == ISA_LD2) /* ld Rdest, @Rsrc+ */
+ set_reg(regs, src, (unsigned long)(ucp + 4));
+
+ return 0;
+}
+
+/*
+ * emu_st - emulate an unaligned st/sth to user memory.
+ *
+ * Bit 6 of the insn selects word (4-byte) vs halfword (2-byte); for a
+ * halfword store the value is shifted into the high half first (the
+ * same big-endian assumption as emu_ld — see NOTE there).
+ *
+ * Pre-increment/decrement (@+Rsrc2 / @-Rsrc2, mask 0x2060) adjusts
+ * the base register *before* the store; sth post-increment (ISA_STH2)
+ * adjusts it by 2 *after* a successful store.
+ *
+ * Returns 0 on success, -1 on decode or user-copy failure.
+ * NOTE(review): on the pre-inc/dec forms the base register is updated
+ * even if copy_to_user() subsequently fails — confirm this matches
+ * hardware fault semantics.
+ */
+static int emu_st(unsigned long insn32, struct pt_regs *regs)
+{
+ unsigned char *ucp;
+ unsigned long val;
+ unsigned short insn16;
+ int size, src2;
+
+ insn16 = insn32 >> 16;
+ src2 = REG2(insn16);
+
+ ucp = (unsigned char *)get_reg(regs, src2);
+
+ if (insn_check(insn32, regs, &ucp))
+ return -1;
+
+ size = insn16 & 0x0040 ? 4 : 2;
+ val = get_reg(regs, REG1(insn16));
+ if (size == 2)
+ val <<= 16;
+
+ /* st inc/dec check */
+ if ((insn16 & 0xf0e0) == 0x2060) {
+ if (insn16 & 0x0010)
+ ucp -= 4;
+ else
+ ucp += 4;
+
+ set_reg(regs, src2, (unsigned long)ucp);
+ }
+
+ if (copy_to_user(ucp, &val, size))
+ return -1;
+
+ /* sth inc check */
+ if ((insn16 & 0xf0f0) == ISA_STH2) {
+ ucp += 2;
+ set_reg(regs, src2, (unsigned long)ucp);
+ }
+
+ return 0;
+}
+
+/*
+ * handle_unaligned_access - top-level entry for the address-exception
+ * handler: emulate the faulting unaligned load or store.
+ *
+ * Only the ld/st opcode group (top bits x010) is emulated; a 32-bit
+ * instruction found at a non-4-byte-aligned PC is malformed and also
+ * rejected.  Bit 7 of the first halfword selects load vs store.
+ *
+ * Returns 0 if the access was emulated, -1 if the caller should fall
+ * back to its error path (e.g. signal the task).
+ */
+int handle_unaligned_access(unsigned long insn32, struct pt_regs *regs)
+{
+ unsigned short insn16;
+ int res;
+
+ insn16 = insn32 >> 16;
+
+ /* ld or st check */
+ if ((insn16 & 0x7000) != 0x2000)
+ return -1;
+
+ /* insn alignment check */
+ if ((insn16 & 0x8000) && (regs->bpc & 3))
+ return -1;
+
+ if (insn16 & 0x0080) /* ld */
+ res = emu_ld(insn32, regs);
+ else /* st */
+ res = emu_st(insn32, regs);
+
+ return res;
+}
+
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
new file mode 100644
index 00000000000..dddbf6b5ed2
--- /dev/null
+++ b/arch/m32r/kernel/entry.S
@@ -0,0 +1,1000 @@
+/*
+ * linux/arch/m32r/kernel/entry.S
+ *
+ * Copyright (c) 2001, 2002 Hirokazu Takata, Hitoshi Yamamoto, H. Kondo
+ * Copyright (c) 2003 Hitoshi Yamamoto
+ * Copyright (c) 2004 Hirokazu Takata <takata at linux-m32r.org>
+ *
+ * Taken from i386 version.
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * Stack layout in 'ret_from_system_call':
+ * ptrace needs to have all regs on the stack.
+ * if the order here is changed, it needs to be
+ * updated in fork.c:copy_process, signal.c:do_signal,
+ * ptrace.c and ptrace.h
+ *
+ * M32Rx/M32R2 M32R
+ * @(sp) - r4 ditto
+ * @(0x04,sp) - r5 ditto
+ * @(0x08,sp) - r6 ditto
+ * @(0x0c,sp) - *pt_regs ditto
+ * @(0x10,sp) - r0 ditto
+ * @(0x14,sp) - r1 ditto
+ * @(0x18,sp) - r2 ditto
+ * @(0x1c,sp) - r3 ditto
+ * @(0x20,sp) - r7 ditto
+ * @(0x24,sp) - r8 ditto
+ * @(0x28,sp) - r9 ditto
+ * @(0x2c,sp) - r10 ditto
+ * @(0x30,sp) - r11 ditto
+ * @(0x34,sp) - r12 ditto
+ * @(0x38,sp) - syscall_nr ditto
+ * @(0x3c,sp) - acc0h @(0x3c,sp) - acch
+ * @(0x40,sp) - acc0l @(0x40,sp) - accl
+ * @(0x44,sp) - acc1h @(0x44,sp) - psw
+ * @(0x48,sp) - acc1l @(0x48,sp) - bpc
+ * @(0x4c,sp) - psw @(0x4c,sp) - bbpsw
+ * @(0x50,sp) - bpc @(0x50,sp) - bbpc
+ * @(0x54,sp) - bbpsw @(0x54,sp) - spu (cr3)
+ * @(0x58,sp) - bbpc @(0x58,sp) - fp (r13)
+ * @(0x5c,sp) - spu (cr3) @(0x5c,sp) - lr (r14)
+ * @(0x60,sp) - fp (r13) @(0x60,sp) - spi (cr12)
+ * @(0x64,sp) - lr (r14) @(0x64,sp) - orig_r0
+ * @(0x68,sp) - spi (cr2)
+ * @(0x6c,sp) - orig_r0
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/irq.h>
+#include <asm/unistd.h>
+#include <asm/assembler.h>
+#include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/segment.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/m32r.h>
+#include <asm/mmu_context.h>
+
+#if !defined(CONFIG_MMU)
+/* Without an MMU these VM-management syscalls are meaningless; alias
+ * them to sys_ni_syscall so the syscall table stays densely populated. */
+#define sys_madvise sys_ni_syscall
+#define sys_readahead sys_ni_syscall
+#define sys_mprotect sys_ni_syscall
+#define sys_msync sys_ni_syscall
+#define sys_mlock sys_ni_syscall
+#define sys_munlock sys_ni_syscall
+#define sys_mlockall sys_ni_syscall
+#define sys_munlockall sys_ni_syscall
+#define sys_mremap sys_ni_syscall
+#define sys_mincore sys_ni_syscall
+#define sys_remap_file_pages sys_ni_syscall
+#endif /* CONFIG_MMU */
+
+/*
+ * Accessor macros for the saved-register slots of the stack frame
+ * described in the layout comment at the top of this file.  The
+ * upper-slot offsets differ between the M32R2-with-DSP configuration
+ * (two accumulators: acc0/acc1) and plain M32R/M32R2 (one
+ * accumulator), hence the two #if branches below.
+ */
+#define R4(reg) @reg
+#define R5(reg) @(0x04,reg)
+#define R6(reg) @(0x08,reg)
+#define PTREGS(reg) @(0x0C,reg)
+#define R0(reg) @(0x10,reg)
+#define R1(reg) @(0x14,reg)
+#define R2(reg) @(0x18,reg)
+#define R3(reg) @(0x1C,reg)
+#define R7(reg) @(0x20,reg)
+#define R8(reg) @(0x24,reg)
+#define R9(reg) @(0x28,reg)
+#define R10(reg) @(0x2C,reg)
+#define R11(reg) @(0x30,reg)
+#define R12(reg) @(0x34,reg)
+#define SYSCALL_NR(reg) @(0x38,reg)
+#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
+#define ACC0H(reg) @(0x3C,reg)
+#define ACC0L(reg) @(0x40,reg)
+#define ACC1H(reg) @(0x44,reg)
+#define ACC1L(reg) @(0x48,reg)
+#define PSW(reg) @(0x4C,reg)
+#define BPC(reg) @(0x50,reg)
+#define BBPSW(reg) @(0x54,reg)
+#define BBPC(reg) @(0x58,reg)
+#define SPU(reg) @(0x5C,reg)
+#define FP(reg) @(0x60,reg) /* FP = R13 */
+#define LR(reg) @(0x64,reg)
+#define SP(reg) @(0x68,reg)
+#define ORIG_R0(reg) @(0x6C,reg)
+#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
+#define ACCH(reg) @(0x3C,reg)
+#define ACCL(reg) @(0x40,reg)
+#define PSW(reg) @(0x44,reg)
+#define BPC(reg) @(0x48,reg)
+#define BBPSW(reg) @(0x4C,reg)
+#define BBPC(reg) @(0x50,reg)
+#define SPU(reg) @(0x54,reg)
+#define FP(reg) @(0x58,reg) /* FP = R13 */
+#define LR(reg) @(0x5C,reg)
+#define SP(reg) @(0x60,reg)
+#define ORIG_R0(reg) @(0x64,reg)
+#else
+#error unknown isa configuration
+#endif
+
+/* NOTE(review): these mask constants look inherited from the i386
+ * entry.S this file was taken from (per the header comment); most
+ * appear unused here — confirm before relying on them. */
+CF_MASK = 0x00000001
+TF_MASK = 0x00000100
+IF_MASK = 0x00000200
+DF_MASK = 0x00000400
+NT_MASK = 0x00004000
+VM_MASK = 0x00020000
+
+/* With CONFIG_PREEMPT, returning to kernel mode must first disable
+ * interrupts (preempt_stop) and go through resume_kernel; without it,
+ * kernel resume degenerates to a plain restore_all. */
+#ifdef CONFIG_PREEMPT
+#define preempt_stop(x) CLI(x)
+#else
+#define preempt_stop(x)
+#define resume_kernel restore_all
+#endif
+
+/*
+ * ret_from_fork: first path taken by a newly created task.  Pops the
+ * argument for schedule_tail() off the stack (presumably the previous
+ * task pointer set up by the fork path — verify in process.c), then
+ * joins the common syscall-exit path with r8 = thread_info, as
+ * syscall_exit expects.
+ */
+ENTRY(ret_from_fork)
+ ld r0, @sp+
+ bl schedule_tail
+ GET_THREAD_INFO(r8)
+ bra syscall_exit
+
+/*
+ * Return to user mode is not as complex as all this looks,
+ * but we want the default path for a system call return to
+ * go as quickly as possible which is why some of this is
+ * less clear than it otherwise should be.
+ */
+
+ ; userspace resumption stub bypassing syscall exit tracing
+ ALIGN
+/*
+ * ret_from_exception / ret_from_intr: common return path.  Inspects
+ * the saved PSW's backup-stack-mode bit(s) to decide whether we are
+ * returning to kernel context (-> resume_kernel) or to user space
+ * (-> resume_userspace, which checks for pending work first).
+ */
+ret_from_exception:
+ preempt_stop(r4)
+ret_from_intr:
+ ld r4, PSW(sp)
+#ifdef CONFIG_ISA_M32R2
+ and3 r4, r4, #0x8800 ; check BSM and BPM bits
+#else
+ and3 r4, r4, #0x8000 ; check BSM bit
+#endif
+ beqz r4, resume_kernel
+ENTRY(resume_userspace)
+ CLI(r4) ; make sure we don't miss an interrupt
+ ; setting need_resched or sigpending
+ ; between sampling and the iret
+ GET_THREAD_INFO(r8)
+ ld r9, @(TI_FLAGS, r8)
+ and3 r4, r9, #_TIF_WORK_MASK ; is there any work to be done on
+ ; int/exception return?
+ bnez r4, work_pending
+ bra restore_all
+
+#ifdef CONFIG_PREEMPT
+/*
+ * resume_kernel (preemptible kernels only): if preemption is allowed
+ * (preempt_count == 0), TIF_NEED_RESCHED is set, and interrupts were
+ * enabled in the interrupted context, mark PREEMPT_ACTIVE and call
+ * schedule(); loop until need_resched clears.
+ */
+ENTRY(resume_kernel)
+ GET_THREAD_INFO(r8)
+ ld r9, @(TI_PRE_COUNT, r8) ; non-zero preempt_count ?
+ bnez r9, restore_all
+need_resched:
+ ld r9, @(TI_FLAGS, r8) ; need_resched set ?
+ and3 r4, r9, #_TIF_NEED_RESCHED
+ beqz r4, restore_all
+ ld r4, PSW(sp) ; interrupts off (exception path) ?
+ and3 r4, r4, #0x4000
+ beqz r4, restore_all
+ LDIMM (r4, PREEMPT_ACTIVE)
+ st r4, @(TI_PRE_COUNT, r8)
+ STI(r4)
+ bl schedule
+ ldi r4, #0
+ st r4, @(TI_PRE_COUNT, r8)
+ CLI(r4)
+ bra need_resched
+#endif
+
+ ; system call handler stub
+/*
+ * system_call: trap entry for syscalls.  Saves the full register
+ * frame, validates the syscall number in r7 against NR_syscalls,
+ * optionally diverts through syscall_trace_entry when the task is
+ * ptraced, dispatches through sys_call_table, stores the return
+ * value into the saved r0, and falls through to the exit checks.
+ */
+ENTRY(system_call)
+ SWITCH_TO_KERNEL_STACK
+ SAVE_ALL
+ STI(r4) ; Enable interrupt
+ st sp, PTREGS(sp) ; implicit pt_regs parameter
+ cmpui r7, #NR_syscalls
+ bnc syscall_badsys
+ st r7, SYSCALL_NR(sp) ; syscall_nr
+ ; system call tracing in operation
+ GET_THREAD_INFO(r8)
+ ld r9, @(TI_FLAGS, r8)
+ and3 r4, r9, #_TIF_SYSCALL_TRACE
+ bnez r4, syscall_trace_entry
+syscall_call:
+ slli r7, #2 ; table jump for the system call
+ LDIMM (r4, sys_call_table)
+ add r7, r4
+ ld r7, @r7
+ jl r7 ; execute system call
+ st r0, R0(sp) ; save the return value
+syscall_exit:
+ CLI(r4) ; make sure we don't miss an interrupt
+ ; setting need_resched or sigpending
+ ; between sampling and the iret
+ ld r9, @(TI_FLAGS, r8)
+ and3 r4, r9, #_TIF_ALLWORK_MASK ; current->work
+ bnez r4, syscall_exit_work
+restore_all:
+ RESTORE_ALL
+
+ # perform work that needs to be done immediately before resumption
+ # r9 : flags
+ ALIGN
+work_pending:
+ and3 r4, r9, #_TIF_NEED_RESCHED
+ beqz r4, work_notifysig
+work_resched:
+ bl schedule
+ CLI(r4) ; make sure we don't miss an interrupt
+ ; setting need_resched or sigpending
+ ; between sampling and the iret
+ ld r9, @(TI_FLAGS, r8)
+ and3 r4, r9, #_TIF_WORK_MASK ; is there any work to be done other
+ ; than syscall tracing?
+ beqz r4, restore_all
+ and3 r4, r4, #_TIF_NEED_RESCHED
+ bnez r4, work_resched
+
+work_notifysig: ; deal with pending signals and
+ ; notify-resume requests
+ mv r0, sp ; arg1 : struct pt_regs *regs
+ ldi r1, #0 ; arg2 : sigset_t *oldset
+ mv r2, r9 ; arg3 : __u32 thread_info_flags
+ bl do_notify_resume
+ bra restore_all
+
+ ; perform syscall entry tracing (label name notwithstanding, this is
+ ; the entry-side hook: registers are reloaded and the syscall number
+ ; is re-validated afterwards, since the tracer may have changed them)
+ ALIGN
+syscall_trace_entry:
+ ldi r4, #-ENOSYS
+ st r4, R0(sp)
+ bl do_syscall_trace
+ ld r0, ORIG_R0(sp)
+ ld r1, R1(sp)
+ ld r2, R2(sp)
+ ld r3, R3(sp)
+ ld r4, R4(sp)
+ ld r5, R5(sp)
+ ld r6, R6(sp)
+ ld r7, SYSCALL_NR(sp)
+ cmpui r7, #NR_syscalls
+ bc syscall_call
+ bra syscall_exit
+
+ ; perform syscall exit tracing
+ ALIGN
+syscall_exit_work:
+ ld r9, @(TI_FLAGS, r8)
+ and3 r4, r9, #_TIF_SYSCALL_TRACE
+ beqz r4, work_pending
+ STI(r4) ; could let do_syscall_trace() call
+ ; schedule() instead
+ bl do_syscall_trace
+ bra resume_userspace
+
+ ; fault while fetching syscall args: fail the call with -EFAULT
+ ALIGN
+syscall_fault:
+ SAVE_ALL
+ GET_THREAD_INFO(r8)
+ ldi r4, #-EFAULT
+ st r4, R0(sp)
+ bra resume_userspace
+
+ ; out-of-range syscall number: fail the call with -ENOSYS
+ ALIGN
+syscall_badsys:
+ ldi r4, #-ENOSYS
+ st r4, R0(sp)
+ bra resume_userspace
+
+ .global eit_vector
+
+ .equ ei_vec_table, eit_vector + 0x0200
+
+/*
+ * EI handler routine
+ */
+ENTRY(ei_handler)
+#if defined(CONFIG_CHIP_M32700)
+ SWITCH_TO_KERNEL_STACK
+ ; WORKAROUND: force to clear SM bit and use the kernel stack (SPI).
+#endif
+ SAVE_ALL
+ mv r1, sp ; arg1(regs)
+#if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \
+ || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \
+ || defined(CONFIG_CHIP_OPSP)
+
+; GET_ICU_STATUS;
+ seth r0, #shigh(M32R_ICU_ISTS_ADDR)
+ ld r0, @(low(M32R_ICU_ISTS_ADDR),r0)
+ st r0, @-sp
+#if defined(CONFIG_SMP)
+ /*
+ * If IRQ == 0 --> Nothing to do, Not write IMASK
+ * If IRQ == IPI --> Do IPI handler, Not write IMASK
+ * If IRQ != 0, IPI --> Do do_IRQ(), Write IMASK
+ */
+ slli r0, #4
+ srli r0, #24 ; r0(irq_num<<2)
+ ;; IRQ exist check
+#if defined(CONFIG_CHIP_M32700)
+ /* WORKAROUND: IMASK bug M32700-TS1, TS2 chip. */
+ beqz r0, 3f ; if (!irq_num) goto exit
+#else
+ beqz r0, 1f ; if (!irq_num) goto exit
+#endif /* WORKAROUND */
+ ;; IPI check
+ cmpi r0, #(M32R_IRQ_IPI0<<2) ; ISN < IPI0 check
+ bc 2f
+ cmpi r0, #((M32R_IRQ_IPI7+1)<<2) ; ISN > IPI7 check
+ bnc 2f
+ LDIMM (r2, ei_vec_table)
+ add r2, r0
+ ld r2, @r2
+ beqz r2, 1f ; if (no IPI handler) goto exit
+ mv r0, r1 ; arg0(regs)
+ jl r2
+ .fillinsn
+1:
+ addi sp, #4
+ bra ret_to_intr
+#if defined(CONFIG_CHIP_M32700)
+ /* WORKAROUND: IMASK bug M32700-TS1, TS2 chip. */
+ .fillinsn
+3:
+ ld24 r14, #0x00070000
+ seth r0, #shigh(M32R_ICU_IMASK_ADDR)
+ st r14, @(low(M32R_ICU_IMASK_ADDR), r0)
+ addi sp, #4
+ bra ret_to_intr
+#endif /* WORKAROUND */
+ ;; do_IRQ
+ .fillinsn
+2:
+ srli r0, #2
+#if defined(CONFIG_PLAT_USRV)
+ add3 r2, r0, #-(M32R_IRQ_INT1) ; INT1# interrupt
+ bnez r2, 9f
+ ; read ICU status register of PLD
+ seth r0, #high(PLD_ICUISTS)
+ or3 r0, r0, #low(PLD_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ addi r0, #(M32700UT_PLD_IRQ_BASE)
+ .fillinsn
+9:
+#elif defined(CONFIG_PLAT_M32700UT)
+ add3 r2, r0, #-(M32R_IRQ_INT1) ; INT1# interrupt
+ bnez r2, check_int0
+ ; read ICU status register of PLD
+ seth r0, #high(PLD_ICUISTS)
+ or3 r0, r0, #low(PLD_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ addi r0, #(M32700UT_