author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/m32r/kernel
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it.

Let it rip!
Diffstat (limited to 'arch/m32r/kernel')
-rw-r--r--	arch/m32r/kernel/Makefile	20
-rw-r--r--	arch/m32r/kernel/align.c	585
-rw-r--r--	arch/m32r/kernel/entry.S	1000
-rw-r--r--	arch/m32r/kernel/head.S	287
-rw-r--r--	arch/m32r/kernel/init_task.c	41
-rw-r--r--	arch/m32r/kernel/io_m32700ut.c	472
-rw-r--r--	arch/m32r/kernel/io_mappi.c	384
-rw-r--r--	arch/m32r/kernel/io_mappi2.c	461
-rw-r--r--	arch/m32r/kernel/io_oaks32r.c	251
-rw-r--r--	arch/m32r/kernel/io_opsput.c	390
-rw-r--r--	arch/m32r/kernel/io_usrv.c	249
-rw-r--r--	arch/m32r/kernel/irq.c	91
-rw-r--r--	arch/m32r/kernel/m32r_ksyms.c	140
-rw-r--r--	arch/m32r/kernel/module.c	259
-rw-r--r--	arch/m32r/kernel/process.c	359
-rw-r--r--	arch/m32r/kernel/ptrace.c	829
-rw-r--r--	arch/m32r/kernel/semaphore.c	186
-rw-r--r--	arch/m32r/kernel/setup.c	420
-rw-r--r--	arch/m32r/kernel/setup_m32700ut.c	478
-rw-r--r--	arch/m32r/kernel/setup_mappi.c	160
-rw-r--r--	arch/m32r/kernel/setup_mappi2.c	212
-rw-r--r--	arch/m32r/kernel/setup_oaks32r.c	143
-rw-r--r--	arch/m32r/kernel/setup_opsput.c	482
-rw-r--r--	arch/m32r/kernel/setup_usrv.c	256
-rw-r--r--	arch/m32r/kernel/signal.c	438
-rw-r--r--	arch/m32r/kernel/smp.c	965
-rw-r--r--	arch/m32r/kernel/smpboot.c	630
-rw-r--r--	arch/m32r/kernel/sys_m32r.c	217
-rw-r--r--	arch/m32r/kernel/time.c	318
-rw-r--r--	arch/m32r/kernel/traps.c	332
-rw-r--r--	arch/m32r/kernel/vmlinux.lds.S	143
31 files changed, 11198 insertions, 0 deletions
diff --git a/arch/m32r/kernel/Makefile b/arch/m32r/kernel/Makefile
new file mode 100644
index 00000000000..cfd690bf6d8
--- /dev/null
+++ b/arch/m32r/kernel/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the Linux/M32R kernel.
+#
+
+extra-y := head.o init_task.o vmlinux.lds
+
+obj-y := process.o entry.o traps.o align.o irq.o setup.o time.o \
+ m32r_ksyms.o sys_m32r.o semaphore.o signal.o ptrace.o
+
+obj-$(CONFIG_SMP) += smp.o smpboot.o
+obj-$(CONFIG_PLAT_MAPPI) += setup_mappi.o io_mappi.o
+obj-$(CONFIG_PLAT_MAPPI2) += setup_mappi2.o io_mappi2.o
+obj-$(CONFIG_PLAT_USRV) += setup_usrv.o io_usrv.o
+obj-$(CONFIG_PLAT_M32700UT) += setup_m32700ut.o io_m32700ut.o
+obj-$(CONFIG_PLAT_OPSPUT) += setup_opsput.o io_opsput.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_PLAT_OAKS32R) += setup_oaks32r.o io_oaks32r.o
+
+EXTRA_AFLAGS := -traditional
+
diff --git a/arch/m32r/kernel/align.c b/arch/m32r/kernel/align.c
new file mode 100644
index 00000000000..48ec2971423
--- /dev/null
+++ b/arch/m32r/kernel/align.c
@@ -0,0 +1,585 @@
+/*
+ * align.c - address exception handler for M32R
+ *
+ * Copyright (c) 2003 Hitoshi Yamamoto
+ */
+
+#include <linux/config.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+static int get_reg(struct pt_regs *regs, int nr)
+{
+ int val;
+
+ if (nr < 4)
+ val = *(unsigned long *)(&regs->r0 + nr);
+ else if (nr < 7)
+ val = *(unsigned long *)(&regs->r4 + (nr - 4));
+ else if (nr < 13)
+ val = *(unsigned long *)(&regs->r7 + (nr - 7));
+ else
+ val = *(unsigned long *)(&regs->fp + (nr - 13));
+
+ return val;
+}
+
+static void set_reg(struct pt_regs *regs, int nr, int val)
+{
+ if (nr < 4)
+ *(unsigned long *)(&regs->r0 + nr) = val;
+ else if (nr < 7)
+ *(unsigned long *)(&regs->r4 + (nr - 4)) = val;
+ else if (nr < 13)
+ *(unsigned long *)(&regs->r7 + (nr - 7)) = val;
+ else
+ *(unsigned long *)(&regs->fp + (nr - 13)) = val;
+}
+
+#define REG1(insn) (((insn) & 0x0f00) >> 8)
+#define REG2(insn) ((insn) & 0x000f)
+#define PSW_BC 0x100
+
+/* O- instruction */
+#define ISA_LD1 0x20c0 /* ld Rdest, @Rsrc */
+#define ISA_LD2 0x20e0 /* ld Rdest, @Rsrc+ */
+#define ISA_LDH 0x20a0 /* ldh Rdest, @Rsrc */
+#define ISA_LDUH 0x20b0 /* lduh Rdest, @Rsrc */
+#define ISA_ST1 0x2040 /* st Rsrc1, @Rsrc2 */
+#define ISA_ST2 0x2060 /* st Rsrc1, @+Rsrc2 */
+#define ISA_ST3 0x2070 /* st Rsrc1, @-Rsrc2 */
+#define ISA_STH1 0x2020 /* sth Rsrc1, @Rsrc2 */
+#define ISA_STH2 0x2030 /* sth Rsrc1, @Rsrc2+ */
+
+#ifdef CONFIG_ISA_DUAL_ISSUE
+
+/* OS instruction */
+#define ISA_ADD 0x00a0 /* add Rdest, Rsrc */
+#define ISA_ADDI 0x4000 /* addi Rdest, #imm8 */
+#define ISA_ADDX 0x0090 /* addx Rdest, Rsrc */
+#define ISA_AND 0x00c0 /* and Rdest, Rsrc */
+#define ISA_CMP 0x0040 /* cmp Rsrc1, Rsrc2 */
+#define ISA_CMPEQ 0x0060 /* cmpeq Rsrc1, Rsrc2 */
+#define ISA_CMPU 0x0050 /* cmpu Rsrc1, Rsrc2 */
+#define ISA_CMPZ 0x0070 /* cmpz Rsrc */
+#define ISA_LDI 0x6000 /* ldi Rdest, #imm8 */
+#define ISA_MV 0x1080 /* mv Rdest, Rsrc */
+#define ISA_NEG 0x0030 /* neg Rdest, Rsrc */
+#define ISA_NOP 0x7000 /* nop */
+#define ISA_NOT 0x00b0 /* not Rdest, Rsrc */
+#define ISA_OR 0x00e0 /* or Rdest, Rsrc */
+#define ISA_SUB 0x0020 /* sub Rdest, Rsrc */
+#define ISA_SUBX 0x0010 /* subx Rdest, Rsrc */
+#define ISA_XOR 0x00d0 /* xor Rdest, Rsrc */
+
+/* -S instruction */
+#define ISA_MUL 0x1060 /* mul Rdest, Rsrc */
+#define ISA_MULLO_A0 0x3010 /* mullo Rsrc1, Rsrc2, A0 */
+#define ISA_MULLO_A1 0x3090 /* mullo Rsrc1, Rsrc2, A1 */
+#define ISA_MVFACMI_A0 0x50f2 /* mvfacmi Rdest, A0 */
+#define ISA_MVFACMI_A1 0x50f6 /* mvfacmi Rdest, A1 */
+
+static int emu_addi(unsigned short insn, struct pt_regs *regs)
+{
+ char imm = (char)(insn & 0xff);
+ int dest = REG1(insn);
+ int val;
+
+ val = get_reg(regs, dest);
+ val += imm;
+ set_reg(regs, dest, val);
+
+ return 0;
+}
+
+static int emu_ldi(unsigned short insn, struct pt_regs *regs)
+{
+ char imm = (char)(insn & 0xff);
+
+ set_reg(regs, REG1(insn), (int)imm);
+
+ return 0;
+}
+
+static int emu_add(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ int src = REG2(insn);
+ int val;
+
+ val = get_reg(regs, dest);
+ val += get_reg(regs, src);
+ set_reg(regs, dest, val);
+
+ return 0;
+}
+
+static int emu_addx(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ unsigned int val, tmp;
+
+ val = regs->psw & PSW_BC ? 1 : 0;
+ tmp = get_reg(regs, dest);
+ val += tmp;
+ val += (unsigned int)get_reg(regs, REG2(insn));
+ set_reg(regs, dest, val);
+
+ /* C bit set */
+ if (val < tmp)
+ regs->psw |= PSW_BC;
+ else
+ regs->psw &= ~(PSW_BC);
+
+ return 0;
+}
+
+static int emu_and(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ int val;
+
+ val = get_reg(regs, dest);
+ val &= get_reg(regs, REG2(insn));
+ set_reg(regs, dest, val);
+
+ return 0;
+}
+
+static int emu_cmp(unsigned short insn, struct pt_regs *regs)
+{
+ if (get_reg(regs, REG1(insn)) < get_reg(regs, REG2(insn)))
+ regs->psw |= PSW_BC;
+ else
+ regs->psw &= ~(PSW_BC);
+
+ return 0;
+}
+
+static int emu_cmpeq(unsigned short insn, struct pt_regs *regs)
+{
+ if (get_reg(regs, REG1(insn)) == get_reg(regs, REG2(insn)))
+ regs->psw |= PSW_BC;
+ else
+ regs->psw &= ~(PSW_BC);
+
+ return 0;
+}
+
+static int emu_cmpu(unsigned short insn, struct pt_regs *regs)
+{
+ if ((unsigned int)get_reg(regs, REG1(insn))
+ < (unsigned int)get_reg(regs, REG2(insn)))
+ regs->psw |= PSW_BC;
+ else
+ regs->psw &= ~(PSW_BC);
+
+ return 0;
+}
+
+static int emu_cmpz(unsigned short insn, struct pt_regs *regs)
+{
+ if (!get_reg(regs, REG2(insn)))
+ regs->psw |= PSW_BC;
+ else
+ regs->psw &= ~(PSW_BC);
+
+ return 0;
+}
+
+static int emu_mv(unsigned short insn, struct pt_regs *regs)
+{
+ int val;
+
+ val = get_reg(regs, REG2(insn));
+ set_reg(regs, REG1(insn), val);
+
+ return 0;
+}
+
+static int emu_neg(unsigned short insn, struct pt_regs *regs)
+{
+ int val;
+
+ val = get_reg(regs, REG2(insn));
+ set_reg(regs, REG1(insn), 0 - val);
+
+ return 0;
+}
+
+static int emu_not(unsigned short insn, struct pt_regs *regs)
+{
+ int val;
+
+ val = get_reg(regs, REG2(insn));
+ set_reg(regs, REG1(insn), ~val);
+
+ return 0;
+}
+
+static int emu_or(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ int val;
+
+ val = get_reg(regs, dest);
+ val |= get_reg(regs, REG2(insn));
+ set_reg(regs, dest, val);
+
+ return 0;
+}
+
+static int emu_sub(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ int val;
+
+ val = get_reg(regs, dest);
+ val -= get_reg(regs, REG2(insn));
+ set_reg(regs, dest, val);
+
+ return 0;
+}
+
+static int emu_subx(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ unsigned int val, tmp;
+
+ val = tmp = get_reg(regs, dest);
+ val -= (unsigned int)get_reg(regs, REG2(insn));
+ val -= regs->psw & PSW_BC ? 1 : 0;
+ set_reg(regs, dest, val);
+
+ /* C bit set */
+ if (val > tmp)
+ regs->psw |= PSW_BC;
+ else
+ regs->psw &= ~(PSW_BC);
+
+ return 0;
+}
+
+static int emu_xor(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ unsigned int val;
+
+ val = (unsigned int)get_reg(regs, dest);
+ val ^= (unsigned int)get_reg(regs, REG2(insn));
+ set_reg(regs, dest, val);
+
+ return 0;
+}
+
+static int emu_mul(unsigned short insn, struct pt_regs *regs)
+{
+ int dest = REG1(insn);
+ int reg1, reg2;
+
+ reg1 = get_reg(regs, dest);
+ reg2 = get_reg(regs, REG2(insn));
+
+ __asm__ __volatile__ (
+ "mul %0, %1; \n\t"
+ : "+r" (reg1) : "r" (reg2)
+ );
+
+ set_reg(regs, dest, reg1);
+
+ return 0;
+}
+
+static int emu_mullo_a0(unsigned short insn, struct pt_regs *regs)
+{
+ int reg1, reg2;
+
+ reg1 = get_reg(regs, REG1(insn));
+ reg2 = get_reg(regs, REG2(insn));
+
+ __asm__ __volatile__ (
+ "mullo %0, %1, a0; \n\t"
+ "mvfachi %0, a0; \n\t"
+ "mvfaclo %1, a0; \n\t"
+ : "+r" (reg1), "+r" (reg2)
+ );
+
+ regs->acc0h = reg1;
+ regs->acc0l = reg2;
+
+ return 0;
+}
+
+static int emu_mullo_a1(unsigned short insn, struct pt_regs *regs)
+{
+ int reg1, reg2;
+
+ reg1 = get_reg(regs, REG1(insn));
+ reg2 = get_reg(regs, REG2(insn));
+
+ __asm__ __volatile__ (
+ "mullo %0, %1, a0; \n\t"
+ "mvfachi %0, a0; \n\t"
+ "mvfaclo %1, a0; \n\t"
+ : "+r" (reg1), "+r" (reg2)
+ );
+
+ regs->acc1h = reg1;
+ regs->acc1l = reg2;
+
+ return 0;
+}
+
+static int emu_mvfacmi_a0(unsigned short insn, struct pt_regs *regs)
+{
+ unsigned long val;
+
+ val = (regs->acc0h << 16) | (regs->acc0l >> 16);
+ set_reg(regs, REG1(insn), (int)val);
+
+ return 0;
+}
+
+static int emu_mvfacmi_a1(unsigned short insn, struct pt_regs *regs)
+{
+ unsigned long val;
+
+ val = (regs->acc1h << 16) | (regs->acc1l >> 16);
+ set_reg(regs, REG1(insn), (int)val);
+
+ return 0;
+}
+
+static int emu_m32r2(unsigned short insn, struct pt_regs *regs)
+{
+ int res = -1;
+
+ if ((insn & 0x7fff) == ISA_NOP) /* nop */
+ return 0;
+
+ switch(insn & 0x7000) {
+ case ISA_ADDI: /* addi Rdest, #imm8 */
+ res = emu_addi(insn, regs);
+ break;
+ case ISA_LDI: /* ldi Rdest, #imm8 */
+ res = emu_ldi(insn, regs);
+ break;
+ default:
+ break;
+ }
+
+ if (!res)
+ return 0;
+
+ switch(insn & 0x70f0) {
+ case ISA_ADD: /* add Rdest, Rsrc */
+ res = emu_add(insn, regs);
+ break;
+ case ISA_ADDX: /* addx Rdest, Rsrc */
+ res = emu_addx(insn, regs);
+ break;
+ case ISA_AND: /* and Rdest, Rsrc */
+ res = emu_and(insn, regs);
+ break;
+ case ISA_CMP: /* cmp Rsrc1, Rsrc2 */
+ res = emu_cmp(insn, regs);
+ break;
+ case ISA_CMPEQ: /* cmpeq Rsrc1, Rsrc2 */
+ res = emu_cmpeq(insn, regs);
+ break;
+ case ISA_CMPU: /* cmpu Rsrc1, Rsrc2 */
+ res = emu_cmpu(insn, regs);
+ break;
+ case ISA_CMPZ: /* cmpz Rsrc */
+ res = emu_cmpz(insn, regs);
+ break;
+ case ISA_MV: /* mv Rdest, Rsrc */
+ res = emu_mv(insn, regs);
+ break;
+ case ISA_NEG: /* neg Rdest, Rsrc */
+ res = emu_neg(insn, regs);
+ break;
+ case ISA_NOT: /* not Rdest, Rsrc */
+ res = emu_not(insn, regs);
+ break;
+ case ISA_OR: /* or Rdest, Rsrc */
+ res = emu_or(insn, regs);
+ break;
+ case ISA_SUB: /* sub Rdest, Rsrc */
+ res = emu_sub(insn, regs);
+ break;
+ case ISA_SUBX: /* subx Rdest, Rsrc */
+ res = emu_subx(insn, regs);
+ break;
+ case ISA_XOR: /* xor Rdest, Rsrc */
+ res = emu_xor(insn, regs);
+ break;
+ case ISA_MUL: /* mul Rdest, Rsrc */
+ res = emu_mul(insn, regs);
+ break;
+ case ISA_MULLO_A0: /* mullo Rsrc1, Rsrc2 */
+ res = emu_mullo_a0(insn, regs);
+ break;
+ case ISA_MULLO_A1: /* mullo Rsrc1, Rsrc2 */
+ res = emu_mullo_a1(insn, regs);
+ break;
+ default:
+ break;
+ }
+
+ if (!res)
+ return 0;
+
+ switch(insn & 0x70ff) {
+ case ISA_MVFACMI_A0: /* mvfacmi Rdest */
+ res = emu_mvfacmi_a0(insn, regs);
+ break;
+ case ISA_MVFACMI_A1: /* mvfacmi Rdest */
+ res = emu_mvfacmi_a1(insn, regs);
+ break;
+ default:
+ break;
+ }
+
+ return res;
+}
+
+#endif /* CONFIG_ISA_DUAL_ISSUE */
+
+/*
+ * ld : ?010 dest 1100 src
+ * 0010 dest 1110 src : ld Rdest, @Rsrc+
+ * ldh : ?010 dest 1010 src
+ * lduh : ?010 dest 1011 src
+ * st : ?010 src1 0100 src2
+ * 0010 src1 0110 src2 : st Rsrc1, @+Rsrc2
+ * 0010 src1 0111 src2 : st Rsrc1, @-Rsrc2
+ * sth : ?010 src1 0010 src2
+ */
+
+static int insn_check(unsigned long insn, struct pt_regs *regs,
+ unsigned char **ucp)
+{
+ int res = 0;
+
+ /*
+ * 32bit insn
+ * ld Rdest, @(disp16, Rsrc)
+ * st Rdest, @(disp16, Rsrc)
+ */
+ if (insn & 0x80000000) { /* 32bit insn */
+ *ucp += (short)(insn & 0x0000ffff);
+ regs->bpc += 4;
+ } else { /* 16bit insn */
+#ifdef CONFIG_ISA_DUAL_ISSUE
+ /* parallel exec check */
+ if (!(regs->bpc & 0x2) && insn & 0x8000) {
+ res = emu_m32r2((unsigned short)insn, regs);
+ regs->bpc += 4;
+ } else
+#endif /* CONFIG_ISA_DUAL_ISSUE */
+ regs->bpc += 2;
+ }
+
+ return res;
+}
+
+static int emu_ld(unsigned long insn32, struct pt_regs *regs)
+{
+ unsigned char *ucp;
+ unsigned long val;
+ unsigned short insn16;
+ int size, src;
+
+ insn16 = insn32 >> 16;
+ src = REG2(insn16);
+ ucp = (unsigned char *)get_reg(regs, src);
+
+ if (insn_check(insn32, regs, &ucp))
+ return -1;
+
+ size = insn16 & 0x0040 ? 4 : 2;
+ if (copy_from_user(&val, ucp, size))
+ return -1;
+
+ if (size == 2)
+ val >>= 16;
+
+ /* ldh sign check */
+ if ((insn16 & 0x00f0) == 0x00a0 && (val & 0x8000))
+ val |= 0xffff0000;
+
+ set_reg(regs, REG1(insn16), val);
+
+ /* ld increment check */
+ if ((insn16 & 0xf0f0) == ISA_LD2) /* ld Rdest, @Rsrc+ */
+ set_reg(regs, src, (unsigned long)(ucp + 4));
+
+ return 0;
+}
+
+static int emu_st(unsigned long insn32, struct pt_regs *regs)
+{
+ unsigned char *ucp;
+ unsigned long val;
+ unsigned short insn16;
+ int size, src2;
+
+ insn16 = insn32 >> 16;
+ src2 = REG2(insn16);
+
+ ucp = (unsigned char *)get_reg(regs, src2);
+
+ if (insn_check(insn32, regs, &ucp))
+ return -1;
+
+ size = insn16 & 0x0040 ? 4 : 2;
+ val = get_reg(regs, REG1(insn16));
+ if (size == 2)
+ val <<= 16;
+
+ /* st inc/dec check */
+ if ((insn16 & 0xf0e0) == 0x2060) {
+ if (insn16 & 0x0010)
+ ucp -= 4;
+ else
+ ucp += 4;
+
+ set_reg(regs, src2, (unsigned long)ucp);
+ }
+
+ if (copy_to_user(ucp, &val, size))
+ return -1;
+
+ /* sth inc check */
+ if ((insn16 & 0xf0f0) == ISA_STH2) {
+ ucp += 2;
+ set_reg(regs, src2, (unsigned long)ucp);
+ }
+
+ return 0;
+}
+
+int handle_unaligned_access(unsigned long insn32, struct pt_regs *regs)
+{
+ unsigned short insn16;
+ int res;
+
+ insn16 = insn32 >> 16;
+
+ /* ld or st check */
+ if ((insn16 & 0x7000) != 0x2000)
+ return -1;
+
+ /* insn alignment check */
+ if ((insn16 & 0x8000) && (regs->bpc & 3))
+ return -1;
+
+ if (insn16 & 0x0080) /* ld */
+ res = emu_ld(insn32, regs);
+ else /* st */
+ res = emu_st(insn32, regs);
+
+ return res;
+}
+
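The encoding table in the comment above insn_check() ("ld : ?010 dest 1100 src", and so on) is what emu_ld() and emu_st() decode with the REG1/REG2 macros plus the size bit. A minimal stand-alone sketch of that decode step, with a made-up sample instruction word and harness (only the two macros are taken from align.c):

#include <stdio.h>

/* Field extraction as in align.c: bits 11..8 select the first register
 * operand, bits 3..0 the second; within the load/store group, bit 6
 * distinguishes 32-bit ld/st (1) from 16-bit ldh/lduh/sth (0). */
#define REG1(insn) (((insn) & 0x0f00) >> 8)
#define REG2(insn) ((insn) & 0x000f)

int main(void)
{
	unsigned short insn = 0x20e5;  /* "ld r0, @r5+" = 0010 0000 1110 0101 */

	printf("dest=r%d src=r%d size=%d post-increment=%d\n",
	       REG1(insn), REG2(insn),
	       (insn & 0x0040) ? 4 : 2,        /* word or halfword access */
	       (insn & 0xf0f0) == 0x20e0);     /* ISA_LD2 form? */
	return 0;
}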
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
new file mode 100644
index 00000000000..dddbf6b5ed2
--- /dev/null
+++ b/arch/m32r/kernel/entry.S
@@ -0,0 +1,1000 @@
+/*
+ * linux/arch/m32r/kernel/entry.S
+ *
+ * Copyright (c) 2001, 2002 Hirokazu Takata, Hitoshi Yamamoto, H. Kondo
+ * Copyright (c) 2003 Hitoshi Yamamoto
+ * Copyright (c) 2004 Hirokazu Takata <takata at linux-m32r.org>
+ *
+ * Taken from i386 version.
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * Stack layout in 'ret_from_system_call':
+ * ptrace needs to have all regs on the stack.
+ * if the order here is changed, it needs to be
+ * updated in fork.c:copy_process, signal.c:do_signal,
+ * ptrace.c and ptrace.h
+ *
+ * M32Rx/M32R2 M32R
+ * @(sp) - r4 ditto
+ * @(0x04,sp) - r5 ditto
+ * @(0x08,sp) - r6 ditto
+ * @(0x0c,sp) - *pt_regs ditto
+ * @(0x10,sp) - r0 ditto
+ * @(0x14,sp) - r1 ditto
+ * @(0x18,sp) - r2 ditto
+ * @(0x1c,sp) - r3 ditto
+ * @(0x20,sp) - r7 ditto
+ * @(0x24,sp) - r8 ditto
+ * @(0x28,sp) - r9 ditto
+ * @(0x2c,sp) - r10 ditto
+ * @(0x30,sp) - r11 ditto
+ * @(0x34,sp) - r12 ditto
+ * @(0x38,sp) - syscall_nr ditto
+ * @(0x3c,sp) - acc0h @(0x3c,sp) - acch
+ * @(0x40,sp) - acc0l @(0x40,sp) - accl
+ * @(0x44,sp) - acc1h @(0x44,sp) - psw
+ * @(0x48,sp) - acc1l @(0x48,sp) - bpc
+ * @(0x4c,sp) - psw @(0x4c,sp) - bbpsw
+ * @(0x50,sp) - bpc @(0x50,sp) - bbpc
+ * @(0x54,sp) - bbpsw @(0x54,sp) - spu (cr3)
+ * @(0x58,sp) - bbpc @(0x58,sp) - fp (r13)
+ * @(0x5c,sp) - spu (cr3) @(0x5c,sp) - lr (r14)
+ * @(0x60,sp) - fp (r13) @(0x60,sp) - spi (cr12)
+ * @(0x64,sp) - lr (r14) @(0x64,sp) - orig_r0
+ * @(0x68,sp) - spi (cr2)
+ * @(0x6c,sp) - orig_r0
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/irq.h>
+#include <asm/unistd.h>
+#include <asm/assembler.h>
+#include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/segment.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/m32r.h>
+#include <asm/mmu_context.h>
+
+#if !defined(CONFIG_MMU)
+#define sys_madvise sys_ni_syscall
+#define sys_readahead sys_ni_syscall
+#define sys_mprotect sys_ni_syscall
+#define sys_msync sys_ni_syscall
+#define sys_mlock sys_ni_syscall
+#define sys_munlock sys_ni_syscall
+#define sys_mlockall sys_ni_syscall
+#define sys_munlockall sys_ni_syscall
+#define sys_mremap sys_ni_syscall
+#define sys_mincore sys_ni_syscall
+#define sys_remap_file_pages sys_ni_syscall
+#endif /* CONFIG_MMU */
+
+#define R4(reg) @reg
+#define R5(reg) @(0x04,reg)
+#define R6(reg) @(0x08,reg)
+#define PTREGS(reg) @(0x0C,reg)
+#define R0(reg) @(0x10,reg)
+#define R1(reg) @(0x14,reg)
+#define R2(reg) @(0x18,reg)
+#define R3(reg) @(0x1C,reg)
+#define R7(reg) @(0x20,reg)
+#define R8(reg) @(0x24,reg)
+#define R9(reg) @(0x28,reg)
+#define R10(reg) @(0x2C,reg)
+#define R11(reg) @(0x30,reg)
+#define R12(reg) @(0x34,reg)
+#define SYSCALL_NR(reg) @(0x38,reg)
+#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
+#define ACC0H(reg) @(0x3C,reg)
+#define ACC0L(reg) @(0x40,reg)
+#define ACC1H(reg) @(0x44,reg)
+#define ACC1L(reg) @(0x48,reg)
+#define PSW(reg) @(0x4C,reg)
+#define BPC(reg) @(0x50,reg)
+#define BBPSW(reg) @(0x54,reg)
+#define BBPC(reg) @(0x58,reg)
+#define SPU(reg) @(0x5C,reg)
+#define FP(reg) @(0x60,reg) /* FP = R13 */
+#define LR(reg) @(0x64,reg)
+#define SP(reg) @(0x68,reg)
+#define ORIG_R0(reg) @(0x6C,reg)
+#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
+#define ACCH(reg) @(0x3C,reg)
+#define ACCL(reg) @(0x40,reg)
+#define PSW(reg) @(0x44,reg)
+#define BPC(reg) @(0x48,reg)
+#define BBPSW(reg) @(0x4C,reg)
+#define BBPC(reg) @(0x50,reg)
+#define SPU(reg) @(0x54,reg)
+#define FP(reg) @(0x58,reg) /* FP = R13 */
+#define LR(reg) @(0x5C,reg)
+#define SP(reg) @(0x60,reg)
+#define ORIG_R0(reg) @(0x64,reg)
+#else
+#error unknown isa configuration
+#endif
+
+CF_MASK = 0x00000001
+TF_MASK = 0x00000100
+IF_MASK = 0x00000200
+DF_MASK = 0x00000400
+NT_MASK = 0x00004000
+VM_MASK = 0x00020000
+
+#ifdef CONFIG_PREEMPT
+#define preempt_stop(x) CLI(x)
+#else
+#define preempt_stop(x)
+#define resume_kernel restore_all
+#endif
+
+ENTRY(ret_from_fork)
+ ld r0, @sp+
+ bl schedule_tail
+ GET_THREAD_INFO(r8)
+ bra syscall_exit
+
+/*
+ * Return to user mode is not as complex as all this looks,
+ * but we want the default path for a system call return to
+ * go as quickly as possible which is why some of this is
+ * less clear than it otherwise should be.
+ */
+
+ ; userspace resumption stub bypassing syscall exit tracing
+ ALIGN
+ret_from_exception:
+ preempt_stop(r4)
+ret_from_intr:
+ ld r4, PSW(sp)
+#ifdef CONFIG_ISA_M32R2
+ and3 r4, r4, #0x8800 ; check BSM and BPM bits
+#else
+ and3 r4, r4, #0x8000 ; check BSM bit
+#endif
+ beqz r4, resume_kernel
+ENTRY(resume_userspace)
+ CLI(r4) ; make sure we don't miss an interrupt
+ ; setting need_resched or sigpending
+ ; between sampling and the iret
+ GET_THREAD_INFO(r8)
+ ld r9, @(TI_FLAGS, r8)
+ and3 r4, r9, #_TIF_WORK_MASK ; is there any work to be done on
+ ; int/exception return?
+ bnez r4, work_pending
+ bra restore_all
+
+#ifdef CONFIG_PREEMPT
+ENTRY(resume_kernel)
+ GET_THREAD_INFO(r8)
+ ld r9, @(TI_PRE_COUNT, r8) ; non-zero preempt_count ?
+ bnez r9, restore_all
+need_resched:
+ ld r9, @(TI_FLAGS, r8) ; need_resched set ?
+ and3 r4, r9, #_TIF_NEED_RESCHED
+ beqz r4, restore_all
+ ld r4, PSW(sp) ; interrupts off (exception path) ?
+ and3 r4, r4, #0x4000
+ beqz r4, restore_all
+ LDIMM (r4, PREEMPT_ACTIVE)
+ st r4, @(TI_PRE_COUNT, r8)
+ STI(r4)
+ bl schedule
+ ldi r4, #0
+ st r4, @(TI_PRE_COUNT, r8)
+ CLI(r4)
+ bra need_resched
+#endif
+
+ ; system call handler stub
+ENTRY(system_call)
+ SWITCH_TO_KERNEL_STACK
+ SAVE_ALL
+ STI(r4) ; Enable interrupt
+ st sp, PTREGS(sp) ; implicit pt_regs parameter
+ cmpui r7, #NR_syscalls
+ bnc syscall_badsys
+ st r7, SYSCALL_NR(sp) ; syscall_nr
+ ; system call tracing in operation
+ GET_THREAD_INFO(r8)
+ ld r9, @(TI_FLAGS, r8)
+ and3 r4, r9, #_TIF_SYSCALL_TRACE
+ bnez r4, syscall_trace_entry
+syscall_call:
+ slli r7, #2 ; table jump for the system call
+ LDIMM (r4, sys_call_table)
+ add r7, r4
+ ld r7, @r7
+ jl r7 ; execute system call
+ st r0, R0(sp) ; save the return value
+syscall_exit:
+ CLI(r4) ; make sure we don't miss an interrupt
+ ; setting need_resched or sigpending
+ ; between sampling and the iret
+ ld r9, @(TI_FLAGS, r8)
+ and3 r4, r9, #_TIF_ALLWORK_MASK ; current->work
+ bnez r4, syscall_exit_work
+restore_all:
+ RESTORE_ALL
+
+ # perform work that needs to be done immediately before resumption
+	# r9 : flags
+ ALIGN
+work_pending:
+ and3 r4, r9, #_TIF_NEED_RESCHED
+ beqz r4, work_notifysig
+work_resched:
+ bl schedule
+ CLI(r4) ; make sure we don't miss an interrupt
+ ; setting need_resched or sigpending
+ ; between sampling and the iret
+ ld r9, @(TI_FLAGS, r8)
+ and3 r4, r9, #_TIF_WORK_MASK ; is there any work to be done other
+ ; than syscall tracing?
+ beqz r4, restore_all
+ and3 r4, r4, #_TIF_NEED_RESCHED
+ bnez r4, work_resched
+
+work_notifysig: ; deal with pending signals and
+ ; notify-resume requests
+ mv r0, sp ; arg1 : struct pt_regs *regs
+ ldi r1, #0 ; arg2 : sigset_t *oldset
+ mv r2, r9 ; arg3 : __u32 thread_info_flags
+ bl do_notify_resume
+ bra restore_all
+
+ ; perform syscall exit tracing
+ ALIGN
+syscall_trace_entry:
+ ldi r4, #-ENOSYS
+ st r4, R0(sp)
+ bl do_syscall_trace
+ ld r0, ORIG_R0(sp)
+ ld r1, R1(sp)
+ ld r2, R2(sp)
+ ld r3, R3(sp)
+ ld r4, R4(sp)
+ ld r5, R5(sp)
+ ld r6, R6(sp)
+ ld r7, SYSCALL_NR(sp)
+ cmpui r7, #NR_syscalls
+ bc syscall_call
+ bra syscall_exit
+
+ ; perform syscall exit tracing
+ ALIGN
+syscall_exit_work:
+ ld r9, @(TI_FLAGS, r8)
+ and3 r4, r9, #_TIF_SYSCALL_TRACE
+ beqz r4, work_pending
+ STI(r4) ; could let do_syscall_trace() call
+ ; schedule() instead
+ bl do_syscall_trace
+ bra resume_userspace
+
+ ALIGN
+syscall_fault:
+ SAVE_ALL
+ GET_THREAD_INFO(r8)
+ ldi r4, #-EFAULT
+ st r4, R0(sp)
+ bra resume_userspace
+
+ ALIGN
+syscall_badsys:
+ ldi r4, #-ENOSYS
+ st r4, R0(sp)
+ bra resume_userspace
+
+ .global eit_vector
+
+ .equ ei_vec_table, eit_vector + 0x0200
+
+/*
+ * EI handler routine
+ */
+ENTRY(ei_handler)
+#if defined(CONFIG_CHIP_M32700)
+ SWITCH_TO_KERNEL_STACK
+	; WORKAROUND: forcibly clear the SM bit and use the kernel stack (SPI).
+#endif
+ SAVE_ALL
+ mv r1, sp ; arg1(regs)
+#if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \
+ || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \
+ || defined(CONFIG_CHIP_OPSP)
+
+; GET_ICU_STATUS;
+ seth r0, #shigh(M32R_ICU_ISTS_ADDR)
+ ld r0, @(low(M32R_ICU_ISTS_ADDR),r0)
+ st r0, @-sp
+#if defined(CONFIG_SMP)
+ /*
+	 * If IRQ == 0      --> nothing to do; do not write IMASK
+	 * If IRQ is an IPI --> run the IPI handler; do not write IMASK
+	 * otherwise        --> run do_IRQ() and write IMASK
+ */
+ slli r0, #4
+ srli r0, #24 ; r0(irq_num<<2)
+ ;; IRQ exist check
+#if defined(CONFIG_CHIP_M32700)
+ /* WORKAROUND: IMASK bug M32700-TS1, TS2 chip. */
+ beqz r0, 3f ; if (!irq_num) goto exit
+#else
+ beqz r0, 1f ; if (!irq_num) goto exit
+#endif /* WORKAROUND */
+ ;; IPI check
+ cmpi r0, #(M32R_IRQ_IPI0<<2) ; ISN < IPI0 check
+ bc 2f
+ cmpi r0, #((M32R_IRQ_IPI7+1)<<2) ; ISN > IPI7 check
+ bnc 2f
+ LDIMM (r2, ei_vec_table)
+ add r2, r0
+ ld r2, @r2
+ beqz r2, 1f ; if (no IPI handler) goto exit
+ mv r0, r1 ; arg0(regs)
+ jl r2
+ .fillinsn
+1:
+ addi sp, #4
+ bra ret_to_intr
+#if defined(CONFIG_CHIP_M32700)
+ /* WORKAROUND: IMASK bug M32700-TS1, TS2 chip. */
+ .fillinsn
+3:
+ ld24 r14, #0x00070000
+ seth r0, #shigh(M32R_ICU_IMASK_ADDR)
+ st r14, @(low(M32R_ICU_IMASK_ADDR), r0)
+ addi sp, #4
+ bra ret_to_intr
+#endif /* WORKAROUND */
+ ;; do_IRQ
+ .fillinsn
+2:
+ srli r0, #2
+#if defined(CONFIG_PLAT_USRV)
+ add3 r2, r0, #-(M32R_IRQ_INT1) ; INT1# interrupt
+ bnez r2, 9f
+ ; read ICU status register of PLD
+ seth r0, #high(PLD_ICUISTS)
+ or3 r0, r0, #low(PLD_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ addi r0, #(M32700UT_PLD_IRQ_BASE)
+ .fillinsn
+9:
+#elif defined(CONFIG_PLAT_M32700UT)
+ add3 r2, r0, #-(M32R_IRQ_INT1) ; INT1# interrupt
+ bnez r2, check_int0
+ ; read ICU status register of PLD
+ seth r0, #high(PLD_ICUISTS)
+ or3 r0, r0, #low(PLD_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ addi r0, #(M32700UT_PLD_IRQ_BASE)
+ bra check_end
+ .fillinsn
+check_int0:
+ add3 r2, r0, #-(M32R_IRQ_INT0) ; INT0# interrupt
+ bnez r2, check_int2
+ ; read ICU status of LAN-board
+ seth r0, #high(M32700UT_LAN_ICUISTS)
+ or3 r0, r0, #low(M32700UT_LAN_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ add3 r0, r0, #(M32700UT_LAN_PLD_IRQ_BASE)
+ bra check_end
+ .fillinsn
+check_int2:
+ add3 r2, r0, #-(M32R_IRQ_INT2) ; INT2# interrupt
+ bnez r2, check_end
+ ; read ICU status of LCD-board
+ seth r0, #high(M32700UT_LCD_ICUISTS)
+ or3 r0, r0, #low(M32700UT_LCD_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ add3 r0, r0, #(M32700UT_LCD_PLD_IRQ_BASE)
+ bra check_end
+ .fillinsn
+check_end:
+#elif defined(CONFIG_PLAT_OPSPUT)
+ add3 r2, r0, #-(M32R_IRQ_INT1) ; INT1# interrupt
+ bnez r2, check_int0
+ ; read ICU status register of PLD
+ seth r0, #high(PLD_ICUISTS)
+ or3 r0, r0, #low(PLD_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ addi r0, #(OPSPUT_PLD_IRQ_BASE)
+ bra check_end
+ .fillinsn
+check_int0:
+ add3 r2, r0, #-(M32R_IRQ_INT0) ; INT0# interrupt
+ bnez r2, check_int2
+ ; read ICU status of LAN-board
+ seth r0, #high(OPSPUT_LAN_ICUISTS)
+ or3 r0, r0, #low(OPSPUT_LAN_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ add3 r0, r0, #(OPSPUT_LAN_PLD_IRQ_BASE)
+ bra check_end
+ .fillinsn
+check_int2:
+ add3 r2, r0, #-(M32R_IRQ_INT2) ; INT2# interrupt
+ bnez r2, check_end
+ ; read ICU status of LCD-board
+ seth r0, #high(OPSPUT_LCD_ICUISTS)
+ or3 r0, r0, #low(OPSPUT_LCD_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ add3 r0, r0, #(OPSPUT_LCD_PLD_IRQ_BASE)
+ bra check_end
+ .fillinsn
+check_end:
+#endif /* CONFIG_PLAT_OPSPUT */
+ bl do_IRQ ; r0(irq), r1(regs)
+#else /* not CONFIG_SMP */
+ srli r0, #22 ; r0(irq)
+#if defined(CONFIG_PLAT_USRV)
+ add3 r2, r0, #-(M32R_IRQ_INT1) ; INT1# interrupt
+ bnez r2, 1f
+ ; read ICU status register of PLD
+ seth r0, #high(PLD_ICUISTS)
+ or3 r0, r0, #low(PLD_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ addi r0, #(M32700UT_PLD_IRQ_BASE)
+ .fillinsn
+1:
+#elif defined(CONFIG_PLAT_M32700UT)
+ add3 r2, r0, #-(M32R_IRQ_INT1) ; INT1# interrupt
+ bnez r2, check_int0
+ ; read ICU status register of PLD
+ seth r0, #high(PLD_ICUISTS)
+ or3 r0, r0, #low(PLD_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ addi r0, #(M32700UT_PLD_IRQ_BASE)
+ bra check_end
+ .fillinsn
+check_int0:
+ add3 r2, r0, #-(M32R_IRQ_INT0) ; INT0# interrupt
+ bnez r2, check_int2
+ ; read ICU status of LAN-board
+ seth r0, #high(M32700UT_LAN_ICUISTS)
+ or3 r0, r0, #low(M32700UT_LAN_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ add3 r0, r0, #(M32700UT_LAN_PLD_IRQ_BASE)
+ bra check_end
+ .fillinsn
+check_int2:
+ add3 r2, r0, #-(M32R_IRQ_INT2) ; INT2# interrupt
+ bnez r2, check_end
+ ; read ICU status of LCD-board
+ seth r0, #high(M32700UT_LCD_ICUISTS)
+ or3 r0, r0, #low(M32700UT_LCD_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ add3 r0, r0, #(M32700UT_LCD_PLD_IRQ_BASE)
+ bra check_end
+ .fillinsn
+check_end:
+#elif defined(CONFIG_PLAT_OPSPUT)
+ add3 r2, r0, #-(M32R_IRQ_INT1) ; INT1# interrupt
+ bnez r2, check_int0
+ ; read ICU status register of PLD
+ seth r0, #high(PLD_ICUISTS)
+ or3 r0, r0, #low(PLD_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ addi r0, #(OPSPUT_PLD_IRQ_BASE)
+ bra check_end
+ .fillinsn
+check_int0:
+ add3 r2, r0, #-(M32R_IRQ_INT0) ; INT0# interrupt
+ bnez r2, check_int2
+ ; read ICU status of LAN-board
+ seth r0, #high(OPSPUT_LAN_ICUISTS)
+ or3 r0, r0, #low(OPSPUT_LAN_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ add3 r0, r0, #(OPSPUT_LAN_PLD_IRQ_BASE)
+ bra check_end
+ .fillinsn
+check_int2:
+ add3 r2, r0, #-(M32R_IRQ_INT2) ; INT2# interrupt
+ bnez r2, check_end
+ ; read ICU status of LCD-board
+ seth r0, #high(OPSPUT_LCD_ICUISTS)
+ or3 r0, r0, #low(OPSPUT_LCD_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ add3 r0, r0, #(OPSPUT_LCD_PLD_IRQ_BASE)
+ bra check_end
+ .fillinsn
+check_end:
+#endif /* CONFIG_PLAT_OPSPUT */
+ bl do_IRQ
+#endif /* CONFIG_SMP */
+ ld r14, @sp+
+ seth r0, #shigh(M32R_ICU_IMASK_ADDR)
+ st r14, @(low(M32R_ICU_IMASK_ADDR),r0)
+#else
+#error no chip configuration
+#endif
+ret_to_intr:
+ bra ret_from_intr
+
+/*
+ * Default EIT handler
+ */
+ ALIGN
+int_msg:
+ .asciz "Unknown interrupt\n"
+ .byte 0
+
+ENTRY(default_eit_handler)
+ push r0
+ mvfc r0, psw
+ push r1
+ push r2
+ push r3
+ push r0
+ LDIMM (r0, __KERNEL_DS)
+ mv r0, r1
+ mv r0, r2
+ LDIMM (r0, int_msg)
+ bl printk
+ pop r0
+ pop r3
+ pop r2
+ pop r1
+ mvtc r0, psw
+ pop r0
+infinit:
+ bra infinit
+
+#ifdef CONFIG_MMU
+/*
+ * Access Exception handler
+ */
+ENTRY(ace_handler)
+ SWITCH_TO_KERNEL_STACK
+ SAVE_ALL
+
+ seth r2, #shigh(MMU_REG_BASE) /* Check status register */
+ ld r4, @(low(MESTS_offset),r2)
+ st r4, @(low(MESTS_offset),r2)
+ srl3 r1, r4, #4
+#ifdef CONFIG_CHIP_M32700
+ and3 r1, r1, #0x0000ffff
+ ; WORKAROUND: ignore TME bit for the M32700(TS1).
+#endif /* CONFIG_CHIP_M32700 */
+ beqz r1, inst
+oprand:
+ ld r2, @(low(MDEVA_offset),r2) ; set address
+ srli r2, #12
+ slli r2, #12
+ srli r1, #1
+ bra 1f
+inst:
+ and3 r1, r4, #2
+ srli r1, #1
+ or3 r1, r1, #8
+ mvfc r2, bpc ; set address
+ .fillinsn
+1:
+ mvfc r3, psw
+ mv r0, sp
+ and3 r3, r3, 0x800
+ srli r3, #9
+ or r1, r3
+ /*
+ * do_page_fault():
+ * r0 : struct pt_regs *regs
+ * r1 : unsigned long error-code
+ * r2 : unsigned long address
+ * error-code:
+ * +------+------+------+------+
+ * | bit3 | bit2 | bit1 | bit0 |
+ * +------+------+------+------+
+	 * bit 3 == 0 means data access,   1 means instruction fetch
+	 * bit 2 == 0 means kernel mode,   1 means user mode
+	 * bit 1 == 0 means read access,   1 means write access
+	 * bit 0 == 0 means no page found, 1 means protection fault
+ *
+ */
+ bl do_page_fault
+ bra ret_from_intr
+#endif /* CONFIG_MMU */
+
+
+ENTRY(alignment_check)
+/* void alignment_check(int error_code) */
+ SWITCH_TO_KERNEL_STACK
+ SAVE_ALL
+ ldi r1, #0x30 ; error_code
+ mv r0, sp ; pt_regs
+ bl do_alignment_check
+error_code:
+ bra ret_from_exception
+
+ENTRY(rie_handler)
+/* void rie_handler(int error_code) */
+ SWITCH_TO_KERNEL_STACK
+ SAVE_ALL
+ mvfc r0, bpc
+ ld r1, @r0
+ seth r0, #0xa0f0
+ st r1, @r0
+ ldi r1, #0x20 ; error_code
+ mv r0, sp ; pt_regs
+ bl do_rie_handler
+ bra error_code
+
+ENTRY(pie_handler)
+/* void pie_handler(int error_code) */
+ SWITCH_TO_KERNEL_STACK
+ SAVE_ALL
+ ldi r1, #0 ; error_code ; FIXME
+ mv r0, sp ; pt_regs
+ bl do_pie_handler
+ bra error_code
+
+ENTRY(debug_trap)
+ .global withdraw_debug_trap
+ /* void debug_trap(void) */
+ SWITCH_TO_KERNEL_STACK
+ SAVE_ALL
+ mv r0, sp ; pt_regs
+ bl withdraw_debug_trap
+ ldi r1, #0 ; error_code
+ mv r0, sp ; pt_regs
+ bl do_debug_trap
+ bra error_code
+
+
+/* Cache flushing handler */
+ENTRY(cache_flushing_handler)
+ .global _flush_cache_all
+ /* void _flush_cache_all(void); */
+ SWITCH_TO_KERNEL_STACK
+ push r0
+ push r1
+ push r2
+ push r3
+ push r4
+ push r5
+ push r6
+ push r7
+ push lr
+ bl _flush_cache_all
+ pop lr
+ pop r7
+ pop r6
+ pop r5
+ pop r4
+ pop r3
+ pop r2
+ pop r1
+ pop r0
+ rte
+
+.data
+ENTRY(sys_call_table)
+ .long sys_restart_syscall /* 0 - old "setup()" system call*/
+ .long sys_exit
+ .long sys_fork
+ .long sys_read
+ .long sys_write
+ .long sys_open /* 5 */
+ .long sys_close
+ .long sys_waitpid
+ .long sys_creat
+ .long sys_link
+ .long sys_unlink /* 10 */
+ .long sys_execve
+ .long sys_chdir
+ .long sys_time
+ .long sys_mknod
+ .long sys_chmod /* 15 */
+ .long sys_ni_syscall /* lchown16 syscall holder */
+ .long sys_ni_syscall /* old break syscall holder */
+ .long sys_ni_syscall /* old stat syscall holder */
+ .long sys_lseek
+ .long sys_getpid /* 20 */
+ .long sys_mount
+ .long sys_oldumount
+ .long sys_ni_syscall /* setuid16 syscall holder */
+ .long sys_ni_syscall /* getuid16 syscall holder */
+ .long sys_stime /* 25 */
+ .long sys_ptrace
+ .long sys_alarm
+ .long sys_ni_syscall /* old fstat syscall holder */
+ .long sys_pause
+ .long sys_utime /* 30 */
+ .long sys_ni_syscall /* old stty syscall holder */
+ .long sys_cachectl /* for M32R */ /* old gtty syscall holder */
+ .long sys_access
+ .long sys_ni_syscall /* nice syscall holder */
+ .long sys_ni_syscall /* 35 - old ftime syscall holder */
+ .long sys_sync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+ .long sys_rmdir /* 40 */
+ .long sys_dup
+ .long sys_pipe
+ .long sys_times
+ .long sys_ni_syscall /* old prof syscall holder */
+ .long sys_brk /* 45 */
+ .long sys_ni_syscall /* setgid16 syscall holder */
+ .long sys_getgid /* will be unused */
+ .long sys_ni_syscall /* signal syscall holder */
+ .long sys_ni_syscall /* geteuid16 syscall holder */
+ .long sys_ni_syscall /* 50 - getegid16 syscall holder */
+ .long sys_acct
+ .long sys_umount /* recycled never used phys() */
+ .long sys_ni_syscall /* old lock syscall holder */
+ .long sys_ioctl
+ .long sys_fcntl /* 55 - will be unused */
+ .long sys_ni_syscall /* mpx syscall holder */
+ .long sys_setpgid
+ .long sys_ni_syscall /* old ulimit syscall holder */
+ .long sys_ni_syscall /* sys_olduname */
+ .long sys_umask /* 60 */
+ .long sys_chroot
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+ .long sys_getpgrp /* 65 */
+ .long sys_setsid
+ .long sys_ni_syscall /* sigaction syscall holder */
+ .long sys_ni_syscall /* sgetmask syscall holder */
+ .long sys_ni_syscall /* ssetmask syscall holder */
+ .long sys_ni_syscall /* 70 - setreuid16 syscall holder */
+ .long sys_ni_syscall /* setregid16 syscall holder */
+ .long sys_ni_syscall /* sigsuspend syscall holder */
+ .long sys_ni_syscall /* sigpending syscall holder */
+ .long sys_sethostname
+ .long sys_setrlimit /* 75 */
+ .long sys_getrlimit/*will be unused*/
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+ .long sys_ni_syscall /* 80 - getgroups16 syscall holder */
+ .long sys_ni_syscall /* setgroups16 syscall holder */
+ .long sys_ni_syscall /* sys_oldselect */
+ .long sys_symlink
+ .long sys_ni_syscall /* old lstat syscall holder */
+ .long sys_readlink /* 85 */
+ .long sys_uselib
+ .long sys_swapon
+ .long sys_reboot
+ .long sys_ni_syscall /* readdir syscall holder */
+ .long sys_ni_syscall /* 90 - old_mmap syscall holder */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+	.long sys_ni_syscall /* 95 - fchown16 syscall holder */
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_ni_syscall /* old profil syscall holder */
+ .long sys_statfs
+ .long sys_fstatfs /* 100 */
+ .long sys_ni_syscall /* ioperm syscall holder */
+ .long sys_socketcall
+ .long sys_syslog
+ .long sys_setitimer
+ .long sys_getitimer /* 105 */
+ .long sys_newstat
+ .long sys_newlstat
+ .long sys_newfstat
+ .long sys_ni_syscall /* old uname syscall holder */
+ .long sys_ni_syscall /* 110 - iopl syscall holder */
+ .long sys_vhangup
+ .long sys_ni_syscall /* idle syscall holder */
+ .long sys_ni_syscall /* vm86old syscall holder */
+ .long sys_wait4
+ .long sys_swapoff /* 115 */
+ .long sys_sysinfo
+ .long sys_ipc
+ .long sys_fsync
+ .long sys_ni_syscall /* sigreturn syscall holder */
+ .long sys_clone /* 120 */
+ .long sys_setdomainname
+ .long sys_newuname
+ .long sys_ni_syscall /* modify_ldt syscall holder */
+ .long sys_adjtimex
+ .long sys_mprotect /* 125 */
+ .long sys_ni_syscall /* sigprocmask syscall holder */
+ .long sys_ni_syscall /* create_module syscall holder */
+ .long sys_init_module
+ .long sys_delete_module
+ .long sys_ni_syscall /* 130 - get_kernel_syms */
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_fchdir
+ .long sys_bdflush
+ .long sys_sysfs /* 135 */
+ .long sys_personality
+ .long sys_ni_syscall /* afs_syscall syscall holder */
+ .long sys_ni_syscall /* setfsuid16 syscall holder */
+ .long sys_ni_syscall /* setfsgid16 syscall holder */
+ .long sys_llseek /* 140 */
+ .long sys_getdents
+ .long sys_select
+ .long sys_flock
+ .long sys_msync
+ .long sys_readv /* 145 */
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl
+ .long sys_mlock /* 150 */
+ .long sys_munlock
+ .long sys_mlockall
+ .long sys_munlockall
+ .long sys_sched_setparam
+ .long sys_sched_getparam /* 155 */
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max
+ .long sys_sched_get_priority_min /* 160 */
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_mremap
+ .long sys_ni_syscall /* setresuid16 syscall holder */
+ .long sys_ni_syscall /* 165 - getresuid16 syscall holder */
+ .long sys_tas /* vm86 syscall holder */
+ .long sys_ni_syscall /* query_module syscall holder */
+ .long sys_poll
+ .long sys_nfsservctl
+ .long sys_setresgid /* 170 */
+ .long sys_getresgid
+ .long sys_prctl
+ .long sys_rt_sigreturn
+ .long sys_rt_sigaction
+ .long sys_rt_sigprocmask /* 175 */
+ .long sys_rt_sigpending
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long sys_rt_sigsuspend
+ .long sys_pread64 /* 180 */
+ .long sys_pwrite64
+ .long sys_ni_syscall /* chown16 syscall holder */
+ .long sys_getcwd
+ .long sys_capget
+ .long sys_capset /* 185 */
+ .long sys_sigaltstack
+ .long sys_sendfile
+ .long sys_ni_syscall /* streams1 */
+ .long sys_ni_syscall /* streams2 */
+ .long sys_vfork /* 190 */
+ .long sys_getrlimit
+ .long sys_mmap2
+ .long sys_truncate64
+ .long sys_ftruncate64
+ .long sys_stat64 /* 195 */
+ .long sys_lstat64
+ .long sys_fstat64
+ .long sys_lchown
+ .long sys_getuid
+ .long sys_getgid /* 200 */
+ .long sys_geteuid
+ .long sys_getegid
+ .long sys_setreuid
+ .long sys_setregid
+ .long sys_getgroups /* 205 */
+ .long sys_setgroups
+ .long sys_fchown
+ .long sys_setresuid
+ .long sys_getresuid
+ .long sys_setresgid /* 210 */
+ .long sys_getresgid
+ .long sys_chown
+ .long sys_setuid
+ .long sys_setgid
+ .long sys_setfsuid /* 215 */
+ .long sys_setfsgid
+ .long sys_pivot_root
+ .long sys_mincore
+ .long sys_madvise
+ .long sys_getdents64 /* 220 */
+ .long sys_fcntl64
+ .long sys_ni_syscall /* reserved for TUX */
+ .long sys_ni_syscall /* Reserved for Security */
+ .long sys_gettid
+ .long sys_readahead /* 225 */
+ .long sys_setxattr
+ .long sys_lsetxattr
+ .long sys_fsetxattr
+ .long sys_getxattr
+ .long sys_lgetxattr /* 230 */
+ .long sys_fgetxattr
+ .long sys_listxattr
+ .long sys_llistxattr
+ .long sys_flistxattr
+ .long sys_removexattr /* 235 */
+ .long sys_lremovexattr
+ .long sys_fremovexattr
+ .long sys_tkill
+ .long sys_sendfile64
+ .long sys_futex /* 240 */
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity
+ .long sys_ni_syscall /* reserved for "set_thread_area" system call */
+ .long sys_ni_syscall /* reserved for "get_thread_area" system call */
+ .long sys_io_setup /* 245 */
+ .long sys_io_destroy
+ .long sys_io_getevents
+ .long sys_io_submit
+ .long sys_io_cancel
+ .long sys_fadvise64 /* 250 */
+ .long sys_ni_syscall
+ .long sys_exit_group
+ .long sys_lookup_dcookie
+ .long sys_epoll_create
+ .long sys_epoll_ctl /* 255 */
+ .long sys_epoll_wait
+ .long sys_remap_file_pages
+ .long sys_set_tid_address
+ .long sys_timer_create
+ .long sys_timer_settime /* 260 */
+ .long sys_timer_gettime
+ .long sys_timer_getoverrun
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime /* 265 */
+ .long sys_clock_getres
+ .long sys_clock_nanosleep
+ .long sys_statfs64
+ .long sys_fstatfs64
+ .long sys_tgkill /* 270 */
+ .long sys_utimes
+ .long sys_fadvise64_64
+ .long sys_ni_syscall /* Reserved for sys_vserver */
+ .long sys_ni_syscall /* Reserved for sys_mbind */
+ .long sys_ni_syscall /* Reserved for sys_get_mempolicy */
+ .long sys_ni_syscall /* Reserved for sys_set_mempolicy */
+ .long sys_mq_open
+ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive /* 280 */
+ .long sys_mq_notify
+ .long sys_mq_getsetattr
+ .long sys_ni_syscall /* reserved for kexec */
+ .long sys_waitid
+
+syscall_table_size=(.-sys_call_table)
+
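The two offset columns at the top of entry.S define the register save frame that SAVE_ALL builds and that the R0()/PSW()/ORIG_R0() macros index. Purely as a reading aid, the plain-M32R column (no CONFIG_ISA_DSP_LEVEL2) corresponds to a struct of roughly the following shape; the name m32r_save_frame is invented here and a 4-byte unsigned int is assumed, the authoritative definition being struct pt_regs in asm/ptrace.h:

#include <stddef.h>

/* One 32-bit slot per line of the entry.S layout comment (M32R column). */
struct m32r_save_frame {
	unsigned int r4, r5, r6;                 /* +0x00 .. +0x08 */
	unsigned int pt_regs_ptr;                /* +0x0c, pointer handed to C */
	unsigned int r0, r1, r2, r3;             /* +0x10 .. +0x1c */
	unsigned int r7, r8, r9, r10, r11, r12;  /* +0x20 .. +0x34 */
	unsigned int syscall_nr;                 /* +0x38 */
	unsigned int acch, accl;                 /* +0x3c, +0x40 */
	unsigned int psw, bpc;                   /* +0x44, +0x48 */
	unsigned int bbpsw, bbpc;                /* +0x4c, +0x50 */
	unsigned int spu;                        /* +0x54, user stack pointer */
	unsigned int fp, lr;                     /* +0x58 (r13), +0x5c (r14) */
	unsigned int spi;                        /* +0x60, saved stack pointer */
	unsigned int orig_r0;                    /* +0x64 */
};

/* The ORIG_R0(reg) accessor above expects exactly this offset. */
_Static_assert(offsetof(struct m32r_save_frame, orig_r0) == 0x64,
	       "frame layout must match the entry.S comment");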
diff --git a/arch/m32r/kernel/head.S b/arch/m32r/kernel/head.S
new file mode 100644
index 00000000000..3e83173995c
--- /dev/null
+++ b/arch/m32r/kernel/head.S
@@ -0,0 +1,287 @@
+/*
+ * linux/arch/m32r/kernel/head.S
+ *
+ * M32R startup code.
+ *
+ * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto
+ */
+
+/* $Id$ */
+
+#include <linux/init.h>
+__INIT
+__INITDATA
+
+ .text
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/assembler.h>
+#include <asm/m32r.h>
+#include <asm/mmu_context.h>
+
+/*
+ * References to members of the boot_cpu_data structure.
+ */
+ .text
+ .global start_kernel
+ .global __bss_start
+ .global _end
+ENTRY(stext)
+ENTRY(_stext)
+ENTRY(startup_32)
+	/* Set up the stack pointers */
+ LDIMM (r0, spi_stack_top)
+ LDIMM (r1, spu_stack_top)
+ mvtc r0, spi
+ mvtc r1, spu
+
+	/* Initialize PSW */
+ ldi r0, #0x0000 /* use SPI, disable EI */
+ mvtc r0, psw
+
+ /* Set up the stack pointer */
+ LDIMM (r0, stack_start)
+ ld r0, @r0
+ mvtc r0, spi
+
+/*
+ * Clear BSS first so that there are no surprises...
+ */
+#ifdef CONFIG_ISA_DUAL_ISSUE
+
+ LDIMM (r2, __bss_start)
+ LDIMM (r3, _end)
+ sub r3, r2 ; BSS size in bytes
+	; R4 = BSS size in 16-byte chunks (rounded down)
+ mv r4, r3 || ldi r1, #0
+ srli r4, #4 || addi r2, #-4
+ beqz r4, .Lendloop1
+.Lloop1:
+#ifndef CONFIG_CHIP_M32310
+ ; Touch memory for the no-write-allocating cache.
+ ld r0, @(4,r2)
+#endif
+ st r1, @+r2 || addi r4, #-1
+ st r1, @+r2
+ st r1, @+r2
+ st r1, @+r2 || cmpeq r1, r4 ; R4 = 0?
+ bnc .Lloop1
+.Lendloop1:
+ and3 r4, r3, #15
+ addi r2, #4
+ beqz r4, .Lendloop2
+.Lloop2:
+ stb r1, @r2 || addi r4, #-1
+ addi r2, #1
+ bnez r4, .Lloop2
+.Lendloop2:
+
+#else /* not CONFIG_ISA_DUAL_ISSUE */
+
+ LDIMM (r2, __bss_start)
+ LDIMM (r3, _end)
+ sub r3, r2 ; BSS size in bytes
+ mv r4, r3
+ srli r4, #2 ; R4 = BSS size in longwords (rounded down)
+ ldi r1, #0 ; clear R1 for longwords store
+ addi r2, #-4 ; account for pre-inc store
+ beqz r4, .Lendloop1 ; any more to go?
+.Lloop1:
+ st r1, @+r2 ; yep, zero out another longword
+ addi r4, #-1 ; decrement count
+ bnez r4, .Lloop1 ; go do some more
+.Lendloop1:
+ and3 r4, r3, #3 ; get no. of remaining BSS bytes to clear
+ addi r2, #4 ; account for pre-inc store
+ beqz r4, .Lendloop2 ; any more to go?
+.Lloop2:
+ stb r1, @r2 ; yep, zero out another byte
+ addi r2, #1 ; bump address
+ addi r4, #-1 ; decrement count
+ bnez r4, .Lloop2 ; go do some more
+.Lendloop2:
+
+#endif /* not CONFIG_ISA_DUAL_ISSUE */
+
+#if 0 /* M32R_FIXME */
+/*
+ * Copy data segment from ROM to RAM.
+ */
+ .global ROM_D, TOP_DATA, END_DATA
+
+ LDIMM (r1, ROM_D)
+ LDIMM (r2, TOP_DATA)
+ LDIMM (r3, END_DATA)
+ addi r2, #-4
+ addi r3, #-4
+loop1:
+ ld r0, @r1+
+ st r0, @+r2
+ cmp r2, r3
+ bc loop1
+#endif /* 0 */
+
+/* Jump to kernel */
+ LDIMM (r2, start_kernel)
+ jl r2
+ .fillinsn
+1:
+ bra 1b ; main should never return here, but
+ ; just in case, we know what happens.
+
+#ifdef CONFIG_SMP
+/*
+ * AP startup routine
+ */
+ .text
+ .global eit_vector
+ENTRY(startup_AP)
+;; setup EVB
+ LDIMM (r4, eit_vector)
+ mvtc r4, cr5
+
+;; enable MMU
+ LDIMM (r2, init_tlb)
+ jl r2
+ seth r4, #high(MATM)
+ or3 r4, r4, #low(MATM)
+ ldi r5, #0x01
+ st r5, @r4 ; Set MATM Reg(T bit ON)
+ ld r6, @r4 ; MATM Check
+ LDIMM (r5, 1f)
+ jmp r5 ; enable MMU
+ nop
+ .fillinsn
+1:
+;; ISN check
+ ld r6, @r4 ; MATM Check
+ seth r4, #high(M32R_ICU_ISTS_ADDR)
+ or3 r4, r4, #low(M32R_ICU_ISTS_ADDR)
+ ld r5, @r4 ; Read ISTSi reg.
+ mv r6, r5
+ slli r5, #13 ; PIML check
+ srli r5, #13 ;
+ seth r4, #high(M32R_ICU_IMASK_ADDR)
+ or3 r4, r4, #low(M32R_ICU_IMASK_ADDR)
+ st r5, @r4 ; Write IMASKi reg.
+ slli r6, #4 ; ISN check
+ srli r6, #26 ;
+ seth r4, #high(M32R_IRQ_IPI5)
+ or3 r4, r4, #low(M32R_IRQ_IPI5)
+ bne r4, r6, 2f ; if (ISN != CPU_BOOT_IPI) goto sleep;
+
+;; check cpu_bootout_map and set cpu_bootin_map
+ LDIMM (r4, cpu_bootout_map)
+ ld r4, @r4
+ seth r5, #high(M32R_CPUID_PORTL)
+ or3 r5, r5, #low(M32R_CPUID_PORTL)
+ ld r5, @r5
+ ldi r6, #1
+ sll r6, r5
+ and r4, r6
+ beqz r4, 2f
+ LDIMM (r4, cpu_bootin_map)
+ ld r5, @r4
+ or r5, r6
+ st r6, @r4
+
+;; clear PSW
+ ldi r4, #0
+ mvtc r4, psw
+
+;; setup SPI
+ LDIMM (r4, stack_start)
+ ld r4, @r4
+ mvtc r4, spi
+
+;; setup BPC (start_secondary)
+ LDIMM (r4, start_secondary)
+ mvtc r4, bpc
+
+ rte ; goto startup_secondary
+ nop
+ nop
+
+ .fillinsn
+2:
+ ;; disable MMU
+ seth r4, #high(MATM)
+ or3 r4, r4, #low(MATM)
+ ldi r5, #0
+ st r5, @r4 ; Set MATM Reg(T bit OFF)
+ ld r6, @r4 ; MATM Check
+ LDIMM (r4, 3f)
+ seth r5, #high(__PAGE_OFFSET)
+ or3 r5, r5, #low(__PAGE_OFFSET)
+ not r5, r5
+ and r4, r5
+ jmp r4 ; disable MMU
+ nop
+ .fillinsn
+3:
+ ;; SLEEP and wait IPI
+ LDIMM (r4, AP_loop)
+ seth r5, #high(__PAGE_OFFSET)
+ or3 r5, r5, #low(__PAGE_OFFSET)
+ not r5, r5
+ and r4, r5
+ jmp r4
+ nop
+ nop
+#endif /* CONFIG_SMP */
+
+ENTRY(stack_start)
+ .long init_thread_union+8192
+ .long __KERNEL_DS
+
+/*
+ * This is initialized to create an identity-mapping at 0-4M (for bootup
+ * purposes) and another mapping of the 0-4M area at virtual address
+ * PAGE_OFFSET.
+ */
+ .text
+
+#define MOUNT_ROOT_RDONLY 1
+#define RAMDISK_FLAGS 0 ; 1024KB
+#define ORIG_ROOT_DEV 0x0100 ; /dev/ram0 (major:01, minor:00)
+#define LOADER_TYPE 1 ; (??? - non-zero value seems
+ ; to be needed to boot from initrd)
+
+#define COMMAND_LINE ""
+
+ .section .empty_zero_page, "aw"
+ENTRY(empty_zero_page)
+ .long MOUNT_ROOT_RDONLY /* offset: +0x00 */
+ .long RAMDISK_FLAGS
+ .long ORIG_ROOT_DEV
+ .long LOADER_TYPE
+ .long 0 /* INITRD_START */ /* +0x10 */
+ .long 0 /* INITRD_SIZE */
+ .long 0 /* CPU_CLOCK */
+ .long 0 /* BUS_CLOCK */
+ .long 0 /* TIMER_DIVIDE */ /* +0x20 */
+ .balign 256,0
+ .asciz COMMAND_LINE
+ .byte 0
+ .balign 4096,0,4096
+
+/*------------------------------------------------------------------------
+ * Stack area
+ */
+ .section .spi
+ ALIGN
+ .global spi_stack_top
+ .zero 1024
+spi_stack_top:
+
+ .section .spu
+ ALIGN
+ .global spu_stack_top
+ .zero 1024
+spu_stack_top:
+
+ .end
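Before jumping to start_kernel, head.S clears the BSS by hand: whole words first, then a byte tail for the remainder (the CONFIG_ISA_DUAL_ISSUE variant unrolls the word loop to 16 bytes per iteration and pre-touches each line for the no-write-allocate cache). A C sketch of what the simple, non-dual-issue loop pair does, with an invented function name:

#include <stddef.h>
#include <stdint.h>

/* Equivalent of .Lloop1/.Lloop2 in the !CONFIG_ISA_DUAL_ISSUE path:
 * zero whole 32-bit words, then the remaining 0-3 bytes one at a time. */
void clear_bss(unsigned char *start, unsigned char *end)
{
	size_t bytes = (size_t)(end - start);
	uint32_t *wp = (uint32_t *)start;
	size_t words = bytes >> 2;      /* srli r4, #2 */

	while (words--)
		*wp++ = 0;              /* st r1, @+r2 */

	start = (unsigned char *)wp;
	bytes &= 3;                     /* and3 r4, r3, #3 */
	while (bytes--)
		*start++ = 0;           /* stb r1, @r2 */
}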
diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c
new file mode 100644
index 00000000000..9e508fd9d97
--- /dev/null
+++ b/arch/m32r/kernel/init_task.c
@@ -0,0 +1,41 @@
+/* orig : i386 init_task.c */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union
+ __attribute__((__section__(".data.init_task"))) =
+ { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
+
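The 8192-byte alignment that the comment above insists on is what lets the kernel find the current thread_info from any kernel-stack address: thread_info and the stack share one 8 KB block, so masking the stack pointer yields the block base, which is what the GET_THREAD_INFO macro used throughout entry.S relies on. A small illustration of the masking trick, with an invented helper name:

#include <stdint.h>

#define THREAD_SIZE 8192UL      /* the two-page block init_task.c aligns to */

/* Round an address inside the kernel stack down to the base of its
 * 8 KB block; thread_info lives at that base. */
static inline uintptr_t thread_block_base(uintptr_t sp)
{
	return sp & ~((uintptr_t)THREAD_SIZE - 1);
}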
diff --git a/arch/m32r/kernel/io_m32700ut.c b/arch/m32r/kernel/io_m32700ut.c
new file mode 100644
index 00000000000..371ba904e96
--- /dev/null
+++ b/arch/m32r/kernel/io_m32700ut.c
@@ -0,0 +1,472 @@
+/*
+ * linux/arch/m32r/kernel/io_m32700ut.c
+ *
+ * Typical I/O routines for M32700UT board.
+ *
+ * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto, Takeo Takahashi
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/config.h>
+#include <asm/m32r.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+#include <linux/types.h>
+
+#define M32R_PCC_IOMAP_SIZE 0x1000
+
+#define M32R_PCC_IOSTART0 0x1000
+#define M32R_PCC_IOEND0 (M32R_PCC_IOSTART0 + M32R_PCC_IOMAP_SIZE - 1)
+
+extern void pcc_ioread_byte(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_ioread_word(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_iowrite_byte(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
+#endif /* CONFIG_PCMCIA && CONFIG_M32R_CFC */
+
+#define PORT2ADDR(port) _port2addr(port)
+#define PORT2ADDR_USB(port) _port2addr_usb(port)
+
+static inline void *_port2addr(unsigned long port)
+{
+ return (void *)(port + NONCACHE_OFFSET);
+}
+
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+static inline void *__port2addr_ata(unsigned long port)
+{
+ static int dummy_reg;
+
+ switch (port) {
+ case 0x1f0: return (void *)0xac002000;
+ case 0x1f1: return (void *)0xac012800;
+ case 0x1f2: return (void *)0xac012002;
+ case 0x1f3: return (void *)0xac012802;
+ case 0x1f4: return (void *)0xac012004;
+ case 0x1f5: return (void *)0xac012804;
+ case 0x1f6: return (void *)0xac012006;
+ case 0x1f7: return (void *)0xac012806;
+ case 0x3f6: return (void *)0xac01200e;
+ default: return (void *)&dummy_reg;
+ }
+}
+#endif
+
+/*
+ * M32700UT-LAN is located in the extended bus space
+ * from 0x10000000 to 0x13ffffff on physical address.
+ * The base address of the LAN controller (LAN91C111) is 0x300.
+ */
+#define LAN_IOSTART 0x300
+#define LAN_IOEND 0x320
+static inline void *_port2addr_ne(unsigned long port)
+{
+ return (void *)(port + NONCACHE_OFFSET + 0x10000000);
+}
+static inline void *_port2addr_usb(unsigned long port)
+{
+ return (void *)((port & 0x0f) + NONCACHE_OFFSET + 0x10303000);
+}
+
+static inline void delay(void)
+{
+ __asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory");
+}
+
+/*
+ * NIC I/O function
+ */
+
+#define PORT2ADDR_NE(port) _port2addr_ne(port)
+
+static inline unsigned char _ne_inb(void *portp)
+{
+ return *(volatile unsigned char *)portp;
+}
+
+static inline unsigned short _ne_inw(void *portp)
+{
+ return (unsigned short)le16_to_cpu(*(volatile unsigned short *)portp);
+}
+
+static inline void _ne_insb(void *portp, void *addr, unsigned long count)
+{
+ unsigned char *buf = (unsigned char *)addr;
+
+ while (count--)
+ *buf++ = _ne_inb(portp);
+}
+
+static inline void _ne_outb(unsigned char b, void *portp)
+{
+ *(volatile unsigned char *)portp = b;
+}
+
+static inline void _ne_outw(unsigned short w, void *portp)
+{
+ *(volatile unsigned short *)portp = cpu_to_le16(w);
+}
+
+unsigned char _inb(unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ return _ne_inb(PORT2ADDR_NE(port));
+
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ return *(volatile unsigned char *)__port2addr_ata(port);
+ }
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned char b;
+ pcc_ioread_byte(0, port, &b, sizeof(b), 1, 0);
+ return b;
+ } else
+#endif
+
+ return *(volatile unsigned char *)PORT2ADDR(port);
+}
+
+unsigned short _inw(unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ return _ne_inw(PORT2ADDR_NE(port));
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ return *(volatile unsigned short *)__port2addr_ata(port);
+ }
+#endif
+#if defined(CONFIG_USB)
+ else if(port >= 0x340 && port < 0x3a0)
+ return *(volatile unsigned short *)PORT2ADDR_USB(port);
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned short w;
+ pcc_ioread_word(0, port, &w, sizeof(w), 1, 0);
+ return w;
+ } else
+#endif
+ return *(volatile unsigned short *)PORT2ADDR(port);
+}
+
+unsigned long _inl(unsigned long port)
+{
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned long l;
+ pcc_ioread_word(0, port, &l, sizeof(l), 1, 0);
+ return l;
+ } else
+#endif
+ return *(volatile unsigned long *)PORT2ADDR(port);
+}
+
+unsigned char _inb_p(unsigned long port)
+{
+ unsigned char v;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ v = _ne_inb(PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ return *(volatile unsigned char *)__port2addr_ata(port);
+ } else
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned char b;
+ pcc_ioread_byte(0, port, &b, sizeof(b), 1, 0);
+ return b;
+ } else
+#endif
+ v = *(volatile unsigned char *)PORT2ADDR(port);
+
+ delay();
+ return (v);
+}
+
+unsigned short _inw_p(unsigned long port)
+{
+ unsigned short v;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ v = _ne_inw(PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ return *(volatile unsigned short *)__port2addr_ata(port);
+ } else
+#endif
+#if defined(CONFIG_USB)
+ if(port >= 0x340 && port < 0x3a0)
+ return *(volatile unsigned short *)PORT2ADDR_USB(port);
+ else
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned short w;
+ pcc_ioread_word(0, port, &w, sizeof(w), 1, 0);
+ return w;
+ } else
+#endif
+ v = *(volatile unsigned short *)PORT2ADDR(port);
+
+ delay();
+ return (v);
+}
+
+unsigned long _inl_p(unsigned long port)
+{
+ unsigned long v;
+
+ v = *(volatile unsigned long *)PORT2ADDR(port);
+ delay();
+ return (v);
+}
+
+void _outb(unsigned char b, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outb(b, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ *(volatile unsigned char *)__port2addr_ata(port) = b;
+ } else
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_byte(0, port, &b, sizeof(b), 1, 0);
+ } else
+#endif
+ *(volatile unsigned char *)PORT2ADDR(port) = b;
+}
+
+void _outw(unsigned short w, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outw(w, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ *(volatile unsigned short *)__port2addr_ata(port) = w;
+ } else
+#endif
+#if defined(CONFIG_USB)
+ if(port >= 0x340 && port < 0x3a0)
+ *(volatile unsigned short *)PORT2ADDR_USB(port) = w;
+ else
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_word(0, port, &w, sizeof(w), 1, 0);
+ } else
+#endif
+ *(volatile unsigned short *)PORT2ADDR(port) = w;
+}
+
+void _outl(unsigned long l, unsigned long port)
+{
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_word(0, port, &l, sizeof(l), 1, 0);
+ } else
+#endif
+ *(volatile unsigned long *)PORT2ADDR(port) = l;
+}
+
+void _outb_p(unsigned char b, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outb(b, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ *(volatile unsigned char *)__port2addr_ata(port) = b;
+ } else
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_byte(0, port, &b, sizeof(b), 1, 0);
+ } else
+#endif
+ *(volatile unsigned char *)PORT2ADDR(port) = b;
+
+ delay();
+}
+
+void _outw_p(unsigned short w, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outw(w, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ *(volatile unsigned short *)__port2addr_ata(port) = w;
+ } else
+#endif
+#if defined(CONFIG_USB)
+ if(port >= 0x340 && port < 0x3a0)
+ *(volatile unsigned short *)PORT2ADDR_USB(port) = w;
+ else
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_word(0, port, &w, sizeof(w), 1, 0);
+ } else
+#endif
+ *(volatile unsigned short *)PORT2ADDR(port) = w;
+
+ delay();
+}
+
+void _outl_p(unsigned long l, unsigned long port)
+{
+ *(volatile unsigned long *)PORT2ADDR(port) = l;
+ delay();
+}
+
+void _insb(unsigned int port, void *addr, unsigned long count)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_insb(PORT2ADDR_NE(port), addr, count);
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ unsigned char *buf = addr;
+ unsigned char *portp = __port2addr_ata(port);
+ while (count--)
+ *buf++ = *(volatile unsigned char *)portp;
+ }
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_ioread_byte(0, port, (void *)addr, sizeof(unsigned char),
+ count, 1);
+ }
+#endif
+ else {
+ unsigned char *buf = addr;
+ unsigned char *portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned char *)portp;
+ }
+}
+
+void _insw(unsigned int port, void *addr, unsigned long count)
+{
+ unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND) {
+ /*
+ * This portion is only used by smc91111.c to read data
+ * from the DATA_REG. Do not swap the data.
+ */
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_ioread_word(0, port, (void *)addr, sizeof(unsigned short),
+ count, 1);
+#endif
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ } else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ portp = __port2addr_ata(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+ }
+}
+
+void _insl(unsigned int port, void *addr, unsigned long count)
+{
+ unsigned long *buf = addr;
+ unsigned long *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned long *)portp;
+}
+
+void _outsb(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned char *buf = addr;
+ unsigned char *portp;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND) {
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ _ne_outb(*buf++, portp);
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ } else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ portp = __port2addr_ata(port);
+ while (count--)
+ *(volatile unsigned char *)portp = *buf++;
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_byte(0, port, (void *)addr, sizeof(unsigned char),
+ count, 1);
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned char *)portp = *buf++;
+ }
+}
+
+void _outsw(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND) {
+ /*
+ * This portion is only used by smc91111.c to write data
+ * into the DATA_REG. Do not swap the data.
+ */
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ } else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ portp = __port2addr_ata(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_word(0, port, (void *)addr, sizeof(unsigned short),
+ count, 1);
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+ }
+}
+
+void _outsl(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned long *buf = addr;
+ unsigned long *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned long *)portp = *buf++;
+}
diff --git a/arch/m32r/kernel/io_mappi.c b/arch/m32r/kernel/io_mappi.c
new file mode 100644
index 00000000000..85688ffb52f
--- /dev/null
+++ b/arch/m32r/kernel/io_mappi.c
@@ -0,0 +1,384 @@
+/*
+ * linux/arch/m32r/kernel/io_mappi.c
+ *
+ * Typical I/O routines for Mappi board.
+ *
+ * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto
+ */
+
+#include <linux/config.h>
+#include <asm/m32r.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+#include <linux/types.h>
+
+#define M32R_PCC_IOMAP_SIZE 0x1000
+
+#define M32R_PCC_IOSTART0 0x1000
+#define M32R_PCC_IOEND0 (M32R_PCC_IOSTART0 + M32R_PCC_IOMAP_SIZE - 1)
+#define M32R_PCC_IOSTART1 0x2000
+#define M32R_PCC_IOEND1 (M32R_PCC_IOSTART1 + M32R_PCC_IOMAP_SIZE - 1)
+
+extern void pcc_ioread(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_iowrite(int, unsigned long, void *, size_t, size_t, int);
+#endif /* CONFIG_PCMCIA && CONFIG_M32R_PCC */
+
+#define PORT2ADDR(port) _port2addr(port)
+
+static inline void *_port2addr(unsigned long port)
+{
+ return (void *)(port + NONCACHE_OFFSET);
+}
+
+static inline void *_port2addr_ne(unsigned long port)
+{
+ return (void *)((port<<1) + NONCACHE_OFFSET + 0x0C000000);
+}
+
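+/*
+ * Short I/O recovery delay: a dummy push/pop pair, marked volatile with
+ * a memory clobber so the compiler does not optimize it away.
+ */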
+static inline void delay(void)
+{
+ __asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory");
+}
+
+/*
+ * NIC I/O function
+ */
+
+#define PORT2ADDR_NE(port) _port2addr_ne(port)
+
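+/*
+ * The NE-2000 style LAN registers are reached through _port2addr_ne()
+ * above: the I/O port number is shifted left by one, and the helpers
+ * below transfer data as 16-bit accesses, taking byte data from the
+ * least-significant half and converting word data from little endian.
+ */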
+static inline unsigned char _ne_inb(void *portp)
+{
+ return (unsigned char) *(volatile unsigned short *)portp;
+}
+
+static inline unsigned short _ne_inw(void *portp)
+{
+ unsigned short tmp;
+
+ tmp = *(volatile unsigned short *)portp;
+ return le16_to_cpu(tmp);
+}
+
+static inline void _ne_outb(unsigned char b, void *portp)
+{
+ *(volatile unsigned short *)portp = (unsigned short)b;
+}
+
+static inline void _ne_outw(unsigned short w, void *portp)
+{
+ *(volatile unsigned short *)portp = cpu_to_le16(w);
+}
+
+unsigned char _inb(unsigned long port)
+{
+ if (port >= 0x300 && port < 0x320)
+ return _ne_inb(PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned char b;
+ pcc_ioread(0, port, &b, sizeof(b), 1, 0);
+ return b;
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ unsigned char b;
+ pcc_ioread(1, port, &b, sizeof(b), 1, 0);
+ return b;
+ } else
+#endif
+
+ return *(volatile unsigned char *)PORT2ADDR(port);
+}
+
+unsigned short _inw(unsigned long port)
+{
+ if (port >= 0x300 && port < 0x320)
+ return _ne_inw(PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned short w;
+ pcc_ioread(0, port, &w, sizeof(w), 1, 0);
+ return w;
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ unsigned short w;
+ pcc_ioread(1, port, &w, sizeof(w), 1, 0);
+ return w;
+ } else
+#endif
+ return *(volatile unsigned short *)PORT2ADDR(port);
+}
+
+unsigned long _inl(unsigned long port)
+{
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned long l;
+ pcc_ioread(0, port, &l, sizeof(l), 1, 0);
+ return l;
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ unsigned long l;
+ pcc_ioread(1, port, &l, sizeof(l), 1, 0);
+ return l;
+ } else
+#endif
+ return *(volatile unsigned long *)PORT2ADDR(port);
+}
+
+unsigned char _inb_p(unsigned long port)
+{
+ unsigned char v;
+
+ if (port >= 0x300 && port < 0x320)
+ v = _ne_inb(PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned char b;
+ pcc_ioread(0, port, &b, sizeof(b), 1, 0);
+ return b;
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ unsigned char b;
+ pcc_ioread(1, port, &b, sizeof(b), 1, 0);
+ return b;
+ } else
+#endif
+ v = *(volatile unsigned char *)PORT2ADDR(port);
+
+ delay();
+ return (v);
+}
+
+unsigned short _inw_p(unsigned long port)
+{
+ unsigned short v;
+
+ if (port >= 0x300 && port < 0x320)
+ v = _ne_inw(PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned short w;
+ pcc_ioread(0, port, &w, sizeof(w), 1, 0);
+ return w;
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ unsigned short w;
+ pcc_ioread(1, port, &w, sizeof(w), 1, 0);
+ return w;
+ } else
+#endif
+ v = *(volatile unsigned short *)PORT2ADDR(port);
+
+ delay();
+ return (v);
+}
+
+unsigned long _inl_p(unsigned long port)
+{
+ unsigned long v;
+
+ v = *(volatile unsigned long *)PORT2ADDR(port);
+ delay();
+ return (v);
+}
+
+void _outb(unsigned char b, unsigned long port)
+{
+ if (port >= 0x300 && port < 0x320)
+ _ne_outb(b, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite(0, port, &b, sizeof(b), 1, 0);
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ pcc_iowrite(1, port, &b, sizeof(b), 1, 0);
+ } else
+#endif
+ *(volatile unsigned char *)PORT2ADDR(port) = b;
+}
+
+void _outw(unsigned short w, unsigned long port)
+{
+ if (port >= 0x300 && port < 0x320)
+ _ne_outw(w, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite(0, port, &w, sizeof(w), 1, 0);
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ pcc_iowrite(1, port, &w, sizeof(w), 1, 0);
+ } else
+#endif
+ *(volatile unsigned short *)PORT2ADDR(port) = w;
+}
+
+void _outl(unsigned long l, unsigned long port)
+{
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite(0, port, &l, sizeof(l), 1, 0);
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ pcc_iowrite(1, port, &l, sizeof(l), 1, 0);
+ } else
+#endif
+ *(volatile unsigned long *)PORT2ADDR(port) = l;
+}
+
+void _outb_p(unsigned char b, unsigned long port)
+{
+ if (port >= 0x300 && port < 0x320)
+ _ne_outb(b, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite(0, port, &b, sizeof(b), 1, 0);
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ pcc_iowrite(1, port, &b, sizeof(b), 1, 0);
+ } else
+#endif
+ *(volatile unsigned char *)PORT2ADDR(port) = b;
+
+ delay();
+}
+
+void _outw_p(unsigned short w, unsigned long port)
+{
+ if (port >= 0x300 && port < 0x320)
+ _ne_outw(w, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite(0, port, &w, sizeof(w), 1, 0);
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ pcc_iowrite(1, port, &w, sizeof(w), 1, 0);
+ } else
+#endif
+ *(volatile unsigned short *)PORT2ADDR(port) = w;
+
+ delay();
+}
+
+void _outl_p(unsigned long l, unsigned long port)
+{
+ *(volatile unsigned long *)PORT2ADDR(port) = l;
+ delay();
+}
+
+void _insb(unsigned int port, void *addr, unsigned long count)
+{
+ unsigned char *buf = addr;
+ unsigned char *portp;
+
+ if (port >= 0x300 && port < 0x320){
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ *buf++ = *(volatile unsigned char *)portp;
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_ioread(0, port, (void *)addr, sizeof(unsigned char),
+ count, 1);
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ pcc_ioread(1, port, (void *)addr, sizeof(unsigned char),
+ count, 1);
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned char *)portp;
+ }
+}
+
+void _insw(unsigned int port, void *addr, unsigned long count)
+{
+ unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= 0x300 && port < 0x320) {
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ *buf++ = _ne_inw(portp);
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_ioread(0, port, (void *)addr, sizeof(unsigned short),
+ count, 1);
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ pcc_ioread(1, port, (void *)addr, sizeof(unsigned short),
+ count, 1);
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+ }
+}
+
+void _insl(unsigned int port, void *addr, unsigned long count)
+{
+ unsigned long *buf = addr;
+ unsigned long *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned long *)portp;
+}
+
+void _outsb(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned char *buf = addr;
+ unsigned char *portp;
+
+ if (port >= 0x300 && port < 0x320) {
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ _ne_outb(*buf++, portp);
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite(0, port, (void *)addr, sizeof(unsigned char),
+ count, 1);
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ pcc_iowrite(1, port, (void *)addr, sizeof(unsigned char),
+ count, 1);
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned char *)portp = *buf++;
+ }
+}
+
+void _outsw(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= 0x300 && port < 0x320) {
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ _ne_outw(*buf++, portp);
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_PCC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite(0, port, (void *)addr, sizeof(unsigned short),
+ count, 1);
+ } else if (port >= M32R_PCC_IOSTART1 && port <= M32R_PCC_IOEND1) {
+ pcc_iowrite(1, port, (void *)addr, sizeof(unsigned short),
+ count, 1);
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+ }
+}
+
+void _outsl(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned long *buf = addr;
+ unsigned long *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned long *)portp = *buf++;
+}
diff --git a/arch/m32r/kernel/io_mappi2.c b/arch/m32r/kernel/io_mappi2.c
new file mode 100644
index 00000000000..4182cd4f97c
--- /dev/null
+++ b/arch/m32r/kernel/io_mappi2.c
@@ -0,0 +1,461 @@
+/*
+ * linux/arch/m32r/kernel/io_mappi2.c
+ *
+ * Typical I/O routines for Mappi2 board.
+ *
+ * Copyright (c) 2001-2003 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto, Mamoru Sakugawa
+ */
+
+#include <linux/config.h>
+#include <asm/m32r.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+#include <linux/types.h>
+
+#define M32R_PCC_IOMAP_SIZE 0x1000
+
+#define M32R_PCC_IOSTART0 0x1000
+#define M32R_PCC_IOEND0 (M32R_PCC_IOSTART0 + M32R_PCC_IOMAP_SIZE - 1)
+
+extern void pcc_ioread_byte(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_ioread_word(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_iowrite_byte(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
+#endif /* CONFIG_PCMCIA && CONFIG_M32R_CFC */
+
+#define PORT2ADDR(port) _port2addr(port)
+#define PORT2ADDR_NE(port) _port2addr_ne(port)
+#define PORT2ADDR_USB(port) _port2addr_usb(port)
+
+static inline void *_port2addr(unsigned long port)
+{
+ return (void *)(port + NONCACHE_OFFSET);
+}
+
+#define LAN_IOSTART 0x300
+#define LAN_IOEND 0x320
+
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
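+/*
+ * Legacy IDE command/control ports (0x1f0-0x1f7 and 0x3f6) are
+ * translated to fixed ATA register addresses by the switch below;
+ * any other port falls back to a local dummy register.
+ */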
+static inline void *__port2addr_ata(unsigned long port)
+{
+ static int dummy_reg;
+
+ switch (port) {
+ case 0x1f0: return (void *)0xac002000;
+ case 0x1f1: return (void *)0xac012800;
+ case 0x1f2: return (void *)0xac012002;
+ case 0x1f3: return (void *)0xac012802;
+ case 0x1f4: return (void *)0xac012004;
+ case 0x1f5: return (void *)0xac012804;
+ case 0x1f6: return (void *)0xac012006;
+ case 0x1f7: return (void *)0xac012806;
+ case 0x3f6: return (void *)0xac01200e;
+ default: return (void *)&dummy_reg;
+ }
+}
+#endif
+
+#ifdef CONFIG_CHIP_OPSP
+static inline void *_port2addr_ne(unsigned long port)
+{
+ return (void *)(port + NONCACHE_OFFSET + 0x10000000);
+}
+#else
+static inline void *_port2addr_ne(unsigned long port)
+{
+ return (void *)(port + NONCACHE_OFFSET + 0x04000000);
+}
+#endif
+static inline void *_port2addr_usb(unsigned long port)
+{
+ return (void *)(port + NONCACHE_OFFSET + 0x14000000);
+}
+static inline void delay(void)
+{
+ __asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory");
+}
+
+/*
+ * NIC I/O function
+ */
+
+static inline unsigned char _ne_inb(void *portp)
+{
+ return (unsigned char) *(volatile unsigned char *)portp;
+}
+
+static inline unsigned short _ne_inw(void *portp)
+{
+ return (unsigned short)le16_to_cpu(*(volatile unsigned short *)portp);
+}
+
+static inline void _ne_insb(void *portp, void * addr, unsigned long count)
+{
+ unsigned char *buf = addr;
+
+ while (count--)
+ *buf++ = *(volatile unsigned char *)portp;
+}
+
+static inline void _ne_outb(unsigned char b, void *portp)
+{
+ *(volatile unsigned char *)portp = (unsigned char)b;
+}
+
+static inline void _ne_outw(unsigned short w, void *portp)
+{
+ *(volatile unsigned short *)portp = cpu_to_le16(w);
+}
+
+unsigned char _inb(unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ return _ne_inb(PORT2ADDR_NE(port));
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ return *(volatile unsigned char *)__port2addr_ata(port);
+ }
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned char b;
+ pcc_ioread_byte(0, port, &b, sizeof(b), 1, 0);
+ return b;
+ } else
+#endif
+
+ return *(volatile unsigned char *)PORT2ADDR(port);
+}
+
+unsigned short _inw(unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ return _ne_inw(PORT2ADDR_NE(port));
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ return *(volatile unsigned short *)__port2addr_ata(port);
+ }
+#endif
+#if defined(CONFIG_USB)
+ else if (port >= 0x340 && port < 0x3a0)
+ return *(volatile unsigned short *)PORT2ADDR_USB(port);
+#endif
+
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned short w;
+ pcc_ioread_word(0, port, &w, sizeof(w), 1, 0);
+ return w;
+ } else
+#endif
+ return *(volatile unsigned short *)PORT2ADDR(port);
+}
+
+unsigned long _inl(unsigned long port)
+{
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned long l;
+ pcc_ioread_word(0, port, &l, sizeof(l), 1, 0);
+ return l;
+ } else
+#endif
+ return *(volatile unsigned long *)PORT2ADDR(port);
+}
+
+unsigned char _inb_p(unsigned long port)
+{
+ unsigned char v;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ v = _ne_inb(PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ return *(volatile unsigned char *)__port2addr_ata(port);
+ } else
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned char b;
+ pcc_ioread_byte(0, port, &b, sizeof(b), 1, 0);
+ return b;
+ } else
+#endif
+ v = *(volatile unsigned char *)PORT2ADDR(port);
+
+ delay();
+ return (v);
+}
+
+unsigned short _inw_p(unsigned long port)
+{
+ unsigned short v;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ v = _ne_inw(PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ return *(volatile unsigned short *)__port2addr_ata(port);
+ } else
+#endif
+#if defined(CONFIG_USB)
+ if (port >= 0x340 && port < 0x3a0)
+ v = *(volatile unsigned short *)PORT2ADDR_USB(port);
+ else
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned short w;
+ pcc_ioread_word(0, port, &w, sizeof(w), 1, 0);
+ return w;
+ } else
+#endif
+ v = *(volatile unsigned short *)PORT2ADDR(port);
+
+ delay();
+ return (v);
+}
+
+unsigned long _inl_p(unsigned long port)
+{
+ unsigned long v;
+
+ v = *(volatile unsigned long *)PORT2ADDR(port);
+ delay();
+ return (v);
+}
+
+void _outb(unsigned char b, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outb(b, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ *(volatile unsigned char *)__port2addr_ata(port) = b;
+ } else
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_byte(0, port, &b, sizeof(b), 1, 0);
+ } else
+#endif
+ *(volatile unsigned char *)PORT2ADDR(port) = b;
+}
+
+void _outw(unsigned short w, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outw(w, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ *(volatile unsigned short *)__port2addr_ata(port) = w;
+ } else
+#endif
+#if defined(CONFIG_USB)
+ if (port >= 0x340 && port < 0x3a0)
+ *(volatile unsigned short *)PORT2ADDR_USB(port) = w;
+ else
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_word(0, port, &w, sizeof(w), 1, 0);
+ } else
+#endif
+ *(volatile unsigned short *)PORT2ADDR(port) = w;
+}
+
+void _outl(unsigned long l, unsigned long port)
+{
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_word(0, port, &l, sizeof(l), 1, 0);
+ } else
+#endif
+ *(volatile unsigned long *)PORT2ADDR(port) = l;
+}
+
+void _outb_p(unsigned char b, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outb(b, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ *(volatile unsigned char *)__port2addr_ata(port) = b;
+ } else
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_byte(0, port, &b, sizeof(b), 1, 0);
+ } else
+#endif
+ *(volatile unsigned char *)PORT2ADDR(port) = b;
+
+ delay();
+}
+
+void _outw_p(unsigned short w, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outw(w, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ *(volatile unsigned short *)__port2addr_ata(port) = w;
+ } else
+#endif
+#if defined(CONFIG_USB)
+ if (port >= 0x340 && port < 0x3a0)
+ *(volatile unsigned short *)PORT2ADDR_USB(port) = w;
+ else
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_word(0, port, &w, sizeof(w), 1, 0);
+ } else
+#endif
+ *(volatile unsigned short *)PORT2ADDR(port) = w;
+
+ delay();
+}
+
+void _outl_p(unsigned long l, unsigned long port)
+{
+ *(volatile unsigned long *)PORT2ADDR(port) = l;
+ delay();
+}
+
+void _insb(unsigned int port, void * addr, unsigned long count)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_insb(PORT2ADDR_NE(port), addr, count);
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ unsigned char *buf = addr;
+ unsigned char *portp = __port2addr_ata(port);
+ while (count--)
+ *buf++ = *(volatile unsigned char *)portp;
+ }
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_ioread_byte(0, port, (void *)addr, sizeof(unsigned char),
+ count, 1);
+ }
+#endif
+ else {
+ unsigned char *buf = addr;
+ unsigned char *portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned char *)portp;
+ }
+}
+
+void _insw(unsigned int port, void * addr, unsigned long count)
+{
+ unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND) {
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_ioread_word(0, port, (void *)addr, sizeof(unsigned short),
+ count, 1);
+#endif
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ } else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ portp = __port2addr_ata(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+ }
+}
+
+void _insl(unsigned int port, void * addr, unsigned long count)
+{
+ unsigned long *buf = addr;
+ unsigned long *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned long *)portp;
+}
+
+void _outsb(unsigned int port, const void * addr, unsigned long count)
+{
+ const unsigned char *buf = addr;
+ unsigned char *portp;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND) {
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ _ne_outb(*buf++, portp);
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ } else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ portp = __port2addr_ata(port);
+ while (count--)
+ *(volatile unsigned char *)portp = *buf++;
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_byte(0, port, (void *)addr, sizeof(unsigned char),
+ count, 1);
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned char *)portp = *buf++;
+ }
+}
+
+void _outsw(unsigned int port, const void * addr, unsigned long count)
+{
+ const unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND) {
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ } else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ portp = __port2addr_ata(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_word(0, port, (void *)addr, sizeof(unsigned short),
+ count, 1);
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+ }
+}
+
+void _outsl(unsigned int port, const void * addr, unsigned long count)
+{
+ const unsigned long *buf = addr;
+ unsigned long *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned long *)portp = *buf++;
+}
diff --git a/arch/m32r/kernel/io_oaks32r.c b/arch/m32r/kernel/io_oaks32r.c
new file mode 100644
index 00000000000..286964794d5
--- /dev/null
+++ b/arch/m32r/kernel/io_oaks32r.c
@@ -0,0 +1,251 @@
+/*
+ * linux/arch/m32r/kernel/io_oaks32r.c
+ *
+ * Typical I/O routines for OAKS32R board.
+ *
+ * Copyright (c) 2001-2004 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto, Mamoru Sakugawa
+ */
+
+#include <linux/config.h>
+#include <asm/m32r.h>
+#include <asm/page.h>
+#include <asm/io.h>
+
+#define PORT2ADDR(port) _port2addr(port)
+
+static inline void *_port2addr(unsigned long port)
+{
+ return (void *)(port + NONCACHE_OFFSET);
+}
+
+static inline void *_port2addr_ne(unsigned long port)
+{
+ return (void *)((port<<1) + NONCACHE_OFFSET + 0x02000000);
+}
+
+static inline void delay(void)
+{
+ __asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory");
+}
+
+/*
+ * NIC I/O function
+ */
+
+#define PORT2ADDR_NE(port) _port2addr_ne(port)
+
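+/*
+ * On OAKS32R the LAN registers are reached at doubled port offsets
+ * (see _port2addr_ne above).  Byte data is accessed through portp + 1,
+ * and 16-bit data is transferred as two halfword accesses at portp
+ * and portp + 2.
+ */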
+static inline unsigned char _ne_inb(void *portp)
+{
+ return *(volatile unsigned char *)(portp+1);
+}
+
+static inline unsigned short _ne_inw(void *portp)
+{
+ unsigned short tmp;
+
+ tmp = *(volatile unsigned short *)(portp) & 0xff;
+ tmp |= *(volatile unsigned short *)(portp+2) << 8;
+ return tmp;
+}
+
+static inline void _ne_insb(void *portp, void *addr, unsigned long count)
+{
+ unsigned char *buf = addr;
+ while (count--)
+ *buf++ = *(volatile unsigned char *)(portp+1);
+}
+
+static inline void _ne_outb(unsigned char b, void *portp)
+{
+ *(volatile unsigned char *)(portp+1) = b;
+}
+
+static inline void _ne_outw(unsigned short w, void *portp)
+{
+ *(volatile unsigned short *)portp = (w >> 8);
+ *(volatile unsigned short *)(portp+2) = (w & 0xff);
+}
+
+unsigned char _inb(unsigned long port)
+{
+ if (port >= 0x300 && port < 0x320)
+ return _ne_inb(PORT2ADDR_NE(port));
+
+ return *(volatile unsigned char *)PORT2ADDR(port);
+}
+
+unsigned short _inw(unsigned long port)
+{
+ if (port >= 0x300 && port < 0x320)
+ return _ne_inw(PORT2ADDR_NE(port));
+
+ return *(volatile unsigned short *)PORT2ADDR(port);
+}
+
+unsigned long _inl(unsigned long port)
+{
+ return *(volatile unsigned long *)PORT2ADDR(port);
+}
+
+unsigned char _inb_p(unsigned long port)
+{
+ unsigned char v;
+
+ if (port >= 0x300 && port < 0x320)
+ v = _ne_inb(PORT2ADDR_NE(port));
+ else
+ v = *(volatile unsigned char *)PORT2ADDR(port);
+
+ delay();
+ return (v);
+}
+
+unsigned short _inw_p(unsigned long port)
+{
+ unsigned short v;
+
+ if (port >= 0x300 && port < 0x320)
+ v = _ne_inw(PORT2ADDR_NE(port));
+ else
+ v = *(volatile unsigned short *)PORT2ADDR(port);
+
+ delay();
+ return (v);
+}
+
+unsigned long _inl_p(unsigned long port)
+{
+ unsigned long v;
+
+ v = *(volatile unsigned long *)PORT2ADDR(port);
+ delay();
+ return (v);
+}
+
+void _outb(unsigned char b, unsigned long port)
+{
+ if (port >= 0x300 && port < 0x320)
+ _ne_outb(b, PORT2ADDR_NE(port));
+ else
+ *(volatile unsigned char *)PORT2ADDR(port) = b;
+}
+
+void _outw(unsigned short w, unsigned long port)
+{
+ if (port >= 0x300 && port < 0x320)
+ _ne_outw(w, PORT2ADDR_NE(port));
+ else
+ *(volatile unsigned short *)PORT2ADDR(port) = w;
+}
+
+void _outl(unsigned long l, unsigned long port)
+{
+ *(volatile unsigned long *)PORT2ADDR(port) = l;
+}
+
+void _outb_p(unsigned char b, unsigned long port)
+{
+ if (port >= 0x300 && port < 0x320)
+ _ne_outb(b, PORT2ADDR_NE(port));
+ else
+ *(volatile unsigned char *)PORT2ADDR(port) = b;
+
+ delay();
+}
+
+void _outw_p(unsigned short w, unsigned long port)
+{
+ if (port >= 0x300 && port < 0x320)
+ _ne_outw(w, PORT2ADDR_NE(port));
+ else
+ *(volatile unsigned short *)PORT2ADDR(port) = w;
+
+ delay();
+}
+
+void _outl_p(unsigned long l, unsigned long port)
+{
+ *(volatile unsigned long *)PORT2ADDR(port) = l;
+ delay();
+}
+
+void _insb(unsigned int port, void *addr, unsigned long count)
+{
+ if (port >= 0x300 && port < 0x320)
+ _ne_insb(PORT2ADDR_NE(port), addr, count);
+ else {
+ unsigned char *buf = addr;
+ unsigned char *portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned char *)portp;
+ }
+}
+
+void _insw(unsigned int port, void *addr, unsigned long count)
+{
+ unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= 0x300 && port < 0x320) {
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ *buf++ = _ne_inw(portp);
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+ }
+}
+
+void _insl(unsigned int port, void *addr, unsigned long count)
+{
+ unsigned long *buf = addr;
+ unsigned long *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned long *)portp;
+}
+
+void _outsb(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned char *buf = addr;
+ unsigned char *portp;
+
+ if (port >= 0x300 && port < 0x320) {
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ _ne_outb(*buf++, portp);
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned char *)portp = *buf++;
+ }
+}
+
+void _outsw(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= 0x300 && port < 0x320) {
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ _ne_outw(*buf++, portp);
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+ }
+}
+
+void _outsl(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned long *buf = addr;
+ unsigned long *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned long *)portp = *buf++;
+}
diff --git a/arch/m32r/kernel/io_opsput.c b/arch/m32r/kernel/io_opsput.c
new file mode 100644
index 00000000000..aaf42f9f76d
--- /dev/null
+++ b/arch/m32r/kernel/io_opsput.c
@@ -0,0 +1,390 @@
+/*
+ * linux/arch/m32r/kernel/io_opsput.c
+ *
+ * Typical I/O routines for OPSPUT board.
+ *
+ * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto, Takeo Takahashi
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/config.h>
+#include <asm/m32r.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+#include <linux/types.h>
+
+#define M32R_PCC_IOMAP_SIZE 0x1000
+
+#define M32R_PCC_IOSTART0 0x1000
+#define M32R_PCC_IOEND0 (M32R_PCC_IOSTART0 + M32R_PCC_IOMAP_SIZE - 1)
+
+extern void pcc_ioread_byte(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_ioread_word(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_iowrite_byte(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
+#endif /* CONFIG_PCMCIA && CONFIG_M32R_CFC */
+
+#define PORT2ADDR(port) _port2addr(port)
+#define PORT2ADDR_USB(port) _port2addr_usb(port)
+
+static inline void *_port2addr(unsigned long port)
+{
+ return (void *)(port + NONCACHE_OFFSET);
+}
+
+/*
+ * OPSPUT-LAN is located in the extended bus space
+ * from 0x10000000 to 0x13ffffff on physical address.
+ * The base address of LAN controller(LAN91C111) is 0x300.
+ */
+#define LAN_IOSTART 0x300
+#define LAN_IOEND 0x320
+static inline void *_port2addr_ne(unsigned long port)
+{
+ return (void *)(port + NONCACHE_OFFSET + 0x10000000);
+}
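+/*
+ * Only the low four bits of the port number select a register in the
+ * USB controller block at 0x10303000.
+ */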
+static inline void *_port2addr_usb(unsigned long port)
+{
+ return (void *)((port & 0x0f) + NONCACHE_OFFSET + 0x10303000);
+}
+
+static inline void delay(void)
+{
+ __asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory");
+}
+
+/*
+ * NIC I/O function
+ */
+
+#define PORT2ADDR_NE(port) _port2addr_ne(port)
+
+static inline unsigned char _ne_inb(void *portp)
+{
+ return *(volatile unsigned char *)portp;
+}
+
+static inline unsigned short _ne_inw(void *portp)
+{
+ return (unsigned short)le16_to_cpu(*(volatile unsigned short *)portp);
+}
+
+static inline void _ne_insb(void *portp, void *addr, unsigned long count)
+{
+ unsigned char *buf = (unsigned char *)addr;
+
+ while (count--)
+ *buf++ = _ne_inb(portp);
+}
+
+static inline void _ne_outb(unsigned char b, void *portp)
+{
+ *(volatile unsigned char *)portp = b;
+}
+
+static inline void _ne_outw(unsigned short w, void *portp)
+{
+ *(volatile unsigned short *)portp = cpu_to_le16(w);
+}
+
+unsigned char _inb(unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ return _ne_inb(PORT2ADDR_NE(port));
+
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned char b;
+ pcc_ioread_byte(0, port, &b, sizeof(b), 1, 0);
+ return b;
+ } else
+#endif
+
+ return *(volatile unsigned char *)PORT2ADDR(port);
+}
+
+unsigned short _inw(unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ return _ne_inw(PORT2ADDR_NE(port));
+#if defined(CONFIG_USB)
+ else if(port >= 0x340 && port < 0x3a0)
+ return *(volatile unsigned short *)PORT2ADDR_USB(port);
+#endif
+
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned short w;
+ pcc_ioread_word(0, port, &w, sizeof(w), 1, 0);
+ return w;
+ } else
+#endif
+ return *(volatile unsigned short *)PORT2ADDR(port);
+}
+
+unsigned long _inl(unsigned long port)
+{
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned long l;
+ pcc_ioread_word(0, port, &l, sizeof(l), 1, 0);
+ return l;
+ } else
+#endif
+ return *(volatile unsigned long *)PORT2ADDR(port);
+}
+
+unsigned char _inb_p(unsigned long port)
+{
+ unsigned char v;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ v = _ne_inb(PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned char b;
+ pcc_ioread_byte(0, port, &b, sizeof(b), 1, 0);
+ return b;
+ } else
+#endif
+ v = *(volatile unsigned char *)PORT2ADDR(port);
+
+ delay();
+ return (v);
+}
+
+unsigned short _inw_p(unsigned long port)
+{
+ unsigned short v;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ v = _ne_inw(PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_USB)
+ if(port >= 0x340 && port < 0x3a0)
+ return *(volatile unsigned short *)PORT2ADDR_USB(port);
+ else
+#endif
+
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ unsigned short w;
+ pcc_ioread_word(0, port, &w, sizeof(w), 1, 0);
+ return w;
+ } else
+#endif
+ v = *(volatile unsigned short *)PORT2ADDR(port);
+
+ delay();
+ return (v);
+}
+
+unsigned long _inl_p(unsigned long port)
+{
+ unsigned long v;
+
+ v = *(volatile unsigned long *)PORT2ADDR(port);
+ delay();
+ return (v);
+}
+
+void _outb(unsigned char b, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outb(b, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_byte(0, port, &b, sizeof(b), 1, 0);
+ } else
+#endif
+ *(volatile unsigned char *)PORT2ADDR(port) = b;
+}
+
+void _outw(unsigned short w, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outw(w, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_USB)
+ if(port >= 0x340 && port < 0x3a0)
+ *(volatile unsigned short *)PORT2ADDR_USB(port) = w;
+ else
+#endif
+
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_word(0, port, &w, sizeof(w), 1, 0);
+ } else
+#endif
+ *(volatile unsigned short *)PORT2ADDR(port) = w;
+}
+
+void _outl(unsigned long l, unsigned long port)
+{
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_word(0, port, &l, sizeof(l), 1, 0);
+ } else
+#endif
+ *(volatile unsigned long *)PORT2ADDR(port) = l;
+}
+
+void _outb_p(unsigned char b, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outb(b, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_byte(0, port, &b, sizeof(b), 1, 0);
+ } else
+#endif
+ *(volatile unsigned char *)PORT2ADDR(port) = b;
+
+ delay();
+}
+
+void _outw_p(unsigned short w, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outw(w, PORT2ADDR_NE(port));
+ else
+#if defined(CONFIG_USB)
+ if(port >= 0x340 && port < 0x3a0)
+ *(volatile unsigned short *)PORT2ADDR_USB(port) = w;
+ else
+#endif
+
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_word(0, port, &w, sizeof(w), 1, 0);
+ } else
+#endif
+ *(volatile unsigned short *)PORT2ADDR(port) = w;
+
+ delay();
+}
+
+void _outl_p(unsigned long l, unsigned long port)
+{
+ *(volatile unsigned long *)PORT2ADDR(port) = l;
+ delay();
+}
+
+void _insb(unsigned int port, void *addr, unsigned long count)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_insb(PORT2ADDR_NE(port), addr, count);
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_ioread_byte(0, port, (void *)addr, sizeof(unsigned char),
+ count, 1);
+ }
+#endif
+ else {
+ unsigned char *buf = addr;
+ unsigned char *portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned char *)portp;
+ }
+}
+
+void _insw(unsigned int port, void *addr, unsigned long count)
+{
+ unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND) {
+ /*
+ * This portion is only used by smc91111.c to read data
+ * from the DATA_REG. Do not swap the data.
+ */
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_ioread_word(0, port, (void *)addr, sizeof(unsigned short),
+ count, 1);
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+ }
+}
+
+void _insl(unsigned int port, void *addr, unsigned long count)
+{
+ unsigned long *buf = addr;
+ unsigned long *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned long *)portp;
+}
+
+void _outsb(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned char *buf = addr;
+ unsigned char *portp;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND) {
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ _ne_outb(*buf++, portp);
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_byte(0, port, (void *)addr, sizeof(unsigned char),
+ count, 1);
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned char *)portp = *buf++;
+ }
+}
+
+void _outsw(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND) {
+ /*
+ * This portion is only used by smc91111.c to write data
+ * into the DATA_REG. Do not swap the data.
+ */
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_word(0, port, (void *)addr, sizeof(unsigned short),
+ count, 1);
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+ }
+}
+
+void _outsl(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned long *buf = addr;
+ unsigned long *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned long *)portp = *buf++;
+}
diff --git a/arch/m32r/kernel/io_usrv.c b/arch/m32r/kernel/io_usrv.c
new file mode 100644
index 00000000000..27928a0b99e
--- /dev/null
+++ b/arch/m32r/kernel/io_usrv.c
@@ -0,0 +1,249 @@
+/*
+ * linux/arch/m32r/kernel/io_usrv.c
+ *
+ * Typical I/O routines for uServer board.
+ *
+ * Copyright (c) 2001 - 2003 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto, Takeo Takahashi
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/m32r.h>
+#include <asm/page.h>
+#include <asm/io.h>
+
+#include <linux/types.h>
+#include "../drivers/m32r_cfc.h"
+
+extern void pcc_ioread_byte(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_ioread_word(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_iowrite_byte(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
+#define CFC_IOSTART CFC_IOPORT_BASE
+#define CFC_IOEND (CFC_IOSTART + (M32R_PCC_MAPSIZE * M32R_MAX_PCC) - 1)
+
+#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
+#define UART0_REGSTART 0x04c20000
+#define UART1_REGSTART 0x04c20100
+#define UART_IOMAP_SIZE 8
+#define UART0_IOSTART 0x3f8
+#define UART0_IOEND (UART0_IOSTART + UART_IOMAP_SIZE - 1)
+#define UART1_IOSTART 0x2f8
+#define UART1_IOEND (UART1_IOSTART + UART_IOMAP_SIZE - 1)
+#endif /* CONFIG_SERIAL_8250 || CONFIG_SERIAL_8250_MODULE */
+
+#define PORT2ADDR(port) _port2addr(port)
+
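+/*
+ * When the 8250 serial driver is configured, the legacy COM1/COM2 port
+ * ranges (0x3f8-0x3ff, 0x2f8-0x2ff) are remapped onto the on-board UART
+ * register blocks, with registers spaced two bytes apart
+ * ((port - base) << 1).
+ */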
+static __inline__ void *_port2addr(unsigned long port)
+{
+#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
+ if (port >= UART0_IOSTART && port <= UART0_IOEND)
+ port = ((port - UART0_IOSTART) << 1) + UART0_REGSTART;
+ else if (port >= UART1_IOSTART && port <= UART1_IOEND)
+ port = ((port - UART1_IOSTART) << 1) + UART1_REGSTART;
+#endif /* CONFIG_SERIAL_8250 || CONFIG_SERIAL_8250_MODULE */
+ return (void *)(port + NONCACHE_OFFSET);
+}
+
+static __inline__ void delay(void)
+{
+ __asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory");
+}
+
+unsigned char _inb(unsigned long port)
+{
+ if (port >= CFC_IOSTART && port <= CFC_IOEND) {
+ unsigned char b;
+ pcc_ioread_byte(0, port, &b, sizeof(b), 1, 0);
+ return b;
+ } else
+ return *(volatile unsigned char *)PORT2ADDR(port);
+}
+
+unsigned short _inw(unsigned long port)
+{
+ if (port >= CFC_IOSTART && port <= CFC_IOEND) {
+ unsigned short w;
+ pcc_ioread_word(0, port, &w, sizeof(w), 1, 0);
+ return w;
+ } else
+ return *(volatile unsigned short *)PORT2ADDR(port);
+}
+
+unsigned long _inl(unsigned long port)
+{
+ if (port >= CFC_IOSTART && port <= CFC_IOEND) {
+ unsigned long l;
+ pcc_ioread_word(0, port, &l, sizeof(l), 1, 0);
+ return l;
+ } else
+ return *(volatile unsigned long *)PORT2ADDR(port);
+}
+
+unsigned char _inb_p(unsigned long port)
+{
+ unsigned char b;
+
+ if (port >= CFC_IOSTART && port <= CFC_IOEND) {
+ pcc_ioread_byte(0, port, &b, sizeof(b), 1, 0);
+ return b;
+ } else {
+ b = *(volatile unsigned char *)PORT2ADDR(port);
+ delay();
+ return b;
+ }
+}
+
+unsigned short _inw_p(unsigned long port)
+{
+ unsigned short w;
+
+ if (port >= CFC_IOSTART && port <= CFC_IOEND) {
+ pcc_ioread_word(0, port, &w, sizeof(w), 1, 0);
+ return w;
+ } else {
+ w = *(volatile unsigned short *)PORT2ADDR(port);
+ delay();
+ return w;
+ }
+}
+
+unsigned long _inl_p(unsigned long port)
+{
+ unsigned long v;
+
+ v = *(volatile unsigned long *)PORT2ADDR(port);
+ delay();
+
+ return v;
+}
+
+void _outb(unsigned char b, unsigned long port)
+{
+ if (port >= CFC_IOSTART && port <= CFC_IOEND)
+ pcc_iowrite_byte(0, port, &b, sizeof(b), 1, 0);
+ else
+ *(volatile unsigned char *)PORT2ADDR(port) = b;
+}
+
+void _outw(unsigned short w, unsigned long port)
+{
+ if (port >= CFC_IOSTART && port <= CFC_IOEND)
+ pcc_iowrite_word(0, port, &w, sizeof(w), 1, 0);
+ else
+ *(volatile unsigned short *)PORT2ADDR(port) = w;
+}
+
+void _outl(unsigned long l, unsigned long port)
+{
+ if (port >= CFC_IOSTART && port <= CFC_IOEND)
+ pcc_iowrite_word(0, port, &l, sizeof(l), 1, 0);
+ else
+ *(volatile unsigned long *)PORT2ADDR(port) = l;
+}
+
+void _outb_p(unsigned char b, unsigned long port)
+{
+ if (port >= CFC_IOSTART && port <= CFC_IOEND)
+ pcc_iowrite_byte(0, port, &b, sizeof(b), 1, 0);
+ else
+ *(volatile unsigned char *)PORT2ADDR(port) = b;
+ delay();
+}
+
+void _outw_p(unsigned short w, unsigned long port)
+{
+ if (port >= CFC_IOSTART && port <= CFC_IOEND)
+ pcc_iowrite_word(0, port, &w, sizeof(w), 1, 0);
+ else
+ *(volatile unsigned short *)PORT2ADDR(port) = w;
+ delay();
+}
+
+void _outl_p(unsigned long l, unsigned long port)
+{
+ *(volatile unsigned long *)PORT2ADDR(port) = l;
+ delay();
+}
+
+void _insb(unsigned int port, void * addr, unsigned long count)
+{
+ if (port >= CFC_IOSTART && port <= CFC_IOEND)
+ pcc_ioread_byte(0, port, addr, sizeof(unsigned char), count, 1);
+ else {
+ unsigned char *buf = addr;
+ unsigned char *portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned char *)portp;
+ }
+}
+
+void _insw(unsigned int port, void * addr, unsigned long count)
+{
+ unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= CFC_IOSTART && port <= CFC_IOEND)
+ pcc_ioread_word(0, port, addr, sizeof(unsigned short), count,
+ 1);
+ else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+ }
+}
+
+void _insl(unsigned int port, void * addr, unsigned long count)
+{
+ unsigned long *buf = addr;
+ unsigned long *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned long *)portp;
+}
+
+void _outsb(unsigned int port, const void * addr, unsigned long count)
+{
+ const unsigned char *buf = addr;
+ unsigned char *portp;
+
+ if (port >= CFC_IOSTART && port <= CFC_IOEND)
+ pcc_iowrite_byte(0, port, (void *)addr, sizeof(unsigned char),
+ count, 1);
+ else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned char *)portp = *buf++;
+ }
+}
+
+void _outsw(unsigned int port, const void * addr, unsigned long count)
+{
+ const unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= CFC_IOSTART && port <= CFC_IOEND)
+ pcc_iowrite_word(0, port, (void *)addr, sizeof(unsigned short),
+ count, 1);
+ else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+ }
+}
+
+void _outsl(unsigned int port, const void * addr, unsigned long count)
+{
+ const unsigned long *buf = addr;
+ unsigned long *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned long *)portp = *buf++;
+}
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
new file mode 100644
index 00000000000..1ce63926a3c
--- /dev/null
+++ b/arch/m32r/kernel/irq.c
@@ -0,0 +1,91 @@
+/*
+ * linux/arch/m32r/kernel/irq.c
+ *
+ * Copyright (c) 2003, 2004 Hitoshi Yamamoto
+ * Copyright (c) 2004 Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+/*
+ * linux/arch/i386/kernel/irq.c
+ *
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file contains the lowest level m32r-specific interrupt
+ * entry and irq statistics code. All the remaining irq logic is
+ * done by the generic kernel/irq/ code and in the
+ * m32r-specific irq controller code.
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+atomic_t irq_err_count;
+atomic_t irq_mis_count;
+
+/*
+ * Generic, controller-independent functions:
+ */
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v, j;
+ struct irqaction * action;
+ unsigned long flags;
+
+ if (i == 0) {
+ seq_printf(p, " ");
+ for (j=0; j<NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "CPU%d ",j);
+ seq_putc(p, '\n');
+ }
+
+ if (i < NR_IRQS) {
+ spin_lock_irqsave(&irq_desc[i].lock, flags);
+ action = irq_desc[i].action;
+ if (!action)
+ goto skip;
+ seq_printf(p, "%3d: ",i);
+#ifndef CONFIG_SMP
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+#endif
+ seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %s", action->name);
+
+ for (action=action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+
+ seq_putc(p, '\n');
+skip:
+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ } else if (i == NR_IRQS) {
+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+ }
+ return 0;
+}
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
+{
+ irq_enter();
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ /* FIXME M32R */
+#endif
+ __do_IRQ(irq, regs);
+ irq_exit();
+
+ return 1;
+}
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
new file mode 100644
index 00000000000..e5ec134d81d
--- /dev/null
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -0,0 +1,140 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/string.h>
+
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/tlbflush.h>
+
+extern void dump_thread(struct pt_regs *, struct user *);
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
+extern struct drive_info_struct drive_info;
+EXPORT_SYMBOL(drive_info);
+#endif
+
+/* platform dependent support */
+EXPORT_SYMBOL(boot_cpu_data);
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(disable_irq_nosync);
+EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(__down);
+EXPORT_SYMBOL(__down_interruptible);
+EXPORT_SYMBOL(__up);
+EXPORT_SYMBOL(__down_trylock);
+
+/* Networking helper routines. */
+/* Delay loops */
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__const_udelay);
+
+EXPORT_SYMBOL(__get_user_1);
+EXPORT_SYMBOL(__get_user_2);
+EXPORT_SYMBOL(__get_user_4);
+
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strstr);
+
+EXPORT_SYMBOL(strncpy_from_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(clear_user);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__generic_copy_from_user);
+EXPORT_SYMBOL(__generic_copy_to_user);
+EXPORT_SYMBOL(strnlen_user);
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_CHIP_M32700_TS1
+extern void *dcache_dummy;
+EXPORT_SYMBOL(dcache_dummy);
+#endif
+EXPORT_SYMBOL(cpu_data);
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(cpu_callout_map);
+
+/* Global SMP stuff */
+EXPORT_SYMBOL(synchronize_irq);
+EXPORT_SYMBOL(smp_call_function);
+
+/* TLB flushing */
+EXPORT_SYMBOL(smp_flush_tlb_page);
+#endif
+
+/* compiler-generated symbols */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __lshldi3(void);
+extern void __lshrdi3(void);
+extern void __muldi3(void);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__lshldi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__muldi3);
+
+/* memory and string operations */
+EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(memcpy);
+/* EXPORT_SYMBOL(memcpy_fromio); // not implemented yet */
+/* EXPORT_SYMBOL(memcpy_toio); // not implemented yet */
+EXPORT_SYMBOL(memset);
+/* EXPORT_SYMBOL(memset_io); // not implemented yet */
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(clear_page);
+
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strncpy);
+
+EXPORT_SYMBOL(_inb);
+EXPORT_SYMBOL(_inw);
+EXPORT_SYMBOL(_inl);
+EXPORT_SYMBOL(_outb);
+EXPORT_SYMBOL(_outw);
+EXPORT_SYMBOL(_outl);
+EXPORT_SYMBOL(_inb_p);
+EXPORT_SYMBOL(_inw_p);
+EXPORT_SYMBOL(_inl_p);
+EXPORT_SYMBOL(_outb_p);
+EXPORT_SYMBOL(_outw_p);
+EXPORT_SYMBOL(_outl_p);
+EXPORT_SYMBOL(_insb);
+EXPORT_SYMBOL(_insw);
+EXPORT_SYMBOL(_insl);
+EXPORT_SYMBOL(_outsb);
+EXPORT_SYMBOL(_outsw);
+EXPORT_SYMBOL(_outsl);
+EXPORT_SYMBOL(_readb);
+EXPORT_SYMBOL(_readw);
+EXPORT_SYMBOL(_readl);
+EXPORT_SYMBOL(_writeb);
+EXPORT_SYMBOL(_writew);
+EXPORT_SYMBOL(_writel);
+
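
The table above is what lets loadable modules resolve these arch helpers at insmod time. As a hedged illustration (names invented, not taken from this commit), a trivial module that would link against two of the exports:

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/string.h>
    #include <linux/delay.h>

    static int __init ksyms_demo_init(void)
    {
            /* strstr and udelay (backed by the delay exports above) resolve
             * through the EXPORT_SYMBOL table in m32r_ksyms.c */
            if (strstr("m32r kernel", "kernel"))
                    udelay(10);
            return 0;
    }

    static void __exit ksyms_demo_exit(void)
    {
    }

    module_init(ksyms_demo_init);
    module_exit(ksyms_demo_exit);
    MODULE_LICENSE("GPL");
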
diff --git a/arch/m32r/kernel/module.c b/arch/m32r/kernel/module.c
new file mode 100644
index 00000000000..f6a79a016ce
--- /dev/null
+++ b/arch/m32r/kernel/module.c
@@ -0,0 +1,259 @@
+/* Kernel module help for M32R.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/config.h>
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt...)
+#endif
+
+void *module_alloc(unsigned long size)
+{
+ if (size == 0)
+ return NULL;
+#ifdef CONFIG_MMU
+ return vmalloc_exec(size);
+#else
+ return vmalloc(size);
+#endif
+}
+
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+ vfree(module_region);
+ /* FIXME: If module_region == mod->init_region, trim exception
+ table entries. */
+}
+
+/* We don't need anything special. */
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+ char *secstrings,
+ struct module *mod)
+{
+ return 0;
+}
+
+#define COPY_UNALIGNED_WORD(sw, tw, align) \
+{ \
+ void *__s = &(sw), *__t = &(tw); \
+ unsigned short *__s2 = __s, *__t2 =__t; \
+ unsigned char *__s1 = __s, *__t1 =__t; \
+ switch ((align)) \
+ { \
+ case 0: \
+ *(unsigned long *) __t = *(unsigned long *) __s; \
+ break; \
+ case 2: \
+ *__t2++ = *__s2++; \
+ *__t2 = *__s2; \
+ break; \
+ default: \
+ *__t1++ = *__s1++; \
+ *__t1++ = *__s1++; \
+ *__t1++ = *__s1++; \
+ *__t1 = *__s1; \
+ break; \
+ } \
+}
+
+#define COPY_UNALIGNED_HWORD(sw, tw, align) \
+ { \
+ void *__s = &(sw), *__t = &(tw); \
+ unsigned short *__s2 = __s, *__t2 =__t; \
+ unsigned char *__s1 = __s, *__t1 =__t; \
+ switch ((align)) \
+ { \
+ case 0: \
+ *__t2 = *__s2; \
+ break; \
+ default: \
+ *__t1++ = *__s1++; \
+ *__t1 = *__s1; \
+ break; \
+ } \
+ }
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ unsigned int i;
+ Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ Elf32_Addr relocation;
+ uint32_t *location;
+ uint32_t value;
+ unsigned short *hlocation;
+ unsigned short hvalue;
+ int svalue;
+ int align;
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rel[i].r_info);
+ relocation = sym->st_value + rel[i].r_addend;
+ align = (int)location & 3;
+
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_M32R_32_RELA:
+ COPY_UNALIGNED_WORD (*location, value, align);
+ value += relocation;
+ COPY_UNALIGNED_WORD (value, *location, align);
+ break;
+ case R_M32R_HI16_ULO_RELA:
+ COPY_UNALIGNED_WORD (*location, value, align);
+ relocation = (relocation >>16) & 0xffff;
+ /* RELA must have 0 in the relocation field. */
+ value += relocation;
+ COPY_UNALIGNED_WORD (value, *location, align);
+ break;
+ case R_M32R_HI16_SLO_RELA:
+ COPY_UNALIGNED_WORD (*location, value, align);
+ if (relocation & 0x8000) relocation += 0x10000;
+ relocation = (relocation >>16) & 0xffff;
+ /* RELA must have 0 in the relocation field. */
+ value += relocation;
+ COPY_UNALIGNED_WORD (value, *location, align);
+ break;
+ case R_M32R_16_RELA:
+ hlocation = (unsigned short *)location;
+ relocation = relocation & 0xffff;
+ /* RELA must have 0 in the relocation field. */
+ hvalue = relocation;
+ COPY_UNALIGNED_WORD (hvalue, *hlocation, align);
+ break;
+ case R_M32R_SDA16_RELA:
+ case R_M32R_LO16_RELA:
+ COPY_UNALIGNED_WORD (*location, value, align);
+ relocation = relocation & 0xffff;
+ /* RELA must have 0 in the relocation field. */
+ value += relocation;
+ COPY_UNALIGNED_WORD (value, *location, align);
+ break;
+ case R_M32R_24_RELA:
+ COPY_UNALIGNED_WORD (*location, value, align);
+ relocation = relocation & 0xffffff;
+ /* RELA must have 0 in the relocation field. */
+ value += relocation;
+ COPY_UNALIGNED_WORD (value, *location, align);
+ break;
+ case R_M32R_18_PCREL_RELA:
+ relocation = (relocation - (Elf32_Addr) location);
+ if (relocation < -0x20000 || 0x1fffc < relocation)
+ {
+ printk(KERN_ERR "module %s: relocation overflow: %u\n",
+ me->name, relocation);
+ return -ENOEXEC;
+ }
+ COPY_UNALIGNED_WORD (*location, value, align);
+ if (value & 0xffff)
+ {
+ /* RELA must have 0 in the relocation field. */
+ printk(KERN_ERR "module %s: illegal relocation field: %u\n",
+ me->name, value);
+ return -ENOEXEC;
+ }
+ relocation = (relocation >> 2) & 0xffff;
+ value += relocation;
+ COPY_UNALIGNED_WORD (value, *location, align);
+ break;
+ case R_M32R_10_PCREL_RELA:
+ hlocation = (unsigned short *)location;
+ relocation = (relocation - (Elf32_Addr) location);
+ COPY_UNALIGNED_HWORD (*hlocation, hvalue, align);
+ svalue = (int)hvalue;
+ svalue = (signed char)svalue << 2;
+ relocation += svalue;
+ relocation = (relocation >> 2) & 0xff;
+ hvalue = hvalue & 0xff00;
+ hvalue += relocation;
+ COPY_UNALIGNED_HWORD (hvalue, *hlocation, align);
+ break;
+ case R_M32R_26_PCREL_RELA:
+ relocation = (relocation - (Elf32_Addr) location);
+ if (relocation < -0x2000000 || 0x1fffffc < relocation)
+ {
+ printk(KERN_ERR "module %s: relocation overflow: %u\n",
+ me->name, relocation);
+ return -ENOEXEC;
+ }
+ COPY_UNALIGNED_WORD (*location, value, align);
+ if (value & 0xffffff)
+ {
+ /* RELA must have 0 in the relocation field. */
+ printk(KERN_ERR "module %s: illegal relocation field: %u\n",
+ me->name, value);
+ return -ENOEXEC;
+ }
+ relocation = (relocation >> 2) & 0xffffff;
+ value += relocation;
+ COPY_UNALIGNED_WORD (value, *location, align);
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+ me->name, ELF32_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
+
+int apply_relocate(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+#if 0
+ printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
+ me->name);
+ return -ENOEXEC;
+#endif
+ return 0;
+
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+}
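
The R_M32R_HI16_SLO_RELA case above uses the usual high/low carry trick: because the paired low-half value ends up sign-extended by the instruction sequence, the high half must be pre-incremented whenever bit 15 of the target is set. A hedged, stand-alone sketch of that round trip (the address is an arbitrary example, not taken from this commit):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t addr = 0x0800a678;  /* arbitrary example target address */
            uint32_t hi = addr;
            uint32_t lo = addr & 0xffff;

            /* mirrors R_M32R_HI16_SLO_RELA: carry when the low half would
             * sign-extend as a negative value */
            if (hi & 0x8000)
                    hi += 0x10000;
            hi = (hi >> 16) & 0xffff;

            /* the instruction pair later combines hi with the sign-extended
             * low half, reconstructing the original address */
            uint32_t rebuilt = (hi << 16) + (uint32_t)(int16_t)lo;
            printf("addr=%#x hi=%#x lo=%#x rebuilt=%#x\n",
                   (unsigned)addr, (unsigned)hi, (unsigned)lo, (unsigned)rebuilt);
            return 0;
    }
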
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
new file mode 100644
index 00000000000..b556c3cf649
--- /dev/null
+++ b/arch/m32r/kernel/process.c
@@ -0,0 +1,359 @@
+/*
+ * linux/arch/m32r/kernel/process.c
+ *
+ * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto
+ * Taken from sh version.
+ * Copyright (C) 1995 Linus Torvalds
+ * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
+ */
+
+#undef DEBUG_PROCESS
+#ifdef DEBUG_PROCESS
+#define DPRINTK(fmt, args...) printk("%s:%d:%s: " fmt, __FILE__, __LINE__, \
+ __FUNCTION__, ##args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <linux/fs.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/hardirq.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/elf.h>
+#include <asm/m32r.h>
+
+#include <linux/err.h>
+
+static int hlt_counter=0;
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+ return tsk->thread.lr;
+}
+
+/*
+ * Powermanagement idle function, if any..
+ */
+void (*pm_idle)(void) = NULL;
+
+void disable_hlt(void)
+{
+ hlt_counter++;
+}
+
+EXPORT_SYMBOL(disable_hlt);
+
+void enable_hlt(void)
+{
+ hlt_counter--;
+}
+
+EXPORT_SYMBOL(enable_hlt);
+
+/*
+ * We use this if we don't have any better
+ * idle routine.
+ */
+void default_idle(void)
+{
+ /* M32R_FIXME: Please use "cpu_sleep" mode. */
+ cpu_relax();
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle (void)
+{
+ /* M32R_FIXME */
+ cpu_relax();
+}
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle (void)
+{
+ /* endless idle loop with no priority at all */
+ while (1) {
+ while (!need_resched()) {
+ void (*idle)(void) = pm_idle;
+
+ if (!idle)
+ idle = default_idle;
+
+ idle();
+ }
+ schedule();
+ }
+}
+
+void machine_restart(char *__unused)
+{
+ printk("Please push reset button!\n");
+ while (1)
+ cpu_relax();
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+void machine_halt(void)
+{
+ printk("Please push reset button!\n");
+ while (1)
+ cpu_relax();
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+void machine_power_off(void)
+{
+ /* M32R_FIXME */
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+static int __init idle_setup (char *str)
+{
+ if (!strncmp(str, "poll", 4)) {
+ printk("using poll in idle threads.\n");
+ pm_idle = poll_idle;
+ } else if (!strncmp(str, "sleep", 5)) {
+ printk("using sleep in idle threads.\n");
+ pm_idle = default_idle;
+ }
+
+ return 1;
+}
+
+__setup("idle=", idle_setup);
+
+void show_regs(struct pt_regs * regs)
+{
+ printk("\n");
+ printk("BPC[%08lx]:PSW[%08lx]:LR [%08lx]:FP [%08lx]\n", \
+ regs->bpc, regs->psw, regs->lr, regs->fp);
+ printk("BBPC[%08lx]:BBPSW[%08lx]:SPU[%08lx]:SPI[%08lx]\n", \
+ regs->bbpc, regs->bbpsw, regs->spu, regs->spi);
+ printk("R0 [%08lx]:R1 [%08lx]:R2 [%08lx]:R3 [%08lx]\n", \
+ regs->r0, regs->r1, regs->r2, regs->r3);
+ printk("R4 [%08lx]:R5 [%08lx]:R6 [%08lx]:R7 [%08lx]\n", \
+ regs->r4, regs->r5, regs->r6, regs->r7);
+ printk("R8 [%08lx]:R9 [%08lx]:R10[%08lx]:R11[%08lx]\n", \
+ regs->r8, regs->r9, regs->r10, regs->r11);
+ printk("R12[%08lx]\n", \
+ regs->r12);
+
+#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
+ printk("ACC0H[%08lx]:ACC0L[%08lx]\n", \
+ regs->acc0h, regs->acc0l);
+ printk("ACC1H[%08lx]:ACC1L[%08lx]\n", \
+ regs->acc1h, regs->acc1l);
+#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
+ printk("ACCH[%08lx]:ACCL[%08lx]\n", \
+ regs->acch, regs->accl);
+#else
+#error unknown isa configuration
+#endif
+}
+
+/*
+ * Create a kernel thread
+ */
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
+ * that haven't done an "execve()") should use this: it will work within
+ * a system call from a "real" process, but the process memory space will
+ * not be free'd until both the parent and the child have exited.
+ */
+static void kernel_thread_helper(void *nouse, int (*fn)(void *), void *arg)
+{
+ fn(arg);
+ do_exit(-1);
+}
+
+int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof (regs));
+ regs.r1 = (unsigned long)fn;
+ regs.r2 = (unsigned long)arg;
+
+ regs.bpc = (unsigned long)kernel_thread_helper;
+
+ regs.psw = M32R_PSW_BIE;
+
+ /* Ok, create the new process. */
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
+ NULL);
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+ /* Nothing to do. */
+ DPRINTK("pid = %d\n", current->pid);
+}
+
+void flush_thread(void)
+{
+ DPRINTK("pid = %d\n", current->pid);
+ memset(&current->thread.debug_trap, 0, sizeof(struct debug_trap));
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+ /* do nothing */
+ DPRINTK("pid = %d\n", dead_task->pid);
+}
+
+/* Fill in the fpu structure for a core dump.. */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+ return 0; /* Task didn't use the fpu at all. */
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long spu,
+ unsigned long unused, struct task_struct *tsk, struct pt_regs *regs)
+{
+ struct pt_regs *childregs;
+ unsigned long sp = (unsigned long)tsk->thread_info + THREAD_SIZE;
+ extern void ret_from_fork(void);
+
+ /* Copy registers */
+ sp -= sizeof (struct pt_regs);
+ childregs = (struct pt_regs *)sp;
+ *childregs = *regs;
+
+ childregs->spu = spu;
+ childregs->r0 = 0; /* Child gets zero as return value */
+ regs->r0 = tsk->pid;
+ tsk->thread.sp = (unsigned long)childregs;
+ tsk->thread.lr = (unsigned long)ret_from_fork;
+
+ return 0;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+ /* M32R_FIXME */
+}
+
+/*
+ * Capture the user space registers if the task is not running (in user space)
+ */
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+{
+ /* M32R_FIXME */
+ return 1;
+}
+
+asmlinkage int sys_fork(unsigned long r0, unsigned long r1, unsigned long r2,
+ unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6,
+ struct pt_regs regs)
+{
+#ifdef CONFIG_MMU
+ return do_fork(SIGCHLD, regs.spu, &regs, 0, NULL, NULL);
+#else
+ return -EINVAL;
+#endif /* CONFIG_MMU */
+}
+
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+ unsigned long parent_tidptr,
+ unsigned long child_tidptr,
+ unsigned long r4, unsigned long r5, unsigned long r6,
+ struct pt_regs regs)
+{
+ if (!newsp)
+ newsp = regs.spu;
+
+ return do_fork(clone_flags, newsp, &regs, 0,
+ (int __user *)parent_tidptr, (int __user *)child_tidptr);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage int sys_vfork(unsigned long r0, unsigned long r1, unsigned long r2,
+ unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6,
+ struct pt_regs regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.spu, &regs, 0,
+ NULL, NULL);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
+ char __user * __user *uenvp,
+ unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, struct pt_regs regs)
+{
+ int error;
+ char *filename;
+
+ filename = getname(ufilename);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+
+ error = do_execve(filename, uargv, uenvp, &regs);
+ if (error == 0) {
+ task_lock(current);
+ current->ptrace &= ~PT_DTRACE;
+ task_unlock(current);
+ }
+ putname(filename);
+out:
+ return error;
+}
+
+/*
+ * These bracket the sleeping functions..
+ */
+#define first_sched ((unsigned long) scheduling_functions_start_here)
+#define last_sched ((unsigned long) scheduling_functions_end_here)
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ /* M32R_FIXME */
+ return (0);
+}
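
kernel_thread() above passes fn and arg through r1/r2 so that kernel_thread_helper() can invoke them once the child is scheduled, and do_exit() runs when fn returns. A hedged sketch of a typical 2.6-era caller (the thread body, name and clone flags are illustrative, not taken from this commit):

    #include <linux/sched.h>

    static int demo_thread(void *arg)
    {
            daemonize("demo");      /* detach from the spawning context */
            /* ... do the work; returning lets kernel_thread_helper()
             * fall through to do_exit() */
            return 0;
    }

    static void demo_spawn(void)
    {
            kernel_thread(demo_thread, NULL, CLONE_FS | CLONE_FILES);
    }
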
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
new file mode 100644
index 00000000000..8b40f362dd6
--- /dev/null
+++ b/arch/m32r/kernel/ptrace.c
@@ -0,0 +1,829 @@
+/*
+ * linux/arch/m32r/kernel/ptrace.c
+ *
+ * Copyright (C) 2002 Hirokazu Takata, Takeo Takahashi
+ * Copyright (C) 2004 Hirokazu Takata, Kei Sakamoto
+ *
+ * Original x86 implementation:
+ * By Ross Biro 1/23/92
+ * edited by Linus Torvalds
+ *
+ * Some code taken from sh version:
+ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ * Some code taken from arm version:
+ * Copyright (C) 2000 Russell King
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/string.h>
+
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+
+/*
+ * Get the address of the live pt_regs for the specified task.
+ * These are saved onto the top kernel stack when the process
+ * is not running.
+ *
+ * Note: if a user thread is execve'd from kernel space, the
+ * kernel stack will not be empty on entry to the kernel, so
+ * ptracing these tasks will fail.
+ */
+static inline struct pt_regs *
+get_user_regs(struct task_struct *task)
+{
+ return (struct pt_regs *)
+ ((unsigned long)task->thread_info + THREAD_SIZE
+ - sizeof(struct pt_regs));
+}
+
+/*
+ * This routine will get a word off of the process kernel stack.
+ */
+static inline unsigned long int
+get_stack_long(struct task_struct *task, int offset)
+{
+ unsigned long *stack;
+
+ stack = (unsigned long *)get_user_regs(task);
+
+ return stack[offset];
+}
+
+/*
+ * This routine will put a word on the process kernel stack.
+ */
+static inline int
+put_stack_long(struct task_struct *task, int offset, unsigned long data)
+{
+ unsigned long *stack;
+
+ stack = (unsigned long *)get_user_regs(task);
+ stack[offset] = data;
+
+ return 0;
+}
+
+static int reg_offset[] = {
+ PT_R0, PT_R1, PT_R2, PT_R3, PT_R4, PT_R5, PT_R6, PT_R7,
+ PT_R8, PT_R9, PT_R10, PT_R11, PT_R12, PT_FP, PT_LR, PT_SPU,
+};
+
+/*
+ * Read the word at offset "off" into the "struct user". We
+ * actually access the pt_regs stored on the kernel stack.
+ */
+static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
+ unsigned long __user *data)
+{
+ unsigned long tmp;
+#ifndef NO_FPU
+ struct user * dummy = NULL;
+#endif
+
+ if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3))
+ return -EIO;
+
+ off >>= 2;
+ switch (off) {
+ case PT_EVB:
+ __asm__ __volatile__ (
+ "mvfc %0, cr5 \n\t"
+ : "=r" (tmp)
+ );
+ break;
+ case PT_CBR: {
+ unsigned long psw;
+ psw = get_stack_long(tsk, PT_PSW);
+ tmp = ((psw >> 8) & 1);
+ }
+ break;
+ case PT_PSW: {
+ unsigned long psw, bbpsw;
+ psw = get_stack_long(tsk, PT_PSW);
+ bbpsw = get_stack_long(tsk, PT_BBPSW);
+ tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8);
+ }
+ break;
+ case PT_PC:
+ tmp = get_stack_long(tsk, PT_BPC);
+ break;
+ case PT_BPC:
+ off = PT_BBPC;
+ /* fall through */
+ default:
+ if (off < (sizeof(struct pt_regs) >> 2))
+ tmp = get_stack_long(tsk, off);
+#ifndef NO_FPU
+ else if (off >= (long)(&dummy->fpu >> 2) &&
+ off < (long)(&dummy->u_fpvalid >> 2)) {
+ if (!tsk_used_math(tsk)) {
+ if (off == (long)(&dummy->fpu.fpscr >> 2))
+ tmp = FPSCR_INIT;
+ else
+ tmp = 0;
+ } else
+ tmp = ((long *)(&tsk->thread.fpu >> 2))
+ [off - (long)&dummy->fpu];
+ } else if (off == (long)(&dummy->u_fpvalid >> 2))
+ tmp = !!tsk_used_math(tsk);
+#endif /* not NO_FPU */
+ else
+ tmp = 0;
+ }
+
+ return put_user(tmp, data);
+}
+
+static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
+ unsigned long data)
+{
+ int ret = -EIO;
+#ifndef NO_FPU
+ struct user * dummy = NULL;
+#endif
+
+ if ((off & 3) || off < 0 ||
+ off > sizeof(struct user) - 3)
+ return -EIO;
+
+ off >>= 2;
+ switch (off) {
+ case PT_EVB:
+ case PT_BPC:
+ case PT_SPI:
+ /* We don't allow modifying evb, bpc, or spi. */
+ ret = 0;
+ break;
+ case PT_PSW:
+ case PT_CBR: {
+ /* Only the cbr bit in psw may be modified */
+ unsigned long psw;
+ psw = get_stack_long(tsk, PT_PSW);
+ psw = (psw & ~0x100) | ((data & 1) << 8);
+ ret = put_stack_long(tsk, PT_PSW, psw);
+ }
+ break;
+ case PT_PC:
+ off = PT_BPC;
+ data &= ~1;
+ /* fall through */
+ default:
+ if (off < (sizeof(struct pt_regs) >> 2))
+ ret = put_stack_long(tsk, off, data);
+#ifndef NO_FPU
+ else if (off >= (long)(&dummy->fpu >> 2) &&
+ off < (long)(&dummy->u_fpvalid >> 2)) {
+ set_stopped_child_used_math(tsk);
+ ((long *)&tsk->thread.fpu)
+ [off - (long)&dummy->fpu] = data;
+ ret = 0;
+ } else if (off == (long)(&dummy->u_fpvalid >> 2)) {
+ conditional_stopped_child_used_math(data, tsk);
+ ret = 0;
+ }
+#endif /* not NO_FPU */
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Get all user integer registers.
+ */
+static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
+{
+ struct pt_regs *regs = get_user_regs(tsk);
+
+ return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
+}
+
+/*
+ * Set all user integer registers.
+ */
+static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
+{
+ struct pt_regs newregs;
+ int ret;
+
+ ret = -EFAULT;
+ if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
+ struct pt_regs *regs = get_user_regs(tsk);
+ *regs = newregs;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+
+static inline int
+check_condition_bit(struct task_struct *child)
+{
+ return (int)((get_stack_long(child, PT_PSW) >> 8) & 1);
+}
+
+static int
+check_condition_src(unsigned long op, unsigned long regno1,
+ unsigned long regno2, struct task_struct *child)
+{
+ unsigned long reg1, reg2;
+
+ reg2 = get_stack_long(child, reg_offset[regno2]);
+
+ switch (op) {
+ case 0x0: /* BEQ */
+ reg1 = get_stack_long(child, reg_offset[regno1]);
+ return reg1 == reg2;
+ case 0x1: /* BNE */
+ reg1 = get_stack_long(child, reg_offset[regno1]);
+ return reg1 != reg2;
+ case 0x8: /* BEQZ */
+ return reg2 == 0;
+ case 0x9: /* BNEZ */
+ return reg2 != 0;
+ case 0xa: /* BLTZ */
+ return (int)reg2 < 0;
+ case 0xb: /* BGEZ */
+ return (int)reg2 >= 0;
+ case 0xc: /* BLEZ */
+ return (int)reg2 <= 0;
+ case 0xd: /* BGTZ */
+ return (int)reg2 > 0;
+ default:
+ /* never reached */
+ return 0;
+ }
+}
+
+static void
+compute_next_pc_for_16bit_insn(unsigned long insn, unsigned long pc,
+ unsigned long *next_pc,
+ struct task_struct *child)
+{
+ unsigned long op, op2, op3;
+ unsigned long disp;
+ unsigned long regno;
+ int parallel = 0;
+
+ if (insn & 0x00008000)
+ parallel = 1;
+ if (pc & 3)
+ insn &= 0x7fff; /* right slot */
+ else
+ insn >>= 16; /* left slot */
+
+ op = (insn >> 12) & 0xf;
+ op2 = (insn >> 8) & 0xf;
+ op3 = (insn >> 4) & 0xf;
+
+ if (op == 0x7) {
+ switch (op2) {
+ case 0xd: /* BNC */
+ case 0x9: /* BNCL */
+ if (!check_condition_bit(child)) {
+ disp = (long)(insn << 24) >> 22;
+ *next_pc = (pc & ~0x3) + disp;
+ return;
+ }
+ break;
+ case 0x8: /* BCL */
+ case 0xc: /* BC */
+ if (check_condition_bit(child)) {
+ disp = (long)(insn << 24) >> 22;
+ *next_pc = (pc & ~0x3) + disp;
+ return;
+ }
+ break;
+ case 0xe: /* BL */
+ case 0xf: /* BRA */
+ disp = (long)(insn << 24) >> 22;
+ *next_pc = (pc & ~0x3) + disp;
+ return;
+ break;
+ }
+ } else if (op == 0x1) {
+ switch (op2) {
+ case 0x0:
+ if (op3 == 0xf) { /* TRAP */
+#if 1
+ /* pass through */
+#else
+ /* kernel space is not allowed as next_pc */
+ unsigned long evb;
+ unsigned long trapno;
+ trapno = insn & 0xf;
+ __asm__ __volatile__ (
+ "mvfc %0, cr5\n"
+ :"=r"(evb)
+ :
+ );
+ *next_pc = evb + (trapno << 2);
+ return;
+#endif
+ } else if (op3 == 0xd) { /* RTE */
+ *next_pc = get_stack_long(child, PT_BPC);
+ return;
+ }
+ break;
+ case 0xc: /* JC */
+ if (op3 == 0xc && check_condition_bit(child)) {
+ regno = insn & 0xf;
+ *next_pc = get_stack_long(child,
+ reg_offset[regno]);
+ return;
+ }
+ break;
+ case 0xd: /* JNC */
+ if (op3 == 0xc && !check_condition_bit(child)) {
+ regno = insn & 0xf;
+ *next_pc = get_stack_long(child,
+ reg_offset[regno]);
+ return;
+ }
+ break;
+ case 0xe: /* JL */
+ case 0xf: /* JMP */
+ if (op3 == 0xc) { /* JMP */
+ regno = insn & 0xf;
+ *next_pc = get_stack_long(child,
+ reg_offset[regno]);
+ return;
+ }
+ break;
+ }
+ }
+ if (parallel)
+ *next_pc = pc + 4;
+ else
+ *next_pc = pc + 2;
+}
+
+static void
+compute_next_pc_for_32bit_insn(unsigned long insn, unsigned long pc,
+ unsigned long *next_pc,
+ struct task_struct *child)
+{
+ unsigned long op;
+ unsigned long op2;
+ unsigned long disp;
+ unsigned long regno1, regno2;
+
+ op = (insn >> 28) & 0xf;
+ if (op == 0xf) { /* branch 24-bit relative */
+ op2 = (insn >> 24) & 0xf;
+ switch (op2) {
+ case 0xd: /* BNC */
+ case 0x9: /* BNCL */
+ if (!check_condition_bit(child)) {
+ disp = (long)(insn << 8) >> 6;
+ *next_pc = (pc & ~0x3) + disp;
+ return;
+ }
+ break;
+ case 0x8: /* BCL */
+ case 0xc: /* BC */
+ if (check_condition_bit(child)) {
+ disp = (long)(insn << 8) >> 6;
+ *next_pc = (pc & ~0x3) + disp;
+ return;
+ }
+ break;
+ case 0xe: /* BL */
+ case 0xf: /* BRA */
+ disp = (long)(insn << 8) >> 6;
+ *next_pc = (pc & ~0x3) + disp;
+ return;
+ }
+ } else if (op == 0xb) { /* branch 16-bit relative */
+ op2 = (insn >> 20) & 0xf;
+ switch (op2) {
+ case 0x0: /* BEQ */
+ case 0x1: /* BNE */
+ case 0x8: /* BEQZ */
+ case 0x9: /* BNEZ */
+ case 0xa: /* BLTZ */
+ case 0xb: /* BGEZ */
+ case 0xc: /* BLEZ */
+ case 0xd: /* BGTZ */
+ regno1 = ((insn >> 24) & 0xf);
+ regno2 = ((insn >> 16) & 0xf);
+ if (check_condition_src(op2, regno1, regno2, child)) {
+ disp = (long)(insn << 16) >> 14;
+ *next_pc = (pc & ~0x3) + disp;
+ return;
+ }
+ break;
+ }
+ }
+ *next_pc = pc + 4;
+}
+
+static inline void
+compute_next_pc(unsigned long insn, unsigned long pc,
+ unsigned long *next_pc, struct task_struct *child)
+{
+ if (insn & 0x80000000)
+ compute_next_pc_for_32bit_insn(insn, pc, next_pc, child);
+ else
+ compute_next_pc_for_16bit_insn(insn, pc, next_pc, child);
+}
+
+static int
+register_debug_trap(struct task_struct *child, unsigned long next_pc,
+ unsigned long next_insn, unsigned long *code)
+{
+ struct debug_trap *p = &child->thread.debug_trap;
+ unsigned long addr = next_pc & ~3;
+
+ if (p->nr_trap == MAX_TRAPS) {
+ printk("kernel BUG at %s %d: p->nr_trap = %d\n",
+ __FILE__, __LINE__, p->nr_trap);
+ return -1;
+ }
+ p->addr[p->nr_trap] = addr;
+ p->insn[p->nr_trap] = next_insn;
+ p->nr_trap++;
+ if (next_pc & 3) {
+ *code = (next_insn & 0xffff0000) | 0x10f1;
+ /* xxx --> TRAP1 */
+ } else {
+ if ((next_insn & 0x80000000) || (next_insn & 0x8000)) {
+ *code = 0x10f17000;
+ /* TRAP1 --> NOP */
+ } else {
+ *code = (next_insn & 0xffff) | 0x10f10000;
+ /* TRAP1 --> xxx */
+ }
+ }
+ return 0;
+}
+
+static int
+unregister_debug_trap(struct task_struct *child, unsigned long addr,
+ unsigned long *code)
+{
+ struct debug_trap *p = &child->thread.debug_trap;
+ int i;
+
+ /* Search debug trap entry. */
+ for (i = 0; i < p->nr_trap; i++) {
+ if (p->addr[i] == addr)
+ break;
+ }
+ if (i >= p->nr_trap) {
+ /* The trap may have been requested by the debugger;
+ * ptrace should do nothing in that case.
+ */
+ return 0;
+ }
+
+ /* Recover the original instruction code. */
+ *code = p->insn[i];
+
+ /* Shift debug trap entries. */
+ while (i < p->nr_trap - 1) {
+ p->insn[i] = p->insn[i + 1];
+ p->addr[i] = p->addr[i + 1];
+ i++;
+ }
+ p->nr_trap--;
+ return 1;
+}
+
+static void
+unregister_all_debug_traps(struct task_struct *child)
+{
+ struct debug_trap *p = &child->thread.debug_trap;
+ int i;
+
+ for (i = 0; i < p->nr_trap; i++)
+ access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1);
+ p->nr_trap = 0;
+}
+
+static inline void
+invalidate_cache(void)
+{
+#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)
+
+ _flush_cache_copyback_all();
+
+#else /* ! CONFIG_CHIP_M32700 */
+
+ /* Invalidate cache */
+ __asm__ __volatile__ (
+ "ldi r0, #-1 \n\t"
+ "ldi r1, #0 \n\t"
+ "stb r1, @r0 ; cache off \n\t"
+ "; \n\t"
+ "ldi r0, #-2 \n\t"
+ "ldi r1, #1 \n\t"
+ "stb r1, @r0 ; cache invalidate \n\t"
+ ".fillinsn \n"
+ "0: \n\t"
+ "ldb r1, @r0 ; invalidate check \n\t"
+ "bnez r1, 0b \n\t"
+ "; \n\t"
+ "ldi r0, #-1 \n\t"
+ "ldi r1, #1 \n\t"
+ "stb r1, @r0 ; cache on \n\t"
+ : : : "r0", "r1", "memory"
+ );
+ /* FIXME: copying-back d-cache and invalidating i-cache are needed.
+ */
+#endif /* CONFIG_CHIP_M32700 */
+}
+
+/* Embed a debug trap (TRAP1) code */
+static int
+embed_debug_trap(struct task_struct *child, unsigned long next_pc)
+{
+ unsigned long next_insn, code;
+ unsigned long addr = next_pc & ~3;
+
+ if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
+ != sizeof(next_insn)) {
+ return -1; /* error */
+ }
+
+ /* Set a trap code. */
+ if (register_debug_trap(child, next_pc, next_insn, &code)) {
+ return -1; /* error */
+ }
+ if (access_process_vm(child, addr, &code, sizeof(code), 1)
+ != sizeof(code)) {
+ return -1; /* error */
+ }
+ return 0; /* success */
+}
+
+void
+withdraw_debug_trap(struct pt_regs *regs)
+{
+ unsigned long addr;
+ unsigned long code;
+
+ addr = (regs->bpc - 2) & ~3;
+ regs->bpc -= 2;
+ if (unregister_debug_trap(current, addr, &code)) {
+ access_process_vm(current, addr, &code, sizeof(code), 1);
+ invalidate_cache();
+ }
+}
+
+static void
+init_debug_traps(struct task_struct *child)
+{
+ struct debug_trap *p = &child->thread.debug_trap;
+ int i;
+ p->nr_trap = 0;
+ for (i = 0; i < MAX_TRAPS; i++) {
+ p->addr[i] = 0;
+ p->insn[i] = 0;
+ }
+}
+
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* nothing to do.. */
+}
+
+static int
+do_ptrace(long request, struct task_struct *child, long addr, long data)
+{
+ unsigned long tmp;
+ int ret;
+
+ switch (request) {
+ /*
+ * read word at location "addr" in the child process.
+ */
+ case PTRACE_PEEKTEXT:
+ case PTRACE_PEEKDATA:
+ ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ if (ret == sizeof(tmp))
+ ret = put_user(tmp,(unsigned long __user *) data);
+ else
+ ret = -EIO;
+ break;
+
+ /*
+ * read the word at location addr in the USER area.
+ */
+ case PTRACE_PEEKUSR:
+ ret = ptrace_read_user(child, addr,
+ (unsigned long __user *)data);
+ break;
+
+ /*
+ * write the word at location addr.
+ */
+ case PTRACE_POKETEXT:
+ case PTRACE_POKEDATA:
+ ret = access_process_vm(child, addr, &data, sizeof(data), 1);
+ if (ret == sizeof(data)) {
+ ret = 0;
+ if (request == PTRACE_POKETEXT) {
+ invalidate_cache();
+ }
+ } else {
+ ret = -EIO;
+ }
+ break;
+
+ /*
+ * write the word at location addr in the USER area.
+ */
+ case PTRACE_POKEUSR:
+ ret = ptrace_write_user(child, addr, data);
+ break;
+
+ /*
+ * continue/restart and stop at next (return from) syscall
+ */
+ case PTRACE_SYSCALL:
+ case PTRACE_CONT:
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ if (request == PTRACE_SYSCALL)
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ else
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->exit_code = data;
+ wake_up_process(child);
+ ret = 0;
+ break;
+
+ /*
+ * make the child exit. Best I can do is send it a sigkill.
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL: {
+ ret = 0;
+ unregister_all_debug_traps(child);
+ invalidate_cache();
+ if (child->exit_state == EXIT_ZOMBIE) /* already dead */
+ break;
+ child->exit_code = SIGKILL;
+ wake_up_process(child);
+ break;
+ }
+
+ /*
+ * execute single instruction.
+ */
+ case PTRACE_SINGLESTEP: {
+ unsigned long next_pc;
+ unsigned long pc, insn;
+
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ if ((child->ptrace & PT_DTRACE) == 0) {
+ /* Spurious delayed TF traps may occur */
+ child->ptrace |= PT_DTRACE;
+ }
+
+ /* Compute next pc. */
+ pc = get_stack_long(child, PT_BPC);
+
+ if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
+ != sizeof(insn))
+ break;
+
+ compute_next_pc(insn, pc, &next_pc, child);
+ if (next_pc & 0x80000000)
+ break;
+
+ if (embed_debug_trap(child, next_pc))
+ break;
+
+ invalidate_cache();
+ child->exit_code = data;
+
+ /* give it a chance to run. */
+ wake_up_process(child);
+ ret = 0;
+ break;
+ }
+
+ /*
+ * detach a process that was attached.
+ */
+ case PTRACE_DETACH:
+ ret = 0;
+ ret = ptrace_detach(child, data);
+ break;
+
+ case PTRACE_GETREGS:
+ ret = ptrace_getregs(child, (void __user *)data);
+ break;
+
+ case PTRACE_SETREGS:
+ ret = ptrace_setregs(child, (void __user *)data);
+ break;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
+{
+ struct task_struct *child;
+ int ret;
+
+ lock_kernel();
+ ret = -EPERM;
+ if (request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->ptrace & PT_PTRACED)
+ goto out;
+ /* set the ptrace bit in the process flags. */
+ current->ptrace |= PT_PTRACED;
+ ret = 0;
+ goto out;
+ }
+ ret = -ESRCH;
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
+ read_unlock(&tasklist_lock);
+ if (!child)
+ goto out;
+
+ ret = -EPERM;
+ if (pid == 1) /* you may not mess with init */
+ goto out;
+
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ if (ret == 0)
+ init_debug_traps(child);
+ goto out_tsk;
+ }
+
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (ret == 0)
+ ret = do_ptrace(request, child, addr, data);
+
+out_tsk:
+ put_task_struct(child);
+out:
+ unlock_kernel();
+
+ return ret;
+}
+
+/* notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+void do_syscall_trace(void)
+{
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+ if (!(current->ptrace & PT_PTRACED))
+ return;
+ /* the 0x80 provides a way for the tracing parent to distinguish
+ between a syscall stop and SIGTRAP delivery */
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0));
+
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
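
PTRACE_SINGLESTEP above is implemented in software: compute_next_pc() decodes the instruction at BPC, embed_debug_trap() plants a TRAP1 at the computed next address, and withdraw_debug_trap() restores the original word when the trap fires. From the tracer's side this is invisible; a hedged user-space sketch (not taken from this commit) of stepping an already-stopped tracee by one instruction:

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    /* step a ptrace-stopped tracee by exactly one instruction */
    static int step_once(pid_t pid)
    {
            int status;

            if (ptrace(PTRACE_SINGLESTEP, pid, 0, 0) == -1)
                    return -1;
            /* the planted TRAP1 delivers SIGTRAP and stops the tracee again */
            if (waitpid(pid, &status, 0) == -1)
                    return -1;
            return WIFSTOPPED(status) ? 0 : -1;
    }
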
diff --git a/arch/m32r/kernel/semaphore.c b/arch/m32r/kernel/semaphore.c
new file mode 100644
index 00000000000..9a6e6d754dd
--- /dev/null
+++ b/arch/m32r/kernel/semaphore.c
@@ -0,0 +1,186 @@
+/*
+ * linux/arch/m32r/kernel/semaphore.c
+ * orig : i386 2.6.4
+ *
+ * M32R semaphore implementation.
+ *
+ * Copyright (c) 2002 - 2004 Hitoshi Yamamoto
+ */
+
+/*
+ * i386 semaphore implementation.
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ *
+ * Portions Copyright 1999 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <asm/semaphore.h>
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to acquire the semaphore, while the "sleepers"
+ * variable is a count of such acquires.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * "sleeping" and the contention routine ordering is protected
+ * by the spinlock in the semaphore's waitqueue head.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+
+/*
+ * Logic:
+ * - only on a boundary condition do we need to care. When we go
+ * from a negative count to a non-negative, we wake people up.
+ * - when we go from a non-negative count to a negative one, we must
+ * (a) synchronize with the "sleepers" count and (b) make sure
+ * that we're on the wakeup list before we synchronize, so that
+ * we cannot lose wakeup events.
+ */
+
+asmlinkage void __up(struct semaphore *sem)
+{
+ wake_up(&sem->wait);
+}
+
+asmlinkage void __sched __down(struct semaphore * sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+ unsigned long flags;
+
+ tsk->state = TASK_UNINTERRUPTIBLE;
+ spin_lock_irqsave(&sem->wait.lock, flags);
+ add_wait_queue_exclusive_locked(&sem->wait, &wait);
+
+ sem->sleepers++;
+ for (;;) {
+ int sleepers = sem->sleepers;
+
+ /*
+ * Add "everybody else" into it. They aren't
+ * playing, because we own the spinlock in
+ * the wait_queue_head.
+ */
+ if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+ sem->sleepers = 0;
+ break;
+ }
+ sem->sleepers = 1; /* us - see -1 above */
+ spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+ schedule();
+
+ spin_lock_irqsave(&sem->wait.lock, flags);
+ tsk->state = TASK_UNINTERRUPTIBLE;
+ }
+ remove_wait_queue_locked(&sem->wait, &wait);
+ wake_up_locked(&sem->wait);
+ spin_unlock_irqrestore(&sem->wait.lock, flags);
+ tsk->state = TASK_RUNNING;
+}
+
+asmlinkage int __sched __down_interruptible(struct semaphore * sem)
+{
+ int retval = 0;
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+ unsigned long flags;
+
+ tsk->state = TASK_INTERRUPTIBLE;
+ spin_lock_irqsave(&sem->wait.lock, flags);
+ add_wait_queue_exclusive_locked(&sem->wait, &wait);
+
+ sem->sleepers++;
+ for (;;) {
+ int sleepers = sem->sleepers;
+
+ /*
+ * With signals pending, this turns into
+ * the trylock failure case - we won't be
+ * sleeping, and we can't get the lock as
+ * it has contention. Just correct the count
+ * and exit.
+ */
+ if (signal_pending(current)) {
+ retval = -EINTR;
+ sem->sleepers = 0;
+ atomic_add(sleepers, &sem->count);
+ break;
+ }
+
+ /*
+ * Add "everybody else" into it. They aren't
+ * playing, because we own the spinlock in
+ * wait_queue_head. The "-1" is because we're
+ * still hoping to get the semaphore.
+ */
+ if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+ sem->sleepers = 0;
+ break;
+ }
+ sem->sleepers = 1; /* us - see -1 above */
+ spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+ schedule();
+
+ spin_lock_irqsave(&sem->wait.lock, flags);
+ tsk->state = TASK_INTERRUPTIBLE;
+ }
+ remove_wait_queue_locked(&sem->wait, &wait);
+ wake_up_locked(&sem->wait);
+ spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+ tsk->state = TASK_RUNNING;
+ return retval;
+}
+
+/*
+ * Trylock failed - make sure we correct for
+ * having decremented the count.
+ *
+ * We could have done the trylock with a
+ * single "cmpxchg" without failure cases,
+ * but then it wouldn't work on a 386.
+ */
+asmlinkage int __down_trylock(struct semaphore * sem)
+{
+ int sleepers;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sem->wait.lock, flags);
+ sleepers = sem->sleepers + 1;
+ sem->sleepers = 0;
+
+ /*
+ * Add "everybody else" and us into it. They aren't
+ * playing, because we own the spinlock in the
+ * wait_queue_head.
+ */
+ if (!atomic_add_negative(sleepers, &sem->count)) {
+ wake_up_locked(&sem->wait);
+ }
+
+ spin_unlock_irqrestore(&sem->wait.lock, flags);
+ return 1;
+}
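
These slow paths are only entered from the inline fast paths in <asm/semaphore.h> when the count goes negative. A hedged usage sketch (names invented, not taken from this commit) showing where __down_interruptible() and __up() come into play:

    #include <asm/semaphore.h>
    #include <linux/errno.h>

    static DECLARE_MUTEX(demo_sem);         /* counting semaphore initialised to 1 */

    int demo_locked_op(void)
    {
            /* contended callers sleep in __down_interruptible() above */
            if (down_interruptible(&demo_sem))
                    return -ERESTARTSYS;    /* a signal arrived while waiting */

            /* ... critical section ... */

            up(&demo_sem);                  /* wakes a sleeper via __up() if needed */
            return 0;
    }
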
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
new file mode 100644
index 00000000000..4826cd6e40e
--- /dev/null
+++ b/arch/m32r/kernel/setup.c
@@ -0,0 +1,420 @@
+/*
+ * linux/arch/m32r/kernel/setup.c
+ *
+ * Setup routines for Renesas M32R
+ *
+ * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/stddef.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/console.h>
+#include <linux/initrd.h>
+#include <linux/major.h>
+#include <linux/root_dev.h>
+#include <linux/seq_file.h>
+#include <linux/timex.h>
+#include <linux/tty.h>
+#include <linux/cpu.h>
+#include <linux/nodemask.h>
+
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/m32r.h>
+#include <asm/setup.h>
+#include <asm/sections.h>
+
+#ifdef CONFIG_MMU
+extern void init_mmu(void);
+#endif
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) \
+ || defined(CONFIG_BLK_DEV_IDE_MODULE) \
+ || defined(CONFIG_BLK_DEV_HD_MODULE)
+struct drive_info_struct { char dummy[32]; } drive_info;
+#endif
+
+extern char _end[];
+
+/*
+ * Machine setup..
+ */
+struct cpuinfo_m32r boot_cpu_data;
+
+#ifdef CONFIG_BLK_DEV_RAM
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+#endif
+
+#if defined(CONFIG_VGA_CONSOLE)
+struct screen_info screen_info = {
+ .orig_video_lines = 25,
+ .orig_video_cols = 80,
+ .orig_video_mode = 0,
+ .orig_video_ega_bx = 0,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 8
+};
+#endif
+
+extern int root_mountflags;
+
+static char command_line[COMMAND_LINE_SIZE];
+
+static struct resource data_resource = {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource code_resource = {
+ .name = "Kernel code",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+unsigned long memory_start;
+unsigned long memory_end;
+
+void __init setup_arch(char **);
+int get_cpuinfo(char *);
+
+static __inline__ void parse_mem_cmdline(char ** cmdline_p)
+{
+ char c = ' ';
+ char *to = command_line;
+ char *from = COMMAND_LINE;
+ int len = 0;
+ int usermem = 0;
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
+ saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+ memory_start = (unsigned long)CONFIG_MEMORY_START+PAGE_OFFSET;
+ memory_end = memory_start+(unsigned long)CONFIG_MEMORY_SIZE;
+
+ for ( ; ; ) {
+ if (c == ' ' && !memcmp(from, "mem=", 4)) {
+ if (to != command_line)
+ to--;
+
+ {
+ unsigned long mem_size;
+
+ usermem = 1;
+ mem_size = memparse(from+4, &from);
+ memory_end = memory_start + mem_size;
+ }
+ }
+ c = *(from++);
+ if (!c)
+ break;
+
+ if (COMMAND_LINE_SIZE <= ++len)
+ break;
+
+ *(to++) = c;
+ }
+ *to = '\0';
+ *cmdline_p = command_line;
+ if (usermem)
+ printk(KERN_INFO "user-defined physical RAM map:\n");
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+static unsigned long __init setup_memory(void)
+{
+ unsigned long start_pfn, max_low_pfn, bootmap_size;
+
+ start_pfn = PFN_UP( __pa(_end) );
+ max_low_pfn = PFN_DOWN( __pa(memory_end) );
+
+ /*
+ * Initialize the boot-time allocator (with low memory only):
+ */
+ bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
+ CONFIG_MEMORY_START>>PAGE_SHIFT, max_low_pfn);
+
+ /*
+ * Register fully available low RAM pages with the bootmem allocator.
+ */
+ {
+ unsigned long curr_pfn;
+ unsigned long last_pfn;
+ unsigned long pages;
+
+ /*
+ * We are rounding up the start address of usable memory:
+ */
+ curr_pfn = PFN_UP(__pa(memory_start));
+
+ /*
+ * ... and at the end of the usable range downwards:
+ */
+ last_pfn = PFN_DOWN(__pa(memory_end));
+
+ if (last_pfn > max_low_pfn)
+ last_pfn = max_low_pfn;
+
+ pages = last_pfn - curr_pfn;
+ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
+ }
+
+ /*
+ * Reserve the kernel text and the bootmem bitmap. We do this in two
+ * steps (the first step was init_bootmem()), because this catches the
+ * (definitely buggy)
+ * case of us accidentally initializing the bootmem allocator with
+ * an invalid RAM area.
+ */
+ reserve_bootmem(CONFIG_MEMORY_START + PAGE_SIZE,
+ (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE - 1)
+ - CONFIG_MEMORY_START);
+
+ /*
+ * reserve physical page 0 - it's a special BIOS page on many boxes,
+ * enabling clean reboots, SMP operation, laptop functions.
+ */
+ reserve_bootmem(CONFIG_MEMORY_START, PAGE_SIZE);
+
+ /*
+ * reserve memory hole
+ */
+#ifdef CONFIG_MEMHOLE
+ reserve_bootmem(CONFIG_MEMHOLE_START, CONFIG_MEMHOLE_SIZE);
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (LOADER_TYPE && INITRD_START) {
+ if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
+ reserve_bootmem(INITRD_START, INITRD_SIZE);
+ initrd_start = INITRD_START ?
+ INITRD_START + PAGE_OFFSET : 0;
+
+ initrd_end = initrd_start + INITRD_SIZE;
+ printk("initrd:start[%08lx],size[%08lx]\n",
+ initrd_start, INITRD_SIZE);
+ } else {
+ printk("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ INITRD_START + INITRD_SIZE,
+ max_low_pfn << PAGE_SHIFT);
+
+ initrd_start = 0;
+ }
+ }
+#endif
+
+ return max_low_pfn;
+}
+#else /* CONFIG_DISCONTIGMEM */
+extern unsigned long setup_memory(void);
+#endif /* CONFIG_DISCONTIGMEM */
+
+#define M32R_PCC_PCATCR 0x00ef7014 /* will move to m32r.h */
+
+void __init setup_arch(char **cmdline_p)
+{
+ ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
+
+ boot_cpu_data.cpu_clock = M32R_CPUCLK;
+ boot_cpu_data.bus_clock = M32R_BUSCLK;
+ boot_cpu_data.timer_divide = M32R_TIMER_DIVIDE;
+
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+#endif
+
+ if (!MOUNT_ROOT_RDONLY)
+ root_mountflags &= ~MS_RDONLY;
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+ nodes_clear(node_online_map);
+ node_set_online(0);
+ node_set_online(1);
+#endif /* CONFIG_DISCONTIGMEM */
+
+ init_mm.start_code = (unsigned long) _text;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = (unsigned long) _end;
+
+ code_resource.start = virt_to_phys(_text);
+ code_resource.end = virt_to_phys(_etext)-1;
+ data_resource.start = virt_to_phys(_etext);
+ data_resource.end = virt_to_phys(_edata)-1;
+
+ parse_mem_cmdline(cmdline_p);
+
+ setup_memory();
+
+ paging_init();
+}
+
+static struct cpu cpu[NR_CPUS];
+
+static int __init topology_init(void)
+{
+ int cpu_id;
+
+ for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++)
+ if (cpu_possible(cpu_id))
+ register_cpu(&cpu[cpu_id], cpu_id, NULL);
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Get CPU information for use by the procfs.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ struct cpuinfo_m32r *c = v;
+ unsigned long cpu = c - cpu_data;
+
+#ifdef CONFIG_SMP
+ if (!cpu_online(cpu))
+ return 0;
+#endif /* CONFIG_SMP */
+
+ seq_printf(m, "processor\t: %ld\n", cpu);
+
+#ifdef CONFIG_CHIP_VDEC2
+ seq_printf(m, "cpu family\t: VDEC2\n"
+ "cache size\t: Unknown\n");
+#elif CONFIG_CHIP_M32700
+ seq_printf(m,"cpu family\t: M32700\n"
+ "cache size\t: I-8KB/D-8KB\n");
+#elif CONFIG_CHIP_M32102
+ seq_printf(m,"cpu family\t: M32102\n"
+ "cache size\t: I-8KB\n");
+#elif CONFIG_CHIP_OPSP
+ seq_printf(m,"cpu family\t: OPSP\n"
+ "cache size\t: I-8KB/D-8KB\n");
+#elif CONFIG_CHIP_MP
+ seq_printf(m, "cpu family\t: M32R-MP\n"
+ "cache size\t: I-xxKB/D-xxKB\n");
+#else
+ seq_printf(m, "cpu family\t: Unknown\n");
+#endif
+ seq_printf(m, "bogomips\t: %lu.%02lu\n",
+ c->loops_per_jiffy/(500000/HZ),
+ (c->loops_per_jiffy/(5000/HZ)) % 100);
+#ifdef CONFIG_PLAT_MAPPI
+ seq_printf(m, "Machine\t\t: Mappi Evaluation board\n");
+#elif CONFIG_PLAT_MAPPI2
+ seq_printf(m, "Machine\t\t: Mappi-II Evaluation board\n");
+#elif CONFIG_PLAT_M32700UT
+ seq_printf(m, "Machine\t\t: M32700UT Evaluation board\n");
+#elif CONFIG_PLAT_OPSPUT
+ seq_printf(m, "Machine\t\t: OPSPUT Evaluation board\n");
+#elif CONFIG_PLAT_USRV
+ seq_printf(m, "Machine\t\t: uServer\n");
+#elif CONFIG_PLAT_OAKS32R
+ seq_printf(m, "Machine\t\t: OAKS32R\n");
+#else
+ seq_printf(m, "Machine\t\t: Unknown\n");
+#endif
+
+#define PRINT_CLOCK(name, value) \
+ seq_printf(m, name " clock\t: %d.%02dMHz\n", \
+ ((value) / 1000000), ((value) % 1000000)/10000)
+
+ PRINT_CLOCK("CPU", (int)c->cpu_clock);
+ PRINT_CLOCK("Bus", (int)c->bus_clock);
+
+ seq_printf(m, "\n");
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+struct seq_operations cpuinfo_op = {
+ start: c_start,
+ next: c_next,
+ stop: c_stop,
+ show: show_cpuinfo,
+};
+#endif /* CONFIG_PROC_FS */
+
+unsigned long cpu_initialized __initdata = 0;
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process.
+ * We reload it nevertheless; this function acts as a
+ * 'CPU state barrier', and nothing should get across.
+ */
+#if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \
+ || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \
+ || defined(CONFIG_CHIP_OPSP)
+void __init cpu_init (void)
+{
+ int cpu_id = smp_processor_id();
+
+ if (test_and_set_bit(cpu_id, &cpu_initialized)) {
+ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
+ for ( ; ; )
+ local_irq_enable();
+ }
+ printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
+
+ /* Attach the kernel's init_mm as this CPU's active mm */
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ if (current->mm)
+ BUG();
+
+ /* Force FPU initialization */
+ current_thread_info()->status = 0;
+ clear_used_math();
+
+#ifdef CONFIG_MMU
+ /* Set up MMU */
+ init_mmu();
+#endif
+
+ /* Set up ICUIMASK */
+ outl(0x00070000, M32R_ICU_IMASK_PORTL); /* imask=111 */
+}
+#endif /* defined(CONFIG_CHIP_VDEC2) ... */
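
A hedged worked example of parse_mem_cmdline() above (the configuration values are illustrative, not taken from this commit): the usable range is derived from CONFIG_MEMORY_START/CONFIG_MEMORY_SIZE, and a "mem=" boot argument shrinks it via memparse(), which understands the usual K/M/G suffixes. With CONFIG_MEMORY_START = 0x08000000, PAGE_OFFSET = 0x80000000 and "mem=16M" on the command line:

    memory_start = 0x08000000 + 0x80000000        = 0x88000000
    memory_end   = memory_start + memparse("16M") = 0x89000000

setup_memory() then hands the page frames between PFN_UP(__pa(_end)) and PFN_DOWN(__pa(memory_end)) to the bootmem allocator.
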
diff --git a/arch/m32r/kernel/setup_m32700ut.c b/arch/m32r/kernel/setup_m32700ut.c
new file mode 100644
index 00000000000..488aa87bab7
--- /dev/null
+++ b/arch/m32r/kernel/setup_m32700ut.c
@@ -0,0 +1,478 @@
+/*
+ * linux/arch/m32r/kernel/setup_m32700ut.c
+ *
+ * Setup routines for Renesas M32700UT Board
+ *
+ * Copyright (c) 2002 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto, Takeo Takahashi
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/config.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include <asm/system.h>
+#include <asm/m32r.h>
+#include <asm/io.h>
+
+/*
+ * M32700 Interrupt Control Unit (Level 1)
+ */
+#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
+
+#ifndef CONFIG_SMP
+typedef struct {
+ unsigned long icucr; /* ICU Control Register */
+} icu_data_t;
+#endif /* CONFIG_SMP */
+
+static icu_data_t icu_data[M32700UT_NUM_CPU_IRQ];
+
+static void disable_m32700ut_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7;
+ outl(data, port);
+}
+
+static void enable_m32700ut_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6;
+ outl(data, port);
+}
+
+static void mask_and_ack_m32700ut(unsigned int irq)
+{
+ disable_m32700ut_irq(irq);
+}
+
+static void end_m32700ut_irq(unsigned int irq)
+{
+ enable_m32700ut_irq(irq);
+}
+
+static unsigned int startup_m32700ut_irq(unsigned int irq)
+{
+ enable_m32700ut_irq(irq);
+ return (0);
+}
+
+static void shutdown_m32700ut_irq(unsigned int irq)
+{
+ unsigned long port;
+
+ port = irq2port(irq);
+ outl(M32R_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type m32700ut_irq_type =
+{
+ "M32700UT-IRQ",
+ startup_m32700ut_irq,
+ shutdown_m32700ut_irq,
+ enable_m32700ut_irq,
+ disable_m32700ut_irq,
+ mask_and_ack_m32700ut,
+ end_m32700ut_irq
+};
+
+/*
+ * Interrupt Control Unit of PLD on M32700UT (Level 2)
+ */
+#define irq2pldirq(x) ((x) - M32700UT_PLD_IRQ_BASE)
+#define pldirq2port(x) (unsigned long)((int)PLD_ICUCR1 + \
+ (((x) - 1) * sizeof(unsigned short)))
+
+typedef struct {
+ unsigned short icucr; /* ICU Control Register */
+} pld_icu_data_t;
+
+static pld_icu_data_t pld_icu_data[M32700UT_NUM_PLD_IRQ];
+
+static void disable_m32700ut_pld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2pldirq(irq);
+// disable_m32700ut_irq(M32R_IRQ_INT1);
+ port = pldirq2port(pldirq);
+ data = pld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
+ outw(data, port);
+}
+
+static void enable_m32700ut_pld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2pldirq(irq);
+// enable_m32700ut_irq(M32R_IRQ_INT1);
+ port = pldirq2port(pldirq);
+ data = pld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
+ outw(data, port);
+}
+
+static void mask_and_ack_m32700ut_pld(unsigned int irq)
+{
+ disable_m32700ut_pld_irq(irq);
+// mask_and_ack_m32700ut(M32R_IRQ_INT1);
+}
+
+static void end_m32700ut_pld_irq(unsigned int irq)
+{
+ enable_m32700ut_pld_irq(irq);
+ end_m32700ut_irq(M32R_IRQ_INT1);
+}
+
+static unsigned int startup_m32700ut_pld_irq(unsigned int irq)
+{
+ enable_m32700ut_pld_irq(irq);
+ return (0);
+}
+
+static void shutdown_m32700ut_pld_irq(unsigned int irq)
+{
+ unsigned long port;
+ unsigned int pldirq;
+
+ pldirq = irq2pldirq(irq);
+// shutdown_m32700ut_irq(M32R_IRQ_INT1);
+ port = pldirq2port(pldirq);
+ outw(PLD_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type m32700ut_pld_irq_type =
+{
+ "M32700UT-PLD-IRQ",
+ startup_m32700ut_pld_irq,
+ shutdown_m32700ut_pld_irq,
+ enable_m32700ut_pld_irq,
+ disable_m32700ut_pld_irq,
+ mask_and_ack_m32700ut_pld,
+ end_m32700ut_pld_irq
+};
+
+/*
+ * Interrupt Control Unit of PLD on M32700UT-LAN (Level 2)
+ */
+#define irq2lanpldirq(x) ((x) - M32700UT_LAN_PLD_IRQ_BASE)
+#define lanpldirq2port(x) (unsigned long)((int)M32700UT_LAN_ICUCR1 + \
+ (((x) - 1) * sizeof(unsigned short)))
+
+static pld_icu_data_t lanpld_icu_data[M32700UT_NUM_LAN_PLD_IRQ];
+
+static void disable_m32700ut_lanpld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2lanpldirq(irq);
+ port = lanpldirq2port(pldirq);
+ data = lanpld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
+ outw(data, port);
+}
+
+static void enable_m32700ut_lanpld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2lanpldirq(irq);
+ port = lanpldirq2port(pldirq);
+ data = lanpld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
+ outw(data, port);
+}
+
+static void mask_and_ack_m32700ut_lanpld(unsigned int irq)
+{
+ disable_m32700ut_lanpld_irq(irq);
+}
+
+static void end_m32700ut_lanpld_irq(unsigned int irq)
+{
+ enable_m32700ut_lanpld_irq(irq);
+ end_m32700ut_irq(M32R_IRQ_INT0);
+}
+
+static unsigned int startup_m32700ut_lanpld_irq(unsigned int irq)
+{
+ enable_m32700ut_lanpld_irq(irq);
+ return (0);
+}
+
+static void shutdown_m32700ut_lanpld_irq(unsigned int irq)
+{
+ unsigned long port;
+ unsigned int pldirq;
+
+ pldirq = irq2lanpldirq(irq);
+ port = lanpldirq2port(pldirq);
+ outw(PLD_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type m32700ut_lanpld_irq_type =
+{
+ "M32700UT-PLD-LAN-IRQ",
+ startup_m32700ut_lanpld_irq,
+ shutdown_m32700ut_lanpld_irq,
+ enable_m32700ut_lanpld_irq,
+ disable_m32700ut_lanpld_irq,
+ mask_and_ack_m32700ut_lanpld,
+ end_m32700ut_lanpld_irq
+};
+
+/*
+ * Interrupt Control Unit of PLD on M32700UT-LCD (Level 2)
+ */
+#define irq2lcdpldirq(x) ((x) - M32700UT_LCD_PLD_IRQ_BASE)
+#define lcdpldirq2port(x) (unsigned long)((int)M32700UT_LCD_ICUCR1 + \
+ (((x) - 1) * sizeof(unsigned short)))
+
+static pld_icu_data_t lcdpld_icu_data[M32700UT_NUM_LCD_PLD_IRQ];
+
+static void disable_m32700ut_lcdpld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2lcdpldirq(irq);
+ port = lcdpldirq2port(pldirq);
+ data = lcdpld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
+ outw(data, port);
+}
+
+static void enable_m32700ut_lcdpld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2lcdpldirq(irq);
+ port = lcdpldirq2port(pldirq);
+ data = lcdpld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
+ outw(data, port);
+}
+
+static void mask_and_ack_m32700ut_lcdpld(unsigned int irq)
+{
+ disable_m32700ut_lcdpld_irq(irq);
+}
+
+static void end_m32700ut_lcdpld_irq(unsigned int irq)
+{
+ enable_m32700ut_lcdpld_irq(irq);
+ end_m32700ut_irq(M32R_IRQ_INT2);
+}
+
+static unsigned int startup_m32700ut_lcdpld_irq(unsigned int irq)
+{
+ enable_m32700ut_lcdpld_irq(irq);
+ return (0);
+}
+
+static void shutdown_m32700ut_lcdpld_irq(unsigned int irq)
+{
+ unsigned long port;
+ unsigned int pldirq;
+
+ pldirq = irq2lcdpldirq(irq);
+ port = lcdpldirq2port(pldirq);
+ outw(PLD_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type m32700ut_lcdpld_irq_type =
+{
+ "M32700UT-PLD-LCD-IRQ",
+ startup_m32700ut_lcdpld_irq,
+ shutdown_m32700ut_lcdpld_irq,
+ enable_m32700ut_lcdpld_irq,
+ disable_m32700ut_lcdpld_irq,
+ mask_and_ack_m32700ut_lcdpld,
+ end_m32700ut_lcdpld_irq
+};
+
+void __init init_IRQ(void)
+{
+#if defined(CONFIG_SMC91X)
+ /* INT#0: LAN controller on M32700UT-LAN (SMC91C111)*/
+ irq_desc[M32700UT_LAN_IRQ_LAN].status = IRQ_DISABLED;
+ irq_desc[M32700UT_LAN_IRQ_LAN].handler = &m32700ut_lanpld_irq_type;
+ irq_desc[M32700UT_LAN_IRQ_LAN].action = 0;
+ irq_desc[M32700UT_LAN_IRQ_LAN].depth = 1; /* disable nested irq */
+ lanpld_icu_data[irq2lanpldirq(M32700UT_LAN_IRQ_LAN)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* "H" edge sense */
+ disable_m32700ut_lanpld_irq(M32700UT_LAN_IRQ_LAN);
+#endif /* CONFIG_SMC91X */
+
+ /* MFT2 : system timer */
+ irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_MFT2].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_MFT2].action = 0;
+ irq_desc[M32R_IRQ_MFT2].depth = 1;
+ icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
+ disable_m32700ut_irq(M32R_IRQ_MFT2);
+
+ /* SIO0 : receive */
+ irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_R].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].action = 0;
+ irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ icu_data[M32R_IRQ_SIO0_R].icucr = 0;
+ disable_m32700ut_irq(M32R_IRQ_SIO0_R);
+
+ /* SIO0 : send */
+ irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_S].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].action = 0;
+ irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ icu_data[M32R_IRQ_SIO0_S].icucr = 0;
+ disable_m32700ut_irq(M32R_IRQ_SIO0_S);
+
+ /* SIO1 : receive */
+ irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO1_R].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_SIO1_R].action = 0;
+ irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+ icu_data[M32R_IRQ_SIO1_R].icucr = 0;
+ disable_m32700ut_irq(M32R_IRQ_SIO1_R);
+
+ /* SIO1 : send */
+ irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO1_S].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_SIO1_S].action = 0;
+ irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+ icu_data[M32R_IRQ_SIO1_S].icucr = 0;
+ disable_m32700ut_irq(M32R_IRQ_SIO1_S);
+
+ /* DMA1 : */
+ irq_desc[M32R_IRQ_DMA1].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_DMA1].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_DMA1].action = 0;
+ irq_desc[M32R_IRQ_DMA1].depth = 1;
+ icu_data[M32R_IRQ_DMA1].icucr = 0;
+ disable_m32700ut_irq(M32R_IRQ_DMA1);
+
+#ifdef CONFIG_SERIAL_M32R_PLDSIO
+ /* INT#1: SIO0 Receive on PLD */
+ irq_desc[PLD_IRQ_SIO0_RCV].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_SIO0_RCV].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_SIO0_RCV].action = 0;
+ irq_desc[PLD_IRQ_SIO0_RCV].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_RCV)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
+ disable_m32700ut_pld_irq(PLD_IRQ_SIO0_RCV);
+
+ /* INT#1: SIO0 Send on PLD */
+ irq_desc[PLD_IRQ_SIO0_SND].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_SIO0_SND].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_SIO0_SND].action = 0;
+ irq_desc[PLD_IRQ_SIO0_SND].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_SND)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
+ disable_m32700ut_pld_irq(PLD_IRQ_SIO0_SND);
+#endif /* CONFIG_SERIAL_M32R_PLDSIO */
+
+ /* INT#1: CFC IREQ on PLD */
+ irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_CFIREQ].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_CFIREQ].action = 0;
+ irq_desc[PLD_IRQ_CFIREQ].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_CFIREQ)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* 'L' level sense */
+ disable_m32700ut_pld_irq(PLD_IRQ_CFIREQ);
+
+ /* INT#1: CFC Insert on PLD */
+ irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_CFC_INSERT].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
+ irq_desc[PLD_IRQ_CFC_INSERT].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_CFC_INSERT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD00; /* 'L' edge sense */
+ disable_m32700ut_pld_irq(PLD_IRQ_CFC_INSERT);
+
+ /* INT#1: CFC Eject on PLD */
+ irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_CFC_EJECT].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
+ irq_desc[PLD_IRQ_CFC_EJECT].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_CFC_EJECT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* 'H' edge sense */
+ disable_m32700ut_pld_irq(PLD_IRQ_CFC_EJECT);
+
+ /*
+ * INT0# is used for LAN, DIO
+ * We enable it here.
+ */
+ icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD11;
+ enable_m32700ut_irq(M32R_IRQ_INT0);
+
+ /*
+ * INT1# is used for UART, MMC, CF Controller in FPGA.
+ * We enable it here.
+ */
+ icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD11;
+ enable_m32700ut_irq(M32R_IRQ_INT1);
+
+#if defined(CONFIG_USB)
+ outw(USBCR_OTGS, USBCR); /* USBCR: non-OTG */
+
+ irq_desc[M32700UT_LCD_IRQ_USB_INT1].status = IRQ_DISABLED;
+ irq_desc[M32700UT_LCD_IRQ_USB_INT1].handler = &m32700ut_lcdpld_irq_type;
+ irq_desc[M32700UT_LCD_IRQ_USB_INT1].action = 0;
+ irq_desc[M32700UT_LCD_IRQ_USB_INT1].depth = 1;
+ lcdpld_icu_data[irq2lcdpldirq(M32700UT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* "L" level sense */
+ disable_m32700ut_lcdpld_irq(M32700UT_LCD_IRQ_USB_INT1);
+#endif
+ /*
+ * INT2# is used for BAT, USB, AUDIO
+ * We enable it here.
+ */
+ icu_data[M32R_IRQ_INT2].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
+ enable_m32700ut_irq(M32R_IRQ_INT2);
+
+//#if defined(CONFIG_VIDEO_M32R_AR)
+ /*
+ * INT3# is used for AR
+ */
+ irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_INT3].handler = &m32700ut_irq_type;
+ irq_desc[M32R_IRQ_INT3].action = 0;
+ irq_desc[M32R_IRQ_INT3].depth = 1;
+ icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
+ disable_m32700ut_irq(M32R_IRQ_INT3);
+//#endif /* CONFIG_VIDEO_M32R_AR */
+}
+
+#define LAN_IOSTART 0x300
+#define LAN_IOEND 0x320
+static struct resource smc91x_resources[] = {
+ [0] = {
+ .start = (LAN_IOSTART),
+ .end = (LAN_IOEND),
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = M32700UT_LAN_IRQ_LAN,
+ .end = M32700UT_LAN_IRQ_LAN,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+
+static int __init platform_init(void)
+{
+ platform_device_register(&smc91x_device);
+ return 0;
+}
+arch_initcall(platform_init);
diff --git a/arch/m32r/kernel/setup_mappi.c b/arch/m32r/kernel/setup_mappi.c
new file mode 100644
index 00000000000..1e74110f067
--- /dev/null
+++ b/arch/m32r/kernel/setup_mappi.c
@@ -0,0 +1,160 @@
+/*
+ * linux/arch/m32r/kernel/setup_mappi.c
+ *
+ * Setup routines for Renesas MAPPI Board
+ *
+ * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto
+ */
+
+#include <linux/config.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/m32r.h>
+#include <asm/io.h>
+
+#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
+
+#ifndef CONFIG_SMP
+typedef struct {
+ unsigned long icucr; /* ICU Control Register */
+} icu_data_t;
+#endif /* CONFIG_SMP */
+
+icu_data_t icu_data[NR_IRQS];
+
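+/*
+ * The CPU ICU control registers are 32-bit words starting at
+ * M32R_ICU_CR1_PORTL, so irq2port() maps IRQ n to control register n.
+ * The handlers below share one convention: disable writes ILEVEL7
+ * (the masked level), enable writes IEN together with ILEVEL6.
+ */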
+static void disable_mappi_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7;
+ outl(data, port);
+}
+
+static void enable_mappi_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6;
+ outl(data, port);
+}
+
+static void mask_and_ack_mappi(unsigned int irq)
+{
+ disable_mappi_irq(irq);
+}
+
+static void end_mappi_irq(unsigned int irq)
+{
+ enable_mappi_irq(irq);
+}
+
+static unsigned int startup_mappi_irq(unsigned int irq)
+{
+ enable_mappi_irq(irq);
+ return (0);
+}
+
+static void shutdown_mappi_irq(unsigned int irq)
+{
+ unsigned long port;
+
+ port = irq2port(irq);
+ outl(M32R_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type mappi_irq_type =
+{
+ "MAPPI-IRQ",
+ startup_mappi_irq,
+ shutdown_mappi_irq,
+ enable_mappi_irq,
+ disable_mappi_irq,
+ mask_and_ack_mappi,
+ end_mappi_irq
+};
+
+void __init init_IRQ(void)
+{
+ static int once = 0;
+
+ if (once)
+ return;
+ else
+ once++;
+
+#ifdef CONFIG_NE2000
+ /* INT0 : LAN controller (RTL8019AS) */
+ irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_INT0].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_INT0].action = 0;
+ irq_desc[M32R_IRQ_INT0].depth = 1;
+ icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
+ disable_mappi_irq(M32R_IRQ_INT0);
+#endif /* CONFIG_NE2000 */
+
+ /* MFT2 : system timer */
+ irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_MFT2].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_MFT2].action = 0;
+ irq_desc[M32R_IRQ_MFT2].depth = 1;
+ icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
+ disable_mappi_irq(M32R_IRQ_MFT2);
+
+#ifdef CONFIG_SERIAL_M32R_SIO
+ /* SIO0_R : uart receive data */
+ irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_R].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].action = 0;
+ irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ icu_data[M32R_IRQ_SIO0_R].icucr = 0;
+ disable_mappi_irq(M32R_IRQ_SIO0_R);
+
+ /* SIO0_S : uart send data */
+ irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_S].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].action = 0;
+ irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ icu_data[M32R_IRQ_SIO0_S].icucr = 0;
+ disable_mappi_irq(M32R_IRQ_SIO0_S);
+
+ /* SIO1_R : uart receive data */
+ irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO1_R].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO1_R].action = 0;
+ irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+ icu_data[M32R_IRQ_SIO1_R].icucr = 0;
+ disable_mappi_irq(M32R_IRQ_SIO1_R);
+
+ /* SIO1_S : uart send data */
+ irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO1_S].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO1_S].action = 0;
+ irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+ icu_data[M32R_IRQ_SIO1_S].icucr = 0;
+ disable_mappi_irq(M32R_IRQ_SIO1_S);
+#endif /* CONFIG_SERIAL_M32R_SIO */
+
+#if defined(CONFIG_M32R_PCC)
+ /* INT1 : pccard0 interrupt */
+ irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_INT1].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_INT1].action = 0;
+ irq_desc[M32R_IRQ_INT1].depth = 1;
+ icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD00;
+ disable_mappi_irq(M32R_IRQ_INT1);
+
+ /* INT2 : pccard1 interrupt */
+ irq_desc[M32R_IRQ_INT2].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_INT2].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_INT2].action = 0;
+ irq_desc[M32R_IRQ_INT2].depth = 1;
+ icu_data[M32R_IRQ_INT2].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD00;
+ disable_mappi_irq(M32R_IRQ_INT2);
+#endif /* CONFIG_M32R_PCC */
+}
diff --git a/arch/m32r/kernel/setup_mappi2.c b/arch/m32r/kernel/setup_mappi2.c
new file mode 100644
index 00000000000..1904d465a50
--- /dev/null
+++ b/arch/m32r/kernel/setup_mappi2.c
@@ -0,0 +1,212 @@
+/*
+ * linux/arch/m32r/kernel/setup_mappi2.c
+ *
+ * Setup routines for Renesas MAPPI-II(M3A-ZA36) Board
+ *
+ * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto, Mamoru Sakugawa
+ */
+
+#include <linux/config.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include <asm/system.h>
+#include <asm/m32r.h>
+#include <asm/io.h>
+
+#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
+
+#ifndef CONFIG_SMP
+typedef struct {
+ unsigned long icucr; /* ICU Control Register */
+} icu_data_t;
+#endif /* CONFIG_SMP */
+
+icu_data_t icu_data[NR_IRQS];
+
+static void disable_mappi2_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ if ((irq == 0) || (irq >= NR_IRQS)) {
+ printk("bad irq 0x%08x\n", irq);
+ return;
+ }
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7;
+ outl(data, port);
+}
+
+static void enable_mappi2_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ if ((irq == 0) || (irq >= NR_IRQS)) {
+ printk("bad irq 0x%08x\n", irq);
+ return;
+ }
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6;
+ outl(data, port);
+}
+
+static void mask_and_ack_mappi2(unsigned int irq)
+{
+ disable_mappi2_irq(irq);
+}
+
+static void end_mappi2_irq(unsigned int irq)
+{
+ enable_mappi2_irq(irq);
+}
+
+static unsigned int startup_mappi2_irq(unsigned int irq)
+{
+ enable_mappi2_irq(irq);
+ return (0);
+}
+
+static void shutdown_mappi2_irq(unsigned int irq)
+{
+ unsigned long port;
+
+ port = irq2port(irq);
+ outl(M32R_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type mappi2_irq_type =
+{
+ "MAPPI2-IRQ",
+ startup_mappi2_irq,
+ shutdown_mappi2_irq,
+ enable_mappi2_irq,
+ disable_mappi2_irq,
+ mask_and_ack_mappi2,
+ end_mappi2_irq
+};
+
+void __init init_IRQ(void)
+{
+#if defined(CONFIG_SMC91X)
+ /* INT0 : LAN controller (SMC91111) */
+ irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_INT0].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_INT0].action = 0;
+ irq_desc[M32R_IRQ_INT0].depth = 1;
+ icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
+ disable_mappi2_irq(M32R_IRQ_INT0);
+#endif /* CONFIG_SMC91X */
+
+ /* MFT2 : system timer */
+ irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_MFT2].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_MFT2].action = 0;
+ irq_desc[M32R_IRQ_MFT2].depth = 1;
+ icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
+ disable_mappi2_irq(M32R_IRQ_MFT2);
+
+#ifdef CONFIG_SERIAL_M32R_SIO
+ /* SIO0_R : uart receive data */
+ irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_R].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].action = 0;
+ irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ icu_data[M32R_IRQ_SIO0_R].icucr = 0;
+ disable_mappi2_irq(M32R_IRQ_SIO0_R);
+
+ /* SIO0_S : uart send data */
+ irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_S].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].action = 0;
+ irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ icu_data[M32R_IRQ_SIO0_S].icucr = 0;
+ disable_mappi2_irq(M32R_IRQ_SIO0_S);
+
+ /* SIO1_R : uart receive data */
+ irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO1_R].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_SIO1_R].action = 0;
+ irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+ icu_data[M32R_IRQ_SIO1_R].icucr = 0;
+ disable_mappi2_irq(M32R_IRQ_SIO1_R);
+
+ /* SIO1_S : uart send data */
+ irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO1_S].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_SIO1_S].action = 0;
+ irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+ icu_data[M32R_IRQ_SIO1_S].icucr = 0;
+ disable_mappi2_irq(M32R_IRQ_SIO1_S);
+#endif /* CONFIG_SERIAL_M32R_SIO */
+
+#if defined(CONFIG_USB)
+ /* INT1 : USB Host controller interrupt */
+ irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_INT1].handler = &mappi2_irq_type;
+ irq_desc[M32R_IRQ_INT1].action = 0;
+ irq_desc[M32R_IRQ_INT1].depth = 1;
+ icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD01;
+ disable_mappi2_irq(M32R_IRQ_INT1);
+#endif /* CONFIG_USB */
+
+ /* ICUCR40: CFC IREQ */
+ irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_CFIREQ].handler = &mappi2_irq_type;
+ irq_desc[PLD_IRQ_CFIREQ].action = 0;
+ irq_desc[PLD_IRQ_CFIREQ].depth = 1; /* disable nested irq */
+// icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
+ icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
+ disable_mappi2_irq(PLD_IRQ_CFIREQ);
+
+#if defined(CONFIG_M32R_CFC)
+ /* ICUCR41: CFC Insert */
+ irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_CFC_INSERT].handler = &mappi2_irq_type;
+ irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
+ irq_desc[PLD_IRQ_CFC_INSERT].depth = 1; /* disable nested irq */
+ icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
+// icu_data[PLD_IRQ_CFC_INSERT].icucr = 0;
+ disable_mappi2_irq(PLD_IRQ_CFC_INSERT);
+
+ /* ICUCR42: CFC Eject */
+ irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_CFC_EJECT].handler = &mappi2_irq_type;
+ irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
+ irq_desc[PLD_IRQ_CFC_EJECT].depth = 1; /* disable nested irq */
+ icu_data[PLD_IRQ_CFC_EJECT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
+// icu_data[PLD_IRQ_CFC_EJECT].icucr = 0;
+ disable_mappi2_irq(PLD_IRQ_CFC_EJECT);
+
+#endif /* CONFIG_M32R_CFC */
+}
+
+#define LAN_IOSTART 0x300
+#define LAN_IOEND 0x320
+static struct resource smc91x_resources[] = {
+ [0] = {
+ .start = (LAN_IOSTART),
+ .end = (LAN_IOEND),
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = M32R_IRQ_INT0,
+ .end = M32R_IRQ_INT0,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+
+static int __init platform_init(void)
+{
+ platform_device_register(&smc91x_device);
+ return 0;
+}
+arch_initcall(platform_init);
diff --git a/arch/m32r/kernel/setup_oaks32r.c b/arch/m32r/kernel/setup_oaks32r.c
new file mode 100644
index 00000000000..b04834526c9
--- /dev/null
+++ b/arch/m32r/kernel/setup_oaks32r.c
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/m32r/kernel/setup_oaks32r.c
+ *
+ * Setup routines for OAKS32R Board
+ *
+ * Copyright (c) 2002-2004 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto, Mamoru Sakugawa
+ */
+
+#include <linux/config.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/m32r.h>
+#include <asm/io.h>
+
+#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
+
+#ifndef CONFIG_SMP
+typedef struct {
+ unsigned long icucr; /* ICU Control Register */
+} icu_data_t;
+#endif /* CONFIG_SMP */
+
+icu_data_t icu_data[NR_IRQS];
+
+static void disable_oaks32r_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7;
+ outl(data, port);
+}
+
+static void enable_oaks32r_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6;
+ outl(data, port);
+}
+
+static void mask_and_ack_mappi(unsigned int irq)
+{
+ disable_oaks32r_irq(irq);
+}
+
+static void end_oaks32r_irq(unsigned int irq)
+{
+ enable_oaks32r_irq(irq);
+}
+
+static unsigned int startup_oaks32r_irq(unsigned int irq)
+{
+ enable_oaks32r_irq(irq);
+ return (0);
+}
+
+static void shutdown_oaks32r_irq(unsigned int irq)
+{
+ unsigned long port;
+
+ port = irq2port(irq);
+ outl(M32R_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type oaks32r_irq_type =
+{
+ "OAKS32R-IRQ",
+ startup_oaks32r_irq,
+ shutdown_oaks32r_irq,
+ enable_oaks32r_irq,
+ disable_oaks32r_irq,
+ mask_and_ack_mappi,
+ end_oaks32r_irq
+};
+
+void __init init_IRQ(void)
+{
+ static int once = 0;
+
+ if (once)
+ return;
+ else
+ once++;
+
+#ifdef CONFIG_NE2000
+ /* INT3 : LAN controller (RTL8019AS) */
+ irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_INT3].handler = &oaks32r_irq_type;
+ irq_desc[M32R_IRQ_INT3].action = 0;
+ irq_desc[M32R_IRQ_INT3].depth = 1;
+ icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
+ disable_oaks32r_irq(M32R_IRQ_INT3);
+#endif /* CONFIG_NE2000 */
+
+ /* MFT2 : system timer */
+ irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_MFT2].handler = &oaks32r_irq_type;
+ irq_desc[M32R_IRQ_MFT2].action = 0;
+ irq_desc[M32R_IRQ_MFT2].depth = 1;
+ icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
+ disable_oaks32r_irq(M32R_IRQ_MFT2);
+
+#ifdef CONFIG_SERIAL_M32R_SIO
+ /* SIO0_R : uart receive data */
+ irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_R].handler = &oaks32r_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].action = 0;
+ irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ icu_data[M32R_IRQ_SIO0_R].icucr = 0;
+ disable_oaks32r_irq(M32R_IRQ_SIO0_R);
+
+ /* SIO0_S : uart send data */
+ irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_S].handler = &oaks32r_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].action = 0;
+ irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ icu_data[M32R_IRQ_SIO0_S].icucr = 0;
+ disable_oaks32r_irq(M32R_IRQ_SIO0_S);
+
+ /* SIO1_R : uart receive data */
+ irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO1_R].handler = &oaks32r_irq_type;
+ irq_desc[M32R_IRQ_SIO1_R].action = 0;
+ irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+ icu_data[M32R_IRQ_SIO1_R].icucr = 0;
+ disable_oaks32r_irq(M32R_IRQ_SIO1_R);
+
+ /* SIO1_S : uart send data */
+ irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO1_S].handler = &oaks32r_irq_type;
+ irq_desc[M32R_IRQ_SIO1_S].action = 0;
+ irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+ icu_data[M32R_IRQ_SIO1_S].icucr = 0;
+ disable_oaks32r_irq(M32R_IRQ_SIO1_S);
+#endif /* CONFIG_SERIAL_M32R_SIO */
+
+}
diff --git a/arch/m32r/kernel/setup_opsput.c b/arch/m32r/kernel/setup_opsput.c
new file mode 100644
index 00000000000..84315e344c5
--- /dev/null
+++ b/arch/m32r/kernel/setup_opsput.c
@@ -0,0 +1,482 @@
+/*
+ * linux/arch/m32r/kernel/setup_opsput.c
+ *
+ * Setup routines for Renesas OPSPUT Board
+ *
+ * Copyright (c) 2002-2004
+ * Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto, Takeo Takahashi, Mamoru Sakugawa
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/config.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include <asm/system.h>
+#include <asm/m32r.h>
+#include <asm/io.h>
+
+/*
+ * OPSP Interrupt Control Unit (Level 1)
+ */
+#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
+
+#ifndef CONFIG_SMP
+typedef struct {
+ unsigned long icucr; /* ICU Control Register */
+} icu_data_t;
+#endif /* CONFIG_SMP */
+
+static icu_data_t icu_data[OPSPUT_NUM_CPU_IRQ];
+
+static void disable_opsput_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7;
+ outl(data, port);
+}
+
+static void enable_opsput_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6;
+ outl(data, port);
+}
+
+static void mask_and_ack_opsput(unsigned int irq)
+{
+ disable_opsput_irq(irq);
+}
+
+static void end_opsput_irq(unsigned int irq)
+{
+ enable_opsput_irq(irq);
+}
+
+static unsigned int startup_opsput_irq(unsigned int irq)
+{
+ enable_opsput_irq(irq);
+ return (0);
+}
+
+static void shutdown_opsput_irq(unsigned int irq)
+{
+ unsigned long port;
+
+ port = irq2port(irq);
+ outl(M32R_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type opsput_irq_type =
+{
+ "OPSPUT-IRQ",
+ startup_opsput_irq,
+ shutdown_opsput_irq,
+ enable_opsput_irq,
+ disable_opsput_irq,
+ mask_and_ack_opsput,
+ end_opsput_irq
+};
+
+/*
+ * Interrupt Control Unit of PLD on OPSPUT (Level 2)
+ */
+#define irq2pldirq(x) ((x) - OPSPUT_PLD_IRQ_BASE)
+#define pldirq2port(x) (unsigned long)((int)PLD_ICUCR1 + \
+ (((x) - 1) * sizeof(unsigned short)))
+
+typedef struct {
+ unsigned short icucr; /* ICU Control Register */
+} pld_icu_data_t;
+
+static pld_icu_data_t pld_icu_data[OPSPUT_NUM_PLD_IRQ];
+
+static void disable_opsput_pld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2pldirq(irq);
+// disable_opsput_irq(M32R_IRQ_INT1);
+ port = pldirq2port(pldirq);
+ data = pld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
+ outw(data, port);
+}
+
+static void enable_opsput_pld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2pldirq(irq);
+// enable_opsput_irq(M32R_IRQ_INT1);
+ port = pldirq2port(pldirq);
+ data = pld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
+ outw(data, port);
+}
+
+static void mask_and_ack_opsput_pld(unsigned int irq)
+{
+ disable_opsput_pld_irq(irq);
+// mask_and_ack_opsput(M32R_IRQ_INT1);
+}
+
+static void end_opsput_pld_irq(unsigned int irq)
+{
+ enable_opsput_pld_irq(irq);
+ end_opsput_irq(M32R_IRQ_INT1);
+}
+
+static unsigned int startup_opsput_pld_irq(unsigned int irq)
+{
+ enable_opsput_pld_irq(irq);
+ return (0);
+}
+
+static void shutdown_opsput_pld_irq(unsigned int irq)
+{
+ unsigned long port;
+ unsigned int pldirq;
+
+ pldirq = irq2pldirq(irq);
+// shutdown_opsput_irq(M32R_IRQ_INT1);
+ port = pldirq2port(pldirq);
+ outw(PLD_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type opsput_pld_irq_type =
+{
+ "OPSPUT-PLD-IRQ",
+ startup_opsput_pld_irq,
+ shutdown_opsput_pld_irq,
+ enable_opsput_pld_irq,
+ disable_opsput_pld_irq,
+ mask_and_ack_opsput_pld,
+ end_opsput_pld_irq
+};
+
+/*
+ * Interrupt Control Unit of PLD on OPSPUT-LAN (Level 2)
+ */
+#define irq2lanpldirq(x) ((x) - OPSPUT_LAN_PLD_IRQ_BASE)
+#define lanpldirq2port(x) (unsigned long)((int)OPSPUT_LAN_ICUCR1 + \
+ (((x) - 1) * sizeof(unsigned short)))
+
+static pld_icu_data_t lanpld_icu_data[OPSPUT_NUM_LAN_PLD_IRQ];
+
+static void disable_opsput_lanpld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2lanpldirq(irq);
+ port = lanpldirq2port(pldirq);
+ data = lanpld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
+ outw(data, port);
+}
+
+static void enable_opsput_lanpld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2lanpldirq(irq);
+ port = lanpldirq2port(pldirq);
+ data = lanpld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
+ outw(data, port);
+}
+
+static void mask_and_ack_opsput_lanpld(unsigned int irq)
+{
+ disable_opsput_lanpld_irq(irq);
+}
+
+static void end_opsput_lanpld_irq(unsigned int irq)
+{
+ enable_opsput_lanpld_irq(irq);
+ end_opsput_irq(M32R_IRQ_INT0);
+}
+
+static unsigned int startup_opsput_lanpld_irq(unsigned int irq)
+{
+ enable_opsput_lanpld_irq(irq);
+ return (0);
+}
+
+static void shutdown_opsput_lanpld_irq(unsigned int irq)
+{
+ unsigned long port;
+ unsigned int pldirq;
+
+ pldirq = irq2lanpldirq(irq);
+ port = lanpldirq2port(pldirq);
+ outw(PLD_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type opsput_lanpld_irq_type =
+{
+ "OPSPUT-PLD-LAN-IRQ",
+ startup_opsput_lanpld_irq,
+ shutdown_opsput_lanpld_irq,
+ enable_opsput_lanpld_irq,
+ disable_opsput_lanpld_irq,
+ mask_and_ack_opsput_lanpld,
+ end_opsput_lanpld_irq
+};
+
+/*
+ * Interrupt Control Unit of PLD on OPSPUT-LCD (Level 2)
+ */
+#define irq2lcdpldirq(x) ((x) - OPSPUT_LCD_PLD_IRQ_BASE)
+#define lcdpldirq2port(x) (unsigned long)((int)OPSPUT_LCD_ICUCR1 + \
+ (((x) - 1) * sizeof(unsigned short)))
+
+static pld_icu_data_t lcdpld_icu_data[OPSPUT_NUM_LCD_PLD_IRQ];
+
+static void disable_opsput_lcdpld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2lcdpldirq(irq);
+ port = lcdpldirq2port(pldirq);
+ data = lcdpld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
+ outw(data, port);
+}
+
+static void enable_opsput_lcdpld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2lcdpldirq(irq);
+ port = lcdpldirq2port(pldirq);
+ data = lcdpld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
+ outw(data, port);
+}
+
+static void mask_and_ack_opsput_lcdpld(unsigned int irq)
+{
+ disable_opsput_lcdpld_irq(irq);
+}
+
+static void end_opsput_lcdpld_irq(unsigned int irq)
+{
+ enable_opsput_lcdpld_irq(irq);
+ end_opsput_irq(M32R_IRQ_INT2);
+}
+
+static unsigned int startup_opsput_lcdpld_irq(unsigned int irq)
+{
+ enable_opsput_lcdpld_irq(irq);
+ return (0);
+}
+
+static void shutdown_opsput_lcdpld_irq(unsigned int irq)
+{
+ unsigned long port;
+ unsigned int pldirq;
+
+ pldirq = irq2lcdpldirq(irq);
+ port = lcdpldirq2port(pldirq);
+ outw(PLD_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type opsput_lcdpld_irq_type =
+{
+ "OPSPUT-PLD-LCD-IRQ",
+ startup_opsput_lcdpld_irq,
+ shutdown_opsput_lcdpld_irq,
+ enable_opsput_lcdpld_irq,
+ disable_opsput_lcdpld_irq,
+ mask_and_ack_opsput_lcdpld,
+ end_opsput_lcdpld_irq
+};
+
+void __init init_IRQ(void)
+{
+#if defined(CONFIG_SMC91X)
+ /* INT#0: LAN controller on OPSPUT-LAN (SMC91C111)*/
+ irq_desc[OPSPUT_LAN_IRQ_LAN].status = IRQ_DISABLED;
+ irq_desc[OPSPUT_LAN_IRQ_LAN].handler = &opsput_lanpld_irq_type;
+ irq_desc[OPSPUT_LAN_IRQ_LAN].action = 0;
+ irq_desc[OPSPUT_LAN_IRQ_LAN].depth = 1; /* disable nested irq */
+ lanpld_icu_data[irq2lanpldirq(OPSPUT_LAN_IRQ_LAN)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* "H" edge sense */
+ disable_opsput_lanpld_irq(OPSPUT_LAN_IRQ_LAN);
+#endif /* CONFIG_SMC91X */
+
+ /* MFT2 : system timer */
+ irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_MFT2].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_MFT2].action = 0;
+ irq_desc[M32R_IRQ_MFT2].depth = 1;
+ icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
+ disable_opsput_irq(M32R_IRQ_MFT2);
+
+ /* SIO0 : receive */
+ irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_R].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].action = 0;
+ irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ icu_data[M32R_IRQ_SIO0_R].icucr = 0;
+ disable_opsput_irq(M32R_IRQ_SIO0_R);
+
+ /* SIO0 : send */
+ irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_S].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].action = 0;
+ irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ icu_data[M32R_IRQ_SIO0_S].icucr = 0;
+ disable_opsput_irq(M32R_IRQ_SIO0_S);
+
+ /* SIO1 : receive */
+ irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO1_R].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_SIO1_R].action = 0;
+ irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+ icu_data[M32R_IRQ_SIO1_R].icucr = 0;
+ disable_opsput_irq(M32R_IRQ_SIO1_R);
+
+ /* SIO1 : send */
+ irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO1_S].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_SIO1_S].action = 0;
+ irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+ icu_data[M32R_IRQ_SIO1_S].icucr = 0;
+ disable_opsput_irq(M32R_IRQ_SIO1_S);
+
+ /* DMA1 : */
+ irq_desc[M32R_IRQ_DMA1].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_DMA1].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_DMA1].action = 0;
+ irq_desc[M32R_IRQ_DMA1].depth = 1;
+ icu_data[M32R_IRQ_DMA1].icucr = 0;
+ disable_opsput_irq(M32R_IRQ_DMA1);
+
+#ifdef CONFIG_SERIAL_M32R_PLDSIO
+ /* INT#1: SIO0 Receive on PLD */
+ irq_desc[PLD_IRQ_SIO0_RCV].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_SIO0_RCV].handler = &opsput_pld_irq_type;
+ irq_desc[PLD_IRQ_SIO0_RCV].action = 0;
+ irq_desc[PLD_IRQ_SIO0_RCV].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_RCV)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
+ disable_opsput_pld_irq(PLD_IRQ_SIO0_RCV);
+
+ /* INT#1: SIO0 Send on PLD */
+ irq_desc[PLD_IRQ_SIO0_SND].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_SIO0_SND].handler = &opsput_pld_irq_type;
+ irq_desc[PLD_IRQ_SIO0_SND].action = 0;
+ irq_desc[PLD_IRQ_SIO0_SND].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_SND)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
+ disable_opsput_pld_irq(PLD_IRQ_SIO0_SND);
+#endif /* CONFIG_SERIAL_M32R_PLDSIO */
+
+#if defined(CONFIG_M32R_CFC)
+ /* INT#1: CFC IREQ on PLD */
+ irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_CFIREQ].handler = &opsput_pld_irq_type;
+ irq_desc[PLD_IRQ_CFIREQ].action = 0;
+ irq_desc[PLD_IRQ_CFIREQ].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_CFIREQ)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* 'L' level sense */
+ disable_opsput_pld_irq(PLD_IRQ_CFIREQ);
+
+ /* INT#1: CFC Insert on PLD */
+ irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_CFC_INSERT].handler = &opsput_pld_irq_type;
+ irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
+ irq_desc[PLD_IRQ_CFC_INSERT].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_CFC_INSERT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD00; /* 'L' edge sense */
+ disable_opsput_pld_irq(PLD_IRQ_CFC_INSERT);
+
+ /* INT#1: CFC Eject on PLD */
+ irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_CFC_EJECT].handler = &opsput_pld_irq_type;
+ irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
+ irq_desc[PLD_IRQ_CFC_EJECT].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_CFC_EJECT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* 'H' edge sense */
+ disable_opsput_pld_irq(PLD_IRQ_CFC_EJECT);
+#endif /* CONFIG_M32R_CFC */
+
+
+ /*
+ * INT0# is used for LAN, DIO
+ * We enable it here.
+ */
+ icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD11;
+ enable_opsput_irq(M32R_IRQ_INT0);
+
+ /*
+ * INT1# is used for UART, MMC, CF Controller in FPGA.
+ * We enable it here.
+ */
+ icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD11;
+ enable_opsput_irq(M32R_IRQ_INT1);
+
+#if defined(CONFIG_USB)
+ outw(USBCR_OTGS, USBCR); /* USBCR: non-OTG */
+
+ irq_desc[OPSPUT_LCD_IRQ_USB_INT1].status = IRQ_DISABLED;
+ irq_desc[OPSPUT_LCD_IRQ_USB_INT1].handler = &opsput_lcdpld_irq_type;
+ irq_desc[OPSPUT_LCD_IRQ_USB_INT1].action = 0;
+ irq_desc[OPSPUT_LCD_IRQ_USB_INT1].depth = 1;
+ lcdpld_icu_data[irq2lcdpldirq(OPSPUT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* "L" level sense */
+ disable_opsput_lcdpld_irq(OPSPUT_LCD_IRQ_USB_INT1);
+#endif
+ /*
+ * INT2# is used for BAT, USB, AUDIO
+ * We enable it here.
+ */
+ icu_data[M32R_IRQ_INT2].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
+ enable_opsput_irq(M32R_IRQ_INT2);
+
+//#if defined(CONFIG_VIDEO_M32R_AR)
+ /*
+ * INT3# is used for AR
+ */
+ irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_INT3].handler = &opsput_irq_type;
+ irq_desc[M32R_IRQ_INT3].action = 0;
+ irq_desc[M32R_IRQ_INT3].depth = 1;
+ icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
+ disable_opsput_irq(M32R_IRQ_INT3);
+//#endif /* CONFIG_VIDEO_M32R_AR */
+}
+
+#define LAN_IOSTART 0x300
+#define LAN_IOEND 0x320
+static struct resource smc91x_resources[] = {
+ [0] = {
+ .start = (LAN_IOSTART),
+ .end = (LAN_IOEND),
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = OPSPUT_LAN_IRQ_LAN,
+ .end = OPSPUT_LAN_IRQ_LAN,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+
+static int __init platform_init(void)
+{
+ platform_device_register(&smc91x_device);
+ return 0;
+}
+arch_initcall(platform_init);
diff --git a/arch/m32r/kernel/setup_usrv.c b/arch/m32r/kernel/setup_usrv.c
new file mode 100644
index 00000000000..fe417be5e3e
--- /dev/null
+++ b/arch/m32r/kernel/setup_usrv.c
@@ -0,0 +1,256 @@
+/*
+ * linux/arch/m32r/kernel/setup_usrv.c
+ *
+ * Setup routines for MITSUBISHI uServer
+ *
+ * Copyright (c) 2001, 2002, 2003 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto
+ */
+
+#include <linux/config.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/m32r.h>
+#include <asm/io.h>
+
+#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
+
+#if !defined(CONFIG_SMP)
+typedef struct {
+ unsigned long icucr; /* ICU Control Register */
+} icu_data_t;
+#endif /* CONFIG_SMP */
+
+icu_data_t icu_data[M32700UT_NUM_CPU_IRQ];
+
+static void disable_mappi_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7;
+ outl(data, port);
+}
+
+static void enable_mappi_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6;
+ outl(data, port);
+}
+
+static void mask_and_ack_mappi(unsigned int irq)
+{
+ disable_mappi_irq(irq);
+}
+
+static void end_mappi_irq(unsigned int irq)
+{
+ enable_mappi_irq(irq);
+}
+
+static unsigned int startup_mappi_irq(unsigned int irq)
+{
+ enable_mappi_irq(irq);
+ return 0;
+}
+
+static void shutdown_mappi_irq(unsigned int irq)
+{
+ unsigned long port;
+
+ port = irq2port(irq);
+ outl(M32R_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type mappi_irq_type =
+{
+ "M32700-IRQ",
+ startup_mappi_irq,
+ shutdown_mappi_irq,
+ enable_mappi_irq,
+ disable_mappi_irq,
+ mask_and_ack_mappi,
+ end_mappi_irq
+};
+
+/*
+ * Interrupt Control Unit of PLD on M32700UT (Level 2)
+ */
+#define irq2pldirq(x) ((x) - M32700UT_PLD_IRQ_BASE)
+#define pldirq2port(x) (unsigned long)((int)PLD_ICUCR1 + \
+ (((x) - 1) * sizeof(unsigned short)))
+
+typedef struct {
+ unsigned short icucr; /* ICU Control Register */
+} pld_icu_data_t;
+
+static pld_icu_data_t pld_icu_data[M32700UT_NUM_PLD_IRQ];
+
+static void disable_m32700ut_pld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2pldirq(irq);
+ port = pldirq2port(pldirq);
+ data = pld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
+ outw(data, port);
+}
+
+static void enable_m32700ut_pld_irq(unsigned int irq)
+{
+ unsigned long port, data;
+ unsigned int pldirq;
+
+ pldirq = irq2pldirq(irq);
+ port = pldirq2port(pldirq);
+ data = pld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
+ outw(data, port);
+}
+
+static void mask_and_ack_m32700ut_pld(unsigned int irq)
+{
+ disable_m32700ut_pld_irq(irq);
+}
+
+static void end_m32700ut_pld_irq(unsigned int irq)
+{
+ enable_m32700ut_pld_irq(irq);
+ end_mappi_irq(M32R_IRQ_INT1);
+}
+
+static unsigned int startup_m32700ut_pld_irq(unsigned int irq)
+{
+ enable_m32700ut_pld_irq(irq);
+ return 0;
+}
+
+static void shutdown_m32700ut_pld_irq(unsigned int irq)
+{
+ unsigned long port;
+ unsigned int pldirq;
+
+ pldirq = irq2pldirq(irq);
+ port = pldirq2port(pldirq);
+ outw(PLD_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type m32700ut_pld_irq_type =
+{
+ "USRV-PLD-IRQ",
+ startup_m32700ut_pld_irq,
+ shutdown_m32700ut_pld_irq,
+ enable_m32700ut_pld_irq,
+ disable_m32700ut_pld_irq,
+ mask_and_ack_m32700ut_pld,
+ end_m32700ut_pld_irq
+};
+
+void __init init_IRQ(void)
+{
+ static int once = 0;
+ int i;
+
+ if (once)
+ return;
+ else
+ once++;
+
+ /* MFT2 : system timer */
+ irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_MFT2].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_MFT2].action = 0;
+ irq_desc[M32R_IRQ_MFT2].depth = 1;
+ icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
+ disable_mappi_irq(M32R_IRQ_MFT2);
+
+#if defined(CONFIG_SERIAL_M32R_SIO)
+ /* SIO0_R : uart receive data */
+ irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_R].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].action = 0;
+ irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ icu_data[M32R_IRQ_SIO0_R].icucr = 0;
+ disable_mappi_irq(M32R_IRQ_SIO0_R);
+
+ /* SIO0_S : uart send data */
+ irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_S].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].action = 0;
+ irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ icu_data[M32R_IRQ_SIO0_S].icucr = 0;
+ disable_mappi_irq(M32R_IRQ_SIO0_S);
+
+ /* SIO1_R : uart receive data */
+ irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO1_R].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO1_R].action = 0;
+ irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+ icu_data[M32R_IRQ_SIO1_R].icucr = 0;
+ disable_mappi_irq(M32R_IRQ_SIO1_R);
+
+ /* SIO1_S : uart send data */
+ irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO1_S].handler = &mappi_irq_type;
+ irq_desc[M32R_IRQ_SIO1_S].action = 0;
+ irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+ icu_data[M32R_IRQ_SIO1_S].icucr = 0;
+ disable_mappi_irq(M32R_IRQ_SIO1_S);
+#endif /* CONFIG_SERIAL_M32R_SIO */
+
+ /* INT#67-#71: CFC#0 IREQ on PLD */
+ for (i = 0; i < CONFIG_CFC_NUM; i++) {
+ irq_desc[PLD_IRQ_CF0 + i].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_CF0 + i].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_CF0 + i].action = 0;
+ irq_desc[PLD_IRQ_CF0 + i].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_CF0 + i)].icucr
+ = PLD_ICUCR_ISMOD01; /* 'L' level sense */
+ disable_m32700ut_pld_irq(PLD_IRQ_CF0 + i);
+ }
+
+#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
+ /* INT#76: 16552D#0 IREQ on PLD */
+ irq_desc[PLD_IRQ_UART0].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_UART0].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_UART0].action = 0;
+ irq_desc[PLD_IRQ_UART0].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_UART0)].icucr
+ = PLD_ICUCR_ISMOD03; /* 'H' level sense */
+ disable_m32700ut_pld_irq(PLD_IRQ_UART0);
+
+ /* INT#77: 16552D#1 IREQ on PLD */
+ irq_desc[PLD_IRQ_UART1].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_UART1].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_UART1].action = 0;
+ irq_desc[PLD_IRQ_UART1].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_UART1)].icucr
+ = PLD_ICUCR_ISMOD03; /* 'H' level sense */
+ disable_m32700ut_pld_irq(PLD_IRQ_UART1);
+#endif /* CONFIG_SERIAL_8250 || CONFIG_SERIAL_8250_MODULE */
+
+#if defined(CONFIG_IDC_AK4524) || defined(CONFIG_IDC_AK4524_MODULE)
+ /* INT#80: AK4524 IREQ on PLD */
+ irq_desc[PLD_IRQ_SNDINT].status = IRQ_DISABLED;
+ irq_desc[PLD_IRQ_SNDINT].handler = &m32700ut_pld_irq_type;
+ irq_desc[PLD_IRQ_SNDINT].action = 0;
+ irq_desc[PLD_IRQ_SNDINT].depth = 1; /* disable nested irq */
+ pld_icu_data[irq2pldirq(PLD_IRQ_SNDINT)].icucr
+ = PLD_ICUCR_ISMOD01; /* 'L' level sense */
+ disable_m32700ut_pld_irq(PLD_IRQ_SNDINT);
+#endif /* CONFIG_IDC_AK4524 || CONFIG_IDC_AK4524_MODULE */
+
+ /*
+ * INT1# is used for UART, MMC, CF Controller in FPGA.
+ * We enable it here.
+ */
+ icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD11;
+ enable_mappi_irq(M32R_IRQ_INT1);
+}
+
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
new file mode 100644
index 00000000000..50311eb07a2
--- /dev/null
+++ b/arch/m32r/kernel/signal.c
@@ -0,0 +1,438 @@
+/*
+ * linux/arch/m32r/kernel/signal.c
+ *
+ * Copyright (c) 2003 Hitoshi Yamamoto
+ *
+ * Taken from i386 version.
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/personality.h>
+#include <linux/suspend.h>
+#include <asm/cacheflush.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+int do_signal(struct pt_regs *, sigset_t *);
+
+asmlinkage int
+sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
+ unsigned long r2, unsigned long r3, unsigned long r4,
+ unsigned long r5, unsigned long r6, struct pt_regs regs)
+{
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs.r0 = -EINTR;
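+ /*
+ * Sleep until do_signal() reports a delivered signal; r0 already
+ * holds -EINTR, which is what the interrupted caller will see.
+ */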
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&regs, &saveset))
+ return regs.r0;
+ }
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+ unsigned long r2, unsigned long r3, unsigned long r4,
+ unsigned long r5, unsigned long r6, struct pt_regs regs)
+{
+ return do_sigaltstack(uss, uoss, regs.spu);
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+struct rt_sigframe
+{
+ int sig;
+ struct siginfo *pinfo;
+ void *puc;
+ struct siginfo info;
+ struct ucontext uc;
+// struct _fpstate fpstate;
+};
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ int *r0_p)
+{
+ unsigned int err = 0;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
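+ /*
+ * Each COPY() below reads one saved register from the user-space
+ * sigcontext back into pt_regs, OR-ing any fault into err.
+ */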
+ COPY(r4);
+ COPY(r5);
+ COPY(r6);
+ COPY(pt_regs);
+ /* COPY(r0); Skip r0 */
+ COPY(r1);
+ COPY(r2);
+ COPY(r3);
+ COPY(r7);
+ COPY(r8);
+ COPY(r9);
+ COPY(r10);
+ COPY(r11);
+ COPY(r12);
+#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
+ COPY(acc0h);
+ COPY(acc0l);
+ COPY(acc1h);
+ COPY(acc1l);
+#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
+ COPY(acch);
+ COPY(accl);
+#else
+#error unknown isa configuration
+#endif
+ COPY(psw);
+ COPY(bpc);
+ COPY(bbpsw);
+ COPY(bbpc);
+ COPY(spu);
+ COPY(fp);
+ COPY(lr);
+ COPY(spi);
+#undef COPY
+
+ regs->syscall_nr = -1; /* disable syscall checks */
+ err |= __get_user(*r0_p, &sc->sc_r0);
+
+ return err;
+}
+
+asmlinkage int
+sys_rt_sigreturn(unsigned long r0, unsigned long r1,
+ unsigned long r2, unsigned long r3, unsigned long r4,
+ unsigned long r5, unsigned long r6, struct pt_regs regs)
+{
+ struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.spu;
+ sigset_t set;
+ stack_t st;
+ int result;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &result))
+ goto badframe;
+
+ if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ goto badframe;
+ /* It is more difficult to avoid calling this function than to
+ call it and ignore errors. */
+ do_sigaltstack(&st, NULL, regs.spu);
+
+ return result;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ int err = 0;
+
+#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
+ COPY(r4);
+ COPY(r5);
+ COPY(r6);
+ COPY(pt_regs);
+ COPY(r0);
+ COPY(r1);
+ COPY(r2);
+ COPY(r3);
+ COPY(r7);
+ COPY(r8);
+ COPY(r9);
+ COPY(r10);
+ COPY(r11);
+ COPY(r12);
+#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
+ COPY(acc0h);
+ COPY(acc0l);
+ COPY(acc1h);
+ COPY(acc1l);
+#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
+ COPY(acch);
+ COPY(accl);
+#else
+#error unknown isa configuration
+#endif
+ COPY(psw);
+ COPY(bpc);
+ COPY(bbpsw);
+ COPY(bbpc);
+ COPY(spu);
+ COPY(fp);
+ COPY(lr);
+ COPY(spi);
+#undef COPY
+
+ err |= __put_user(mask, &sc->oldmask);
+
+ return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+{
+ /* This is the X/Open sanctioned signal stack switching. */
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ if (sas_ss_flags(sp) == 0)
+ sp = current->sas_ss_sp + current->sas_ss_size;
+ }
+
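+ /* Round the frame start down to an 8-byte boundary. */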
+ return (void __user *)((sp - frame_size) & -8ul);
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs->spu, sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
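+ /*
+ * Translate the signal number through the exec domain's inverse
+ * map, when one is installed and the signal is below 32.
+ */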
+ signal = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= __put_user(signal, &frame->sig);
+ if (err)
+ goto give_sigsegv;
+
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+ err |= copy_siginfo_to_user(&frame->info, info);
+ if (err)
+ goto give_sigsegv;
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->spu),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. */
+ regs->lr = (unsigned long)ka->sa.sa_restorer;
+
+ /* Set up registers for signal handler */
+ regs->spu = (unsigned long)frame;
+ regs->r0 = signal; /* Arg for signal handler */
+ regs->r1 = (unsigned long)&frame->info;
+ regs->r2 = (unsigned long)&frame->uc;
+ regs->bpc = (unsigned long)ka->sa.sa_handler;
+
+ set_fs(USER_DS);
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%p\n",
+ current->comm, current->pid, frame, regs->pc);
+#endif
+
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs)
+{
+ unsigned short inst;
+
+ /* Are we from a system call? */
+ if (regs->syscall_nr >= 0) {
+ /* If so, check system call restarting.. */
+ switch (regs->r0) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->r0 = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->r0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->r0 = regs->orig_r0;
+ inst = *(unsigned short *)(regs->bpc - 2);
+ if ((inst & 0xfff0) == 0x10f0) /* trap ? */
+ regs->bpc -= 2;
+ else
+ regs->bpc -= 4;
+ }
+ }
+
+ /* Set up the stack frame */
+ setup_rt_frame(sig, ka, info, oldset, regs);
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+int do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+ siginfo_t info;
+ int signr;
+ struct k_sigaction ka;
+ unsigned short inst;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return 1;
+
+ if (current->flags & PF_FREEZE) {
+ refrigerator(0);
+ goto no_signal;
+ }
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ /* Reenable any watchpoints before delivering the
+ * signal to user space. The processor register will
+ * have been cleared if the watchpoint triggered
+ * inside the kernel.
+ */
+
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &ka, &info, oldset, regs);
+ return 1;
+ }
+
+ no_signal:
+ /* Did we come from a system call? */
+ if (regs->syscall_nr >= 0) {
+ /* Restart the system call - no handlers present */
+ if (regs->r0 == -ERESTARTNOHAND ||
+ regs->r0 == -ERESTARTSYS ||
+ regs->r0 == -ERESTARTNOINTR) {
+ regs->r0 = regs->orig_r0;
+ inst = *(unsigned short *)(regs->bpc - 2);
+ if ((inst & 0xfff0) == 0x10f0) /* trap ? */
+ regs->bpc -= 2;
+ else
+ regs->bpc -= 4;
+ }
+ if (regs->r0 == -ERESTART_RESTARTBLOCK) {
+ regs->r0 = regs->orig_r0;
+ regs->r7 = __NR_restart_syscall;
+ inst = *(unsigned short *)(regs->bpc - 2);
+ if ((inst & 0xfff0) == 0x10f0) /* trap ? */
+ regs->bpc -= 2;
+ else
+ regs->bpc -= 4;
+ }
+ }
+ return 0;
+}
+
+/*
+ * notification of userspace execution resumption
+ * - triggered by current->work.notify_resume
+ */
+void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
+ __u32 thread_info_flags)
+{
+ /* Pending single-step? */
+ if (thread_info_flags & _TIF_SINGLESTEP)
+ clear_thread_flag(TIF_SINGLESTEP);
+
+ /* deal with pending signal delivery */
+ if (thread_info_flags & _TIF_SIGPENDING)
+ do_signal(regs, oldset);
+
+ clear_thread_flag(TIF_IRET);
+}
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
new file mode 100644
index 00000000000..48b187f2d2b
--- /dev/null
+++ b/arch/m32r/kernel/smp.c
@@ -0,0 +1,965 @@
+/*
+ * linux/arch/m32r/kernel/smp.c
+ *
+ * M32R SMP support routines.
+ *
+ * Copyright (c) 2001, 2002 Hitoshi Yamamoto
+ *
+ * Taken from i386 version.
+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
+ *
+ * This code is released under the GNU General Public License version 2 or
+ * later.
+ */
+
+#undef DEBUG_SMP
+
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/profile.h>
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/m32r.h>
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Data structures and variables */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*
+ * Structure and data for smp_call_function(). This is designed to minimise
+ * static memory requirements. It also looks cleaner.
+ */
+static DEFINE_SPINLOCK(call_lock);
+
+struct call_data_struct {
+ void (*func) (void *info);
+ void *info;
+ atomic_t started;
+ atomic_t finished;
+ int wait;
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
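+/*
+ * call_data points at the caller's descriptor for the smp_call_function()
+ * request currently in flight.
+ */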
+static struct call_data_struct *call_data;
+
+/*
+ * For flush_cache_all()
+ */
+static DEFINE_SPINLOCK(flushcache_lock);
+static volatile unsigned long flushcache_cpumask = 0;
+
+/*
+ * For flush_tlb_others()
+ */
+static volatile cpumask_t flush_cpumask;
+static struct mm_struct *flush_mm;
+static struct vm_area_struct *flush_vma;
+static volatile unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+#define FLUSH_ALL 0xffffffff
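+/*
+ * flush_mm/flush_vma/flush_va describe the request published to the CPUs
+ * named in flush_cpumask; flush_va holds either a single page address or
+ * FLUSH_ALL for a full TLB flush (the same scheme as the i386 code this
+ * file was taken from).
+ */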
+
+DECLARE_PER_CPU(int, prof_multiplier);
+DECLARE_PER_CPU(int, prof_old_multiplier);
+DECLARE_PER_CPU(int, prof_counter);
+
+extern spinlock_t ipi_lock[];
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Function Prototypes */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+void smp_send_reschedule(int);
+void smp_reschedule_interrupt(void);
+
+void smp_flush_cache_all(void);
+void smp_flush_cache_all_interrupt(void);
+
+void smp_flush_tlb_all(void);
+static void flush_tlb_all_ipi(void *);
+
+void smp_flush_tlb_mm(struct mm_struct *);
+void smp_flush_tlb_range(struct vm_area_struct *, unsigned long, \
+ unsigned long);
+void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
+static void flush_tlb_others(cpumask_t, struct mm_struct *,
+ struct vm_area_struct *, unsigned long);
+void smp_invalidate_interrupt(void);
+
+void smp_send_stop(void);
+static void stop_this_cpu(void *);
+
+int smp_call_function(void (*) (void *), void *, int, int);
+void smp_call_function_interrupt(void);
+
+void smp_send_timer(void);
+void smp_ipi_timer_interrupt(struct pt_regs *);
+void smp_local_timer_interrupt(struct pt_regs *);
+
+void send_IPI_allbutself(int, int);
+static void send_IPI_mask(cpumask_t, int, int);
+unsigned long send_IPI_mask_phys(cpumask_t, int, int);
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Rescheduling request Routines */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*==========================================================================*
+ * Name: smp_send_reschedule
+ *
+ * Description:  This routine requests another CPU to reschedule.
+ *               1. Send 'RESCHEDULE_IPI' to the target CPU, asking it to
+ *                  execute 'smp_reschedule_interrupt()'.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: cpu_id - Target CPU ID
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_send_reschedule(int cpu_id)
+{
+ WARN_ON(cpu_is_offline(cpu_id));
+ send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+}
+
+/*==========================================================================*
+ * Name: smp_reschedule_interrupt
+ *
+ * Description:  This routine executes on the CPU that received
+ * 'RESCHEDULE_IPI'.
+ * Rescheduling is processed at the exit of interrupt
+ * operation.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: NONE
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_reschedule_interrupt(void)
+{
+ /* nothing to do */
+}
+
+/*==========================================================================*
+ * Name: smp_flush_cache_all
+ *
+ * Description:  This routine sends an 'INVALIDATE_CACHE_IPI' to all other
+ * CPUs in the system.
+ *
+ * Born on Date: 2003-05-28
+ *
+ * Arguments: NONE
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_flush_cache_all(void)
+{
+ cpumask_t cpumask;
+ unsigned long *mask;
+
+ preempt_disable();
+ cpumask = cpu_online_map;
+ cpu_clear(smp_processor_id(), cpumask);
+ spin_lock(&flushcache_lock);
+ mask=cpus_addr(cpumask);
+ atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+ send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+ _flush_cache_copyback_all();
+ while (flushcache_cpumask)
+ mb();
+ spin_unlock(&flushcache_lock);
+ preempt_enable();
+}
+
+void smp_flush_cache_all_interrupt(void)
+{
+ _flush_cache_copyback_all();
+ clear_bit(smp_processor_id(), &flushcache_cpumask);
+}
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* TLB flush request Routines                                              */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*==========================================================================*
+ * Name: smp_flush_tlb_all
+ *
+ * Description:  This routine flushes all processes' TLBs.
+ *               1. Request other CPUs to execute 'flush_tlb_all_ipi()'.
+ *               2. Execute 'do_flush_tlb_all_local()'.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: NONE
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_flush_tlb_all(void)
+{
+ unsigned long flags;
+
+ preempt_disable();
+ local_irq_save(flags);
+ __flush_tlb_all();
+ local_irq_restore(flags);
+ smp_call_function(flush_tlb_all_ipi, 0, 1, 1);
+ preempt_enable();
+}
+
+/*==========================================================================*
+ * Name: flush_tlb_all_ipi
+ *
+ * Description: This routine flushes all local TLBs.
+ * 1.Execute 'do_flush_tlb_all_local()'.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: *info - not used
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+static void flush_tlb_all_ipi(void *info)
+{
+ __flush_tlb_all();
+}
+
+/*==========================================================================*
+ * Name: smp_flush_tlb_mm
+ *
+ * Description:  This routine flushes the specified mm context's TLB entries.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    *mm - a pointer to the mm struct whose TLB is flushed
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_flush_tlb_mm(struct mm_struct *mm)
+{
+ int cpu_id = smp_processor_id();
+ cpumask_t cpu_mask;
+ unsigned long *mmc = &mm->context[cpu_id];
+ unsigned long flags;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(cpu_id, cpu_mask);
+
+ if (*mmc != NO_CONTEXT) {
+ local_irq_save(flags);
+ *mmc = NO_CONTEXT;
+ if (mm == current->mm)
+ activate_context(mm);
+ else
+ cpu_clear(cpu_id, mm->cpu_vm_mask);
+ local_irq_restore(flags);
+ }
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);
+
+ preempt_enable();
+}
+
+/*==========================================================================*
+ * Name: smp_flush_tlb_range
+ *
+ * Description: This routine flushes a range of pages.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    *vma - a pointer to the vma struct whose mm's TLB is flushed
+ *               start - not used
+ *               end - not used
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ smp_flush_tlb_mm(vma->vm_mm);
+}
+
+/*==========================================================================*
+ * Name: smp_flush_tlb_page
+ *
+ * Description: This routine flushes one page.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    *vma - a pointer to the vma struct that includes va
+ *               va - the virtual address to flush from the TLB
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int cpu_id = smp_processor_id();
+ cpumask_t cpu_mask;
+ unsigned long *mmc = &mm->context[cpu_id];
+ unsigned long flags;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(cpu_id, cpu_mask);
+
+#ifdef DEBUG_SMP
+ if (!mm)
+ BUG();
+#endif
+
+ if (*mmc != NO_CONTEXT) {
+ local_irq_save(flags);
+ va &= PAGE_MASK;
+ va |= (*mmc & MMU_CONTEXT_ASID_MASK);
+ __flush_tlb_page(va);
+ local_irq_restore(flags);
+ }
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, vma, va);
+
+ preempt_enable();
+}
+
+/*==========================================================================*
+ * Name: flush_tlb_others
+ *
+ * Description:  This routine requests the other CPUs to flush their TLBs.
+ *               1. Set up parameters.
+ *               2. Send 'INVALIDATE_TLB_IPI' to the other CPUs, requesting
+ *                  them to execute 'smp_invalidate_interrupt()'.
+ *               3. Wait until the other CPUs have finished the operation.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: cpumask - bitmap of target CPUs
+ *               *mm - a pointer to the mm struct whose TLB is flushed
+ *               *vma - a pointer to the vma struct that includes va
+ *               va - the virtual address to flush from the TLB
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long va)
+{
+ unsigned long *mask;
+#ifdef DEBUG_SMP
+ unsigned long flags;
+ __save_flags(flags);
+ if (!(flags & 0x0040)) /* Interrupt Disable NONONO */
+ BUG();
+#endif /* DEBUG_SMP */
+
+ /*
+ * A couple of (to be removed) sanity checks:
+ *
+ * - we do not send IPIs to not-yet booted CPUs.
+ * - current CPU must not be in mask
+ * - mask must exist :)
+ */
+ BUG_ON(cpus_empty(cpumask));
+
+ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+ BUG_ON(!mm);
+
+ /* If a CPU which we ran on has gone down, OK. */
+ cpus_and(cpumask, cpumask, cpu_online_map);
+ if (cpus_empty(cpumask))
+ return;
+
+ /*
+	 * I'm not happy about this global shared spinlock in the
+ * MM hot path, but we'll see how contended it is.
+ * Temporarily this turns IRQs off, so that lockups are
+ * detected by the NMI watchdog.
+ */
+ spin_lock(&tlbstate_lock);
+
+ flush_mm = mm;
+ flush_vma = vma;
+ flush_va = va;
+ mask=cpus_addr(cpumask);
+ atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+
+ /*
+ * We have to send the IPI only to
+ * CPUs affected.
+ */
+ send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+
+ while (!cpus_empty(flush_cpumask)) {
+ /* nothing. lockup detection does not belong here */
+ mb();
+ }
+
+ flush_mm = NULL;
+ flush_vma = NULL;
+ flush_va = 0;
+ spin_unlock(&tlbstate_lock);
+}
+
+/*==========================================================================*
+ * Name: smp_invalidate_interrupt
+ *
+ * Description:  This routine executes on the CPU that received
+ * 'INVALIDATE_TLB_IPI'.
+ * 1.Flush local TLB.
+ * 2.Report flush TLB process was finished.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: NONE
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_invalidate_interrupt(void)
+{
+ int cpu_id = smp_processor_id();
+ unsigned long *mmc = &flush_mm->context[cpu_id];
+
+ if (!cpu_isset(cpu_id, flush_cpumask))
+ return;
+
+ if (flush_va == FLUSH_ALL) {
+ *mmc = NO_CONTEXT;
+ if (flush_mm == current->active_mm)
+ activate_context(flush_mm);
+ else
+ cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+ } else {
+ unsigned long va = flush_va;
+
+ if (*mmc != NO_CONTEXT) {
+ va &= PAGE_MASK;
+ va |= (*mmc & MMU_CONTEXT_ASID_MASK);
+ __flush_tlb_page(va);
+ }
+ }
+ cpu_clear(cpu_id, flush_cpumask);
+}
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Stop CPU request Routines                                                */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*==========================================================================*
+ * Name: smp_send_stop
+ *
+ * Description:  This routine requests all other CPUs to stop.
+ *               1. Request the other CPUs to execute 'stop_this_cpu()'.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: NONE
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_send_stop(void)
+{
+ smp_call_function(stop_this_cpu, NULL, 1, 0);
+}
+
+/*==========================================================================*
+ * Name: stop_this_cpu
+ *
+ * Description:  This routine halts the CPU.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: NONE
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+static void stop_this_cpu(void *dummy)
+{
+ int cpu_id = smp_processor_id();
+
+ /*
+ * Remove this CPU:
+ */
+ cpu_clear(cpu_id, cpu_online_map);
+
+ /*
+ * PSW IE = 1;
+ * IMASK = 0;
+ * goto SLEEP
+ */
+ local_irq_disable();
+ outl(0, M32R_ICU_IMASK_PORTL);
+ inl(M32R_ICU_IMASK_PORTL); /* dummy read */
+ local_irq_enable();
+
+ for ( ; ; );
+}
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Call function Routines                                                   */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*==========================================================================*
+ * Name: smp_call_function
+ *
+ * Description: This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs
+ * in the system.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: *func - The function to run. This must be fast and
+ * non-blocking.
+ * *info - An arbitrary pointer to pass to the function.
+ * nonatomic - currently unused.
+ * wait - If true, wait (atomically) until function has
+ * completed on other CPUs.
+ *
+ * Returns: 0 on success, else a negative status code. Does not return
+ *               until the remote CPUs are nearly ready to execute <<func>>
+ *               or have already executed it.
+ *
+ * Cautions: You must not call this function with disabled interrupts or
+ *               from a hardware interrupt handler; you may call it from a
+ * bottom half handler.
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+{
+ struct call_data_struct data;
+ int cpus;
+
+#ifdef DEBUG_SMP
+ unsigned long flags;
+ __save_flags(flags);
+ if (!(flags & 0x0040)) /* Interrupt Disable NONONO */
+ BUG();
+#endif /* DEBUG_SMP */
+
+ /* Holding any lock stops cpus from going down. */
+ spin_lock(&call_lock);
+ cpus = num_online_cpus() - 1;
+
+ if (!cpus) {
+ spin_unlock(&call_lock);
+ return 0;
+ }
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ data.wait = wait;
+ if (wait)
+ atomic_set(&data.finished, 0);
+
+ call_data = &data;
+ mb();
+
+ /* Send a message to all other CPUs and wait for them to respond */
+ send_IPI_allbutself(CALL_FUNCTION_IPI, 0);
+
+ /* Wait for response */
+ while (atomic_read(&data.started) != cpus)
+ barrier();
+
+ if (wait)
+ while (atomic_read(&data.finished) != cpus)
+ barrier();
+ spin_unlock(&call_lock);
+
+ return 0;
+}
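The calling convention documented above can be made concrete with a short editorial sketch (not part of this commit; drain_local_cache() and drain_all_caches() are hypothetical names, while smp_call_function() and _flush_cache_copyback_all() are routines that appear in this file):

    /* Hypothetical caller: flush the cache on every CPU and wait.
     * The callback runs in interrupt context on the remote CPUs,
     * so it must be fast and must not block. */
    static void drain_local_cache(void *unused)
    {
            _flush_cache_copyback_all();
    }

    static void drain_all_caches(void)
    {
            /* must not be called with IRQs off or from IRQ context */
            smp_call_function(drain_local_cache, NULL, 0, 1);
            drain_local_cache(NULL);        /* the IPI skips the calling CPU */
    }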
+
+/*==========================================================================*
+ * Name: smp_call_function_interrupt
+ *
+ * Description:  This routine executes on the CPU that received
+ * 'CALL_FUNCTION_IPI'.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: NONE
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_call_function_interrupt(void)
+{
+ void (*func) (void *info) = call_data->func;
+ void *info = call_data->info;
+ int wait = call_data->wait;
+
+ /*
+ * Notify initiating CPU that I've grabbed the data and am
+ * about to execute the function
+ */
+ mb();
+ atomic_inc(&call_data->started);
+ /*
+ * At this point the info structure may be out of scope unless wait==1
+ */
+ irq_enter();
+ (*func)(info);
+ irq_exit();
+
+ if (wait) {
+ mb();
+ atomic_inc(&call_data->finished);
+ }
+}
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Timer Routines                                                           */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*==========================================================================*
+ * Name: smp_send_timer
+ *
+ * Description: This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
+ * in the system.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: NONE
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_send_timer(void)
+{
+ send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
+}
+
+/*==========================================================================*
+ * Name:         smp_ipi_timer_interrupt
+ *
+ * Description:  This routine executes on the CPU that received
+ * 'LOCAL_TIMER_IPI'.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    *regs - a pointer to the saved register info
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_ipi_timer_interrupt(struct pt_regs *regs)
+{
+ irq_enter();
+ smp_local_timer_interrupt(regs);
+ irq_exit();
+}
+
+/*==========================================================================*
+ * Name: smp_local_timer_interrupt
+ *
+ * Description: Local timer interrupt handler. It does both profiling and
+ * process statistics/rescheduling.
+ * We do profiling in every local tick, statistics/rescheduling
+ * happen only every 'profiling multiplier' ticks. The default
+ * multiplier is 1 and it can be changed by writing the new
+ * multiplier value into /proc/profile.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    *regs - a pointer to the saved register info
+ *
+ * Returns: void (cannot fail)
+ *
+ * Original: arch/i386/kernel/apic.c
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ * 2003-06-24 hy use per_cpu structure.
+ *==========================================================================*/
+void smp_local_timer_interrupt(struct pt_regs *regs)
+{
+ int user = user_mode(regs);
+ int cpu_id = smp_processor_id();
+
+ /*
+ * The profiling function is SMP safe. (nothing can mess
+ * around with "current", and the profiling counters are
+ * updated with atomic operations). This is especially
+ * useful with a profiling multiplier != 1
+ */
+
+ profile_tick(CPU_PROFILING, regs);
+
+ if (--per_cpu(prof_counter, cpu_id) <= 0) {
+ /*
+ * The multiplier may have changed since the last time we got
+ * to this point as a result of the user writing to
+ * /proc/profile. In this case we need to adjust the APIC
+ * timer accordingly.
+ *
+ * Interrupts are already masked off at this point.
+ */
+ per_cpu(prof_counter, cpu_id)
+ = per_cpu(prof_multiplier, cpu_id);
+ if (per_cpu(prof_counter, cpu_id)
+ != per_cpu(prof_old_multiplier, cpu_id))
+ {
+ per_cpu(prof_old_multiplier, cpu_id)
+ = per_cpu(prof_counter, cpu_id);
+ }
+
+ update_process_times(user);
+ }
+}
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Send IPI Routines                                                        */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*==========================================================================*
+ * Name: send_IPI_allbutself
+ *
+ * Description:  This routine sends an IPI to all other CPUs in the system.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: ipi_num - Number of IPI
+ *               try - 0 : Always send the IPI.
+ *                     !0 : Do not send the IPI if the target CPU has not
+ *                          yet handled the previous one.
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void send_IPI_allbutself(int ipi_num, int try)
+{
+ cpumask_t cpumask;
+
+ cpumask = cpu_online_map;
+ cpu_clear(smp_processor_id(), cpumask);
+
+ send_IPI_mask(cpumask, ipi_num, try);
+}
+
+/*==========================================================================*
+ * Name: send_IPI_mask
+ *
+ * Description:  This routine sends an IPI to CPUs in the system.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: cpu_mask - Bitmap of target CPUs logical ID
+ * ipi_num - Number of IPI
+ *               try - 0 : Always send the IPI.
+ *                     !0 : Do not send the IPI if the target CPU has not
+ *                          yet handled the previous one.
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+{
+ cpumask_t physid_mask, tmp;
+ int cpu_id, phys_id;
+ int num_cpus = num_online_cpus();
+
+ if (num_cpus <= 1) /* NO MP */
+ return;
+
+ cpus_and(tmp, cpumask, cpu_online_map);
+ BUG_ON(!cpus_equal(cpumask, tmp));
+
+ physid_mask = CPU_MASK_NONE;
+ for_each_cpu_mask(cpu_id, cpumask){
+ if ((phys_id = cpu_to_physid(cpu_id)) != -1)
+ cpu_set(phys_id, physid_mask);
+ }
+
+ send_IPI_mask_phys(physid_mask, ipi_num, try);
+}
+
+/*==========================================================================*
+ * Name: send_IPI_mask_phys
+ *
+ * Description:  This routine sends an IPI to other CPUs in the system.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: cpu_mask - Bitmap of target CPUs physical ID
+ * ipi_num - Number of IPI
+ *               try - 0 : Always send the IPI.
+ *                     !0 : Do not send the IPI if the target CPU has not
+ *                          yet handled the previous one.
+ *
+ * Returns:      IPICRi register value.
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
+ int try)
+{
+ spinlock_t *ipilock;
+ unsigned long flags = 0;
+ volatile unsigned long *ipicr_addr;
+ unsigned long ipicr_val;
+ unsigned long my_physid_mask;
+ unsigned long mask = cpus_addr(physid_mask)[0];
+
+
+ if (mask & ~physids_coerce(phys_cpu_present_map))
+ BUG();
+ if (ipi_num >= NR_IPIS)
+ BUG();
+
+ mask <<= IPI_SHIFT;
+ ipilock = &ipi_lock[ipi_num];
+ ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
+ + (ipi_num << 2));
+ my_physid_mask = ~(1 << smp_processor_id());
+
+ /*
+ * lock ipi_lock[i]
+ * check IPICRi == 0
+ * write IPICRi (send IPIi)
+ * unlock ipi_lock[i]
+ */
+ __asm__ __volatile__ (
+ ";; LOCK ipi_lock[i] \n\t"
+ ".fillinsn \n"
+ "1: \n\t"
+ "mvfc %1, psw \n\t"
+ "clrpsw #0x40 -> nop \n\t"
+ DCACHE_CLEAR("r4", "r5", "%2")
+ "lock r4, @%2 \n\t"
+ "addi r4, #-1 \n\t"
+ "unlock r4, @%2 \n\t"
+ "mvtc %1, psw \n\t"
+ "bnez r4, 2f \n\t"
+ LOCK_SECTION_START(".balign 4 \n\t")
+ ".fillinsn \n"
+ "2: \n\t"
+ "ld r4, @%2 \n\t"
+ "blez r4, 2b \n\t"
+ "bra 1b \n\t"
+ LOCK_SECTION_END
+ ";; CHECK IPICRi == 0 \n\t"
+ ".fillinsn \n"
+ "3: \n\t"
+ "ld %0, @%3 \n\t"
+ "and %0, %6 \n\t"
+ "beqz %0, 4f \n\t"
+ "bnez %5, 5f \n\t"
+ "bra 3b \n\t"
+ ";; WRITE IPICRi (send IPIi) \n\t"
+ ".fillinsn \n"
+ "4: \n\t"
+ "st %4, @%3 \n\t"
+ ";; UNLOCK ipi_lock[i] \n\t"
+ ".fillinsn \n"
+ "5: \n\t"
+ "ldi r4, #1 \n\t"
+ "st r4, @%2 \n\t"
+ : "=&r"(ipicr_val)
+ : "r"(flags), "r"(&ipilock->slock), "r"(ipicr_addr),
+ "r"(mask), "r"(try), "r"(my_physid_mask)
+ : "memory", "r4"
+#ifdef CONFIG_CHIP_M32700_TS1
+ , "r5"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+ );
+
+ return ipicr_val;
+}
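The inline assembly above reads more easily as a C paraphrase. This is an editorial sketch only, not a drop-in replacement: the real code open-codes the lock with the M32R LOCK/UNLOCK instructions and masks interrupts through PSW so the lock word and the IPICRi register are accessed atomically.

    /* Pseudo-C paraphrase of the asm block (illustration only). */
    spin_lock(ipilock);                              /* LOCK ipi_lock[i]        */
    for (;;) {
            ipicr_val = *ipicr_addr & my_physid_mask;
            if (ipicr_val == 0) {                    /* CHECK IPICRi == 0       */
                    *ipicr_addr = mask;              /* WRITE IPICRi (send IPIi) */
                    break;
            }
            if (try)                                 /* 'try' mode: give up and */
                    break;                           /* report the busy value   */
    }
    spin_unlock(ipilock);                            /* UNLOCK ipi_lock[i]      */
    return ipicr_val;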
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
new file mode 100644
index 00000000000..f9a0e723478
--- /dev/null
+++ b/arch/m32r/kernel/smpboot.c
@@ -0,0 +1,630 @@
+/*
+ * linux/arch/m32r/kernel/smpboot.c
+ * orig : i386 2.4.10
+ *
+ * M32R SMP booting functions
+ *
+ * Copyright (c) 2001, 2002, 2003 Hitoshi Yamamoto
+ *
+ * Taken from i386 version.
+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *
+ * Much of the core SMP work is based on previous work by Thomas Radke, to
+ * whom a great many thanks are extended.
+ *
+ * Thanks to Intel for making available several different Pentium,
+ * Pentium Pro and Pentium-II/Xeon MP machines.
+ * Original development of Linux SMP code supported by Caldera.
+ *
+ * This code is released under the GNU General Public License version 2 or
+ * later.
+ *
+ * Fixes
+ * Felix Koop : NR_CPUS used properly
+ * Jose Renau : Handle single CPU case.
+ * Alan Cox : By repeated request
+ * 8) - Total BogoMIP report.
+ * Greg Wright : Fix for kernel stacks panic.
+ * Erich Boleyn : MP v1.4 and additional changes.
+ * Matthias Sattler : Changes for 2.1 kernel map.
+ * Michel Lespinasse : Changes for 2.1 kernel map.
+ * Michael Chastain : Change trampoline.S to gnu as.
+ * Alan Cox : Dumb bug: 'B' step PPro's are fine
+ * Ingo Molnar : Added APIC timers, based on code
+ * from Jose Renau
+ * Ingo Molnar : various cleanups and rewrites
+ * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug.
+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs
+ * Martin J. Bligh : Added support for multi-quad systems
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/smp_lock.h>
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define DEBUG_SMP
+#ifdef DEBUG_SMP
+#define Dprintk(x...) printk(x)
+#else
+#define Dprintk(x...)
+#endif
+
+extern cpumask_t cpu_initialized;
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Data structures and variables */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/* Processor that is doing the boot up */
+static unsigned int bsp_phys_id = -1;
+
+/* Bitmask of physically existing CPUs */
+physid_mask_t phys_cpu_present_map;
+
+/* Bitmask of currently online CPUs */
+cpumask_t cpu_online_map;
+
+cpumask_t cpu_bootout_map;
+cpumask_t cpu_bootin_map;
+cpumask_t cpu_callout_map;
+static cpumask_t cpu_callin_map;
+
+/* Per CPU bogomips and other parameters */
+struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;
+
+static int cpucount;
+static cpumask_t smp_commenced_mask;
+
+extern struct {
+ void * spi;
+ unsigned short ss;
+} stack_start;
+
+/* which physical ID maps to which logical CPU number */
+static volatile int physid_2_cpu[NR_CPUS];
+
+/* which logical CPU number maps to which physical ID */
+volatile int cpu_2_physid[NR_CPUS];
+
+DEFINE_PER_CPU(int, prof_multiplier) = 1;
+DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
+DEFINE_PER_CPU(int, prof_counter) = 1;
+
+spinlock_t ipi_lock[NR_IPIS];
+
+static unsigned int calibration_result;
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Function Prototypes */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+void smp_prepare_boot_cpu(void);
+void smp_prepare_cpus(unsigned int);
+static void smp_tune_scheduling(void);
+static void init_ipi_lock(void);
+static void do_boot_cpu(int);
+int __cpu_up(unsigned int);
+void smp_cpus_done(unsigned int);
+
+int start_secondary(void *);
+static void smp_callin(void);
+static void smp_online(void);
+
+static void show_mp_info(int);
+static void smp_store_cpu_info(int);
+static void show_cpu_info(int);
+int setup_profiling_timer(unsigned int);
+static void init_cpu_to_physid(void);
+static void map_cpu_to_physid(int, int);
+static void unmap_cpu_to_physid(int, int);
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Boot up APs Routines : BSP                                               */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+void __devinit smp_prepare_boot_cpu(void)
+{
+ bsp_phys_id = hard_smp_processor_id();
+ physid_set(bsp_phys_id, phys_cpu_present_map);
+ cpu_set(0, cpu_online_map); /* BSP's cpu_id == 0 */
+ cpu_set(0, cpu_callout_map);
+ cpu_set(0, cpu_callin_map);
+
+ /*
+ * Initialize the logical to physical CPU number mapping
+ */
+ init_cpu_to_physid();
+ map_cpu_to_physid(0, bsp_phys_id);
+ current_thread_info()->cpu = 0;
+}
+
+/*==========================================================================*
+ * Name: smp_prepare_cpus (old smp_boot_cpus)
+ *
+ * Description:  This routine boots up the APs.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: NONE
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ * 2003-06-24 hy modify for linux-2.5.69
+ *
+ *==========================================================================*/
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ int phys_id;
+ unsigned long nr_cpu;
+
+ nr_cpu = inl(M32R_FPGA_NUM_OF_CPUS_PORTL);
+ if (nr_cpu > NR_CPUS) {
+ printk(KERN_INFO "NUM_OF_CPUS reg. value [%ld] > NR_CPU [%d]",
+ nr_cpu, NR_CPUS);
+ goto smp_done;
+ }
+ for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
+ physid_set(phys_id, phys_cpu_present_map);
+
+ show_mp_info(nr_cpu);
+
+ init_ipi_lock();
+
+ /*
+ * Setup boot CPU information
+ */
+ smp_store_cpu_info(0); /* Final full version of the data */
+ smp_tune_scheduling();
+
+ /*
+ * If SMP should be disabled, then really disable it!
+ */
+ if (!max_cpus) {
+ printk(KERN_INFO "SMP mode deactivated by commandline.\n");
+ goto smp_done;
+ }
+
+ /*
+ * Now scan the CPU present map and fire up the other CPUs.
+ */
+ Dprintk("CPU present map : %lx\n", physids_coerce(phys_cpu_present_map));
+
+ for (phys_id = 0 ; phys_id < NR_CPUS ; phys_id++) {
+ /*
+ * Don't even attempt to start the boot CPU!
+ */
+ if (phys_id == bsp_phys_id)
+ continue;
+
+ if (!physid_isset(phys_id, phys_cpu_present_map))
+ continue;
+
+ if ((max_cpus >= 0) && (max_cpus <= cpucount + 1))
+ continue;
+
+ do_boot_cpu(phys_id);
+
+ /*
+ * Make sure we unmap all failed CPUs
+ */
+ if (physid_to_cpu(phys_id) == -1) {
+ physid_clear(phys_id, phys_cpu_present_map);
+ printk("phys CPU#%d not responding - " \
+ "cannot use it.\n", phys_id);
+ }
+ }
+
+smp_done:
+ Dprintk("Boot done.\n");
+}
+
+static void __init smp_tune_scheduling(void)
+{
+ /* Nothing to do. */
+}
+
+/*
+ * init_ipi_lock : Initialize IPI locks.
+ */
+static void __init init_ipi_lock(void)
+{
+ int ipi;
+
+ for (ipi = 0 ; ipi < NR_IPIS ; ipi++)
+ spin_lock_init(&ipi_lock[ipi]);
+}
+
+/*==========================================================================*
+ * Name: do_boot_cpu
+ *
+ * Description:  This routine boots up one AP.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: phys_id - Target CPU physical ID
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ * 2003-06-24 hy modify for linux-2.5.69
+ *
+ *==========================================================================*/
+static void __init do_boot_cpu(int phys_id)
+{
+ struct task_struct *idle;
+ unsigned long send_status, boot_status;
+ int timeout, cpu_id;
+
+ cpu_id = ++cpucount;
+
+ /*
+	 * We can't use kernel_thread since we must avoid
+	 * rescheduling the child.
+ */
+ idle = fork_idle(cpu_id);
+ if (IS_ERR(idle))
+ panic("failed fork for CPU#%d.", cpu_id);
+
+ idle->thread.lr = (unsigned long)start_secondary;
+
+ map_cpu_to_physid(cpu_id, phys_id);
+
+ /* So we see what's up */
+ printk("Booting processor %d/%d\n", phys_id, cpu_id);
+ stack_start.spi = (void *)idle->thread.sp;
+ idle->thread_info->cpu = cpu_id;
+
+ /*
+ * Send Startup IPI
+	 *   1. IPI received by CPU#(phys_id).
+	 *   2. CPU#(phys_id) enters startup_AP (arch/m32r/kernel/head.S).
+	 *   3. CPU#(phys_id) enters start_secondary().
+ */
+ send_status = 0;
+ boot_status = 0;
+
+ cpu_set(phys_id, cpu_bootout_map);
+
+ /* Send Startup IPI */
+ send_IPI_mask_phys(cpumask_of_cpu(phys_id), CPU_BOOT_IPI, 0);
+
+ Dprintk("Waiting for send to finish...\n");
+ timeout = 0;
+
+ /* Wait 100[ms] */
+ do {
+ Dprintk("+");
+ udelay(1000);
+ send_status = !cpu_isset(phys_id, cpu_bootin_map);
+ } while (send_status && (timeout++ < 100));
+
+ Dprintk("After Startup.\n");
+
+ if (!send_status) {
+ /*
+ * allow APs to start initializing.
+ */
+ Dprintk("Before Callout %d.\n", cpu_id);
+ cpu_set(cpu_id, cpu_callout_map);
+ Dprintk("After Callout %d.\n", cpu_id);
+
+ /*
+ * Wait 5s total for a response
+ */
+ for (timeout = 0; timeout < 5000; timeout++) {
+ if (cpu_isset(cpu_id, cpu_callin_map))
+ break; /* It has booted */
+ udelay(1000);
+ }
+
+ if (cpu_isset(cpu_id, cpu_callin_map)) {
+ /* number CPUs logically, starting from 1 (BSP is 0) */
+ Dprintk("OK.\n");
+ } else {
+ boot_status = 1;
+ printk("Not responding.\n");
+ }
+ } else
+ printk("IPI never delivered???\n");
+
+ if (send_status || boot_status) {
+ unmap_cpu_to_physid(cpu_id, phys_id);
+ cpu_clear(cpu_id, cpu_callout_map);
+ cpu_clear(cpu_id, cpu_callin_map);
+ cpu_clear(cpu_id, cpu_initialized);
+ cpucount--;
+ }
+}
+
+int __devinit __cpu_up(unsigned int cpu_id)
+{
+ int timeout;
+
+ cpu_set(cpu_id, smp_commenced_mask);
+
+ /*
+ * Wait 5s total for a response
+ */
+ for (timeout = 0; timeout < 5000; timeout++) {
+ if (cpu_isset(cpu_id, cpu_online_map))
+ break;
+ udelay(1000);
+ }
+ if (!cpu_isset(cpu_id, cpu_online_map))
+ BUG();
+
+ return 0;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+ int cpu_id, timeout;
+ unsigned long bogosum = 0;
+
+ for (timeout = 0; timeout < 5000; timeout++) {
+ if (cpus_equal(cpu_callin_map, cpu_online_map))
+ break;
+ udelay(1000);
+ }
+ if (!cpus_equal(cpu_callin_map, cpu_online_map))
+ BUG();
+
+ for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++)
+ show_cpu_info(cpu_id);
+
+ /*
+ * Allow the user to impress friends.
+ */
+ Dprintk("Before bogomips.\n");
+ if (cpucount) {
+ for_each_cpu_mask(cpu_id, cpu_online_map)
+ bogosum += cpu_data[cpu_id].loops_per_jiffy;
+
+ printk(KERN_INFO "Total of %d processors activated " \
+ "(%lu.%02lu BogoMIPS).\n", cpucount + 1,
+ bogosum / (500000 / HZ),
+ (bogosum / (5000 / HZ)) % 100);
+ Dprintk("Before bogocount - setting activated=1.\n");
+ }
+}
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Activate a secondary processor Routines                                  */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*==========================================================================*
+ * Name: start_secondary
+ *
+ * Description:  This routine activates a secondary processor.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: *unused - currently unused.
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ * 2003-06-24 hy modify for linux-2.5.69
+ *
+ *==========================================================================*/
+int __init start_secondary(void *unused)
+{
+ cpu_init();
+ smp_callin();
+ while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+ cpu_relax();
+
+ smp_online();
+
+ /*
+ * low-memory mappings have been cleared, flush them from
+ * the local TLBs too.
+ */
+ local_flush_tlb_all();
+
+ cpu_idle();
+ return 0;
+}
+
+/*==========================================================================*
+ * Name: smp_callin
+ *
+ * Description:  This routine performs the call-in handshake of a secondary
+ *               processor.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments: NONE
+ *
+ * Returns: void (cannot fail)
+ *
+ * Modification log:
+ * Date Who Description
+ * ---------- --- --------------------------------------------------------
+ * 2003-06-24 hy modify for linux-2.5.69
+ *
+ *==========================================================================*/
+static void __init smp_callin(void)
+{
+ int phys_id = hard_smp_processor_id();
+ int cpu_id = smp_processor_id();
+ unsigned long timeout;
+
+ if (cpu_isset(cpu_id, cpu_callin_map)) {
+ printk("huh, phys CPU#%d, CPU#%d already present??\n",
+ phys_id, cpu_id);
+ BUG();
+ }
+ Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpu_id, phys_id);
+
+ /* Waiting 2s total for startup (udelay is not yet working) */
+ timeout = jiffies + (2 * HZ);
+ while (time_before(jiffies, timeout)) {
+		/* Has the boot CPU finished its STARTUP sequence? */
+ if (cpu_isset(cpu_id, cpu_callout_map))
+ break;
+ cpu_relax();
+ }
+
+ if (!time_before(jiffies, timeout)) {
+ printk("BUG: CPU#%d started up but did not get a callout!\n",
+ cpu_id);
+ BUG();
+ }
+
+ /* Allow the master to continue. */
+ cpu_set(cpu_id, cpu_callin_map);
+}
+
+static void __init smp_online(void)
+{
+ int cpu_id = smp_processor_id();
+
+ local_irq_enable();
+
+ /* Get our bogomips. */
+ calibrate_delay();
+
+ /* Save our processor parameters */
+ smp_store_cpu_info(cpu_id);
+
+ cpu_set(cpu_id, cpu_online_map);
+}
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Boot up CPUs common Routines                                             */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+static void __init show_mp_info(int nr_cpu)
+{
+ int i;
+ char cpu_model0[17], cpu_model1[17], cpu_ver[9];
+
+ strncpy(cpu_model0, (char *)M32R_FPGA_CPU_NAME_ADDR, 16);
+ strncpy(cpu_model1, (char *)M32R_FPGA_MODEL_ID_ADDR, 16);
+ strncpy(cpu_ver, (char *)M32R_FPGA_VERSION_ADDR, 8);
+
+ cpu_model0[16] = '\0';
+ for (i = 15 ; i >= 0 ; i--) {
+ if (cpu_model0[i] != ' ')
+ break;
+ cpu_model0[i] = '\0';
+ }
+ cpu_model1[16] = '\0';
+ for (i = 15 ; i >= 0 ; i--) {
+ if (cpu_model1[i] != ' ')
+ break;
+ cpu_model1[i] = '\0';
+ }
+ cpu_ver[8] = '\0';
+ for (i = 7 ; i >= 0 ; i--) {
+ if (cpu_ver[i] != ' ')
+ break;
+ cpu_ver[i] = '\0';
+ }
+
+ printk(KERN_INFO "M32R-mp information\n");
+ printk(KERN_INFO " On-chip CPUs : %d\n", nr_cpu);
+ printk(KERN_INFO " CPU model : %s/%s(%s)\n", cpu_model0,
+ cpu_model1, cpu_ver);
+}
+
+/*
+ * The bootstrap kernel entry code has set these up. Save them for
+ * a given CPU
+ */
+static void __init smp_store_cpu_info(int cpu_id)
+{
+ struct cpuinfo_m32r *ci = cpu_data + cpu_id;
+
+ *ci = boot_cpu_data;
+ ci->loops_per_jiffy = loops_per_jiffy;
+}
+
+static void __init show_cpu_info(int cpu_id)
+{
+ struct cpuinfo_m32r *ci = &cpu_data[cpu_id];
+
+ printk("CPU#%d : ", cpu_id);
+
+#define PRINT_CLOCK(name, value) \
+ printk(name " clock %d.%02dMHz", \
+ ((value) / 1000000), ((value) % 1000000) / 10000)
+
+ PRINT_CLOCK("CPU", (int)ci->cpu_clock);
+ PRINT_CLOCK(", Bus", (int)ci->bus_clock);
+ printk(", loops_per_jiffy[%ld]\n", ci->loops_per_jiffy);
+}
+
+/*
+ * the frequency of the profiling timer can be changed
+ * by writing a multiplier value into /proc/profile.
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ int i;
+
+ /*
+ * Sanity check. [at least 500 APIC cycles should be
+ * between APIC interrupts as a rule of thumb, to avoid
+ * irqs flooding us]
+ */
+ if ( (!multiplier) || (calibration_result / multiplier < 500))
+ return -EINVAL;
+
+ /*
+ * Set the new multiplier for each CPU. CPUs don't start using the
+ * new values until the next timer interrupt in which they do process
+ * accounting. At that time they also adjust their APIC timers
+ * accordingly.
+ */
+ for (i = 0; i < NR_CPUS; ++i)
+ per_cpu(prof_multiplier, i) = multiplier;
+
+ return 0;
+}
+
+/* Initialize all maps between cpu number and physical id */
+static void __init init_cpu_to_physid(void)
+{
+ int i;
+
+ for (i = 0 ; i < NR_CPUS ; i++) {
+ cpu_2_physid[i] = -1;
+ physid_2_cpu[i] = -1;
+ }
+}
+
+/*
+ * set up a mapping between a logical cpu number and its physical id
+ */
+static void __init map_cpu_to_physid(int cpu_id, int phys_id)
+{
+ physid_2_cpu[phys_id] = cpu_id;
+ cpu_2_physid[cpu_id] = phys_id;
+}
+
+/*
+ * undo the mapping between a logical cpu number and its physical id
+ */
+static void __init unmap_cpu_to_physid(int cpu_id, int phys_id)
+{
+ physid_2_cpu[phys_id] = -1;
+ cpu_2_physid[cpu_id] = -1;
+}
+
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
new file mode 100644
index 00000000000..e0500e12c5f
--- /dev/null
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -0,0 +1,217 @@
+/*
+ * linux/arch/m32r/kernel/sys_m32r.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/M32R platform.
+ *
+ * Taken from i386 version.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+
+#include <asm/uaccess.h>
+#include <asm/cachectl.h>
+#include <asm/cacheflush.h>
+#include <asm/ipc.h>
+
+/*
+ * sys_tas() - test-and-set
+ * linuxthreads testing version
+ */
+#ifndef CONFIG_SMP
+asmlinkage int sys_tas(int *addr)
+{
+ int oldval;
+ unsigned long flags;
+
+ if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
+ return -EFAULT;
+ local_irq_save(flags);
+ oldval = *addr;
+ *addr = 1;
+ local_irq_restore(flags);
+ return oldval;
+}
+#else /* CONFIG_SMP */
+#include <linux/spinlock.h>
+
+static DEFINE_SPINLOCK(tas_lock);
+
+asmlinkage int sys_tas(int *addr)
+{
+ int oldval;
+
+ if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
+ return -EFAULT;
+
+ _raw_spin_lock(&tas_lock);
+ oldval = *addr;
+ *addr = 1;
+ _raw_spin_unlock(&tas_lock);
+
+ return oldval;
+}
+#endif /* CONFIG_SMP */
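A minimal sketch of how a user-space thread library might build a lock on this primitive (editorial; tas() stands for whatever wrapper the C library uses to invoke the sys_tas system call, and is not defined in this commit):

    /* Hypothetical user-space spin lock built on the kernel's test-and-set. */
    static void my_lock(int *lock)
    {
            while (tas(lock) != 0)  /* returns the old value; 0 means we got it */
                    ;               /* spin until the holder releases the lock  */
    }

    static void my_unlock(int *lock)
    {
            *lock = 0;              /* the next tas() will then observe 0 */
    }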
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way Unix traditionally does this, though.
+ */
+asmlinkage int
+sys_pipe(unsigned long r0, unsigned long r1, unsigned long r2,
+ unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, struct pt_regs regs)
+{
+ int fd[2];
+ int error;
+
+ error = do_pipe(fd);
+ if (!error) {
+ if (copy_to_user((void *)r0, (void *)fd, 2*sizeof(int)))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file *file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls.
+ *
+ * This is really horribly ugly.
+ */
+asmlinkage int sys_ipc(uint call, int first, int second,
+ int third, void __user *ptr, long fifth)
+{
+ int version, ret;
+
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
+
+ switch (call) {
+ case SEMOP:
+ return sys_semtimedop(first, (struct sembuf __user *)ptr,
+ second, NULL);
+ case SEMTIMEDOP:
+ return sys_semtimedop(first, (struct sembuf __user *)ptr,
+ second, (const struct timespec __user *)fifth);
+ case SEMGET:
+ return sys_semget (first, second, third);
+ case SEMCTL: {
+ union semun fourth;
+ if (!ptr)
+ return -EINVAL;
+ if (get_user(fourth.__pad, (void __user * __user *) ptr))
+ return -EFAULT;
+ return sys_semctl (first, second, third, fourth);
+ }
+
+ case MSGSND:
+ return sys_msgsnd (first, (struct msgbuf __user *) ptr,
+ second, third);
+ case MSGRCV:
+ switch (version) {
+ case 0: {
+ struct ipc_kludge tmp;
+ if (!ptr)
+ return -EINVAL;
+
+ if (copy_from_user(&tmp,
+ (struct ipc_kludge __user *) ptr,
+ sizeof (tmp)))
+ return -EFAULT;
+ return sys_msgrcv (first, tmp.msgp, second,
+ tmp.msgtyp, third);
+ }
+ default:
+ return sys_msgrcv (first,
+ (struct msgbuf __user *) ptr,
+ second, fifth, third);
+ }
+ case MSGGET:
+ return sys_msgget ((key_t) first, second);
+ case MSGCTL:
+ return sys_msgctl (first, second,
+ (struct msqid_ds __user *) ptr);
+ case SHMAT: {
+ ulong raddr;
+
+ if (!access_ok(VERIFY_WRITE, (ulong __user *) third,
+ sizeof(ulong)))
+ return -EFAULT;
+ ret = do_shmat (first, (char __user *) ptr, second, &raddr);
+ if (ret)
+ return ret;
+ return put_user (raddr, (ulong __user *) third);
+ }
+ case SHMDT:
+ return sys_shmdt ((char __user *)ptr);
+ case SHMGET:
+ return sys_shmget (first, second, third);
+ case SHMCTL:
+ return sys_shmctl (first, second,
+ (struct shmid_ds __user *) ptr);
+ default:
+ return -ENOSYS;
+ }
+}
+
+asmlinkage int sys_uname(struct old_utsname * name)
+{
+ int err;
+ if (!name)
+ return -EFAULT;
+ down_read(&uts_sem);
+ err=copy_to_user(name, &system_utsname, sizeof (*name));
+ up_read(&uts_sem);
+ return err?-EFAULT:0;
+}
+
+asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
+{
+	/* This should flush more selectively ... */
+ _flush_cache_all();
+ return 0;
+}
+
+asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
+{
+ /* Not implemented yet. */
+ return -ENOSYS;
+}
+
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
new file mode 100644
index 00000000000..3c4707280a5
--- /dev/null
+++ b/arch/m32r/kernel/time.c
@@ -0,0 +1,318 @@
+/*
+ * linux/arch/m32r/kernel/time.c
+ *
+ * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto
+ * Taken from i386 version.
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ * Copyright (C) 1996, 1997, 1998 Ralf Baechle
+ *
+ * This file contains the time handling details for PC-style clocks as
+ * found in some MIPS systems.
+ *
+ * Some code taken from sh version.
+ * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
+ * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ */
+
+#undef DEBUG_TIMER
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/profile.h>
+
+#include <asm/io.h>
+#include <asm/m32r.h>
+
+#include <asm/hw_irq.h>
+
+#ifdef CONFIG_SMP
+extern void send_IPI_allbutself(int, int);
+extern void smp_local_timer_interrupt(struct pt_regs *);
+#endif
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+extern unsigned long wall_jiffies;
+#define TICK_SIZE (tick_nsec / 1000)
+
+/*
+ * Change this if you have some constant time drift
+ */
+
+/* This is for machines which generate the exact clock. */
+#define USECS_PER_JIFFY (1000000/HZ)
+
+static unsigned long latch;
+
+static unsigned long do_gettimeoffset(void)
+{
+ unsigned long elapsed_time = 0; /* [us] */
+
+#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
+ || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
+ || defined(CONFIG_CHIP_OPSP)
+#ifndef CONFIG_SMP
+
+ unsigned long count;
+
+ /* timer count may underflow right here */
+ count = inl(M32R_MFT2CUT_PORTL);
+
+ if (inl(M32R_ICU_CR18_PORTL) & 0x00000100) /* underflow check */
+ count = 0;
+
+ count = (latch - count) * TICK_SIZE;
+ elapsed_time = (count + latch / 2) / latch;
+ /* NOTE: LATCH is equal to the "interval" value (= reload count). */
+
+#else /* CONFIG_SMP */
+ unsigned long count;
+ static unsigned long p_jiffies = -1;
+ static unsigned long p_count = 0;
+
+ /* timer count may underflow right here */
+ count = inl(M32R_MFT2CUT_PORTL);
+
+ if (jiffies == p_jiffies && count > p_count)
+ count = 0;
+
+ p_jiffies = jiffies;
+ p_count = count;
+
+ count = (latch - count) * TICK_SIZE;
+ elapsed_time = (count + latch / 2) / latch;
+ /* NOTE: LATCH is equal to the "interval" value (= reload count). */
+#endif /* CONFIG_SMP */
+#elif defined(CONFIG_CHIP_M32310)
+#warning do_gettimeoffset not implemented
+#else
+#error no chip configuration
+#endif
+
+ return elapsed_time;
+}
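A worked example of the interpolation above (editorial; the numbers are assumed, not taken from this commit): with HZ = 100 a tick is 10 ms, so TICK_SIZE is about 10000 microseconds. If the reload value latch is 10000 and the down-counter currently reads 7500, then

    elapsed_time = ((10000 - 7500) * 10000 + 10000 / 2) / 10000  ~=  2500

i.e. roughly 2500 us of the current tick have elapsed, which do_gettimeofday() adds on top of the time recorded in xtime.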
+
+/*
+ * This version of gettimeofday has near microsecond resolution.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long seq;
+ unsigned long usec, sec;
+ unsigned long max_ntp_tick = tick_usec - tickadj;
+
+ do {
+ unsigned long lost;
+
+ seq = read_seqbegin(&xtime_lock);
+
+ usec = do_gettimeoffset();
+ lost = jiffies - wall_jiffies;
+
+ /*
+ * If time_adjust is negative then NTP is slowing the clock
+ * so make sure not to go into next possible interval.
+ * Better to lose some accuracy than have time go backwards..
+ */
+ if (unlikely(time_adjust < 0)) {
+ usec = min(usec, max_ntp_tick);
+ if (lost)
+ usec += lost * max_ntp_tick;
+ } else if (unlikely(lost))
+ usec += lost * tick_usec;
+
+ sec = xtime.tv_sec;
+ usec += (xtime.tv_nsec / 1000);
+ } while (read_seqretry(&xtime_lock, seq));
+
+ while (usec >= 1000000) {
+ usec -= 1000000;
+ sec++;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
+int do_settimeofday(struct timespec *tv)
+{
+ time_t wtm_sec, sec = tv->tv_sec;
+ long wtm_nsec, nsec = tv->tv_nsec;
+
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ write_seqlock_irq(&xtime_lock);
+ /*
+ * This is revolting. We need to set "xtime" correctly. However, the
+ * value in this location is the value at the most recent update of
+ * wall time. Discover what correction gettimeofday() would have
+ * made, and then undo it!
+ */
+ nsec -= do_gettimeoffset() * NSEC_PER_USEC;
+ nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
+
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+ write_sequnlock_irq(&xtime_lock);
+ clock_was_set();
+
+ return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+/*
+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
+ * called 500 ms after the second nowtime has started, because when
+ * nowtime is written into the registers of the CMOS clock, it will
+ * jump to the next second precisely 500 ms later. Check the Motorola
+ * MC146818A or Dallas DS12887 data sheet for details.
+ *
+ * BUG: This routine does not handle hour overflow properly; it just
+ * sets the minutes. Usually you won't notice until after reboot!
+ */
+static inline int set_rtc_mmss(unsigned long nowtime)
+{
+ return 0;
+}
+
+/* last time the cmos clock got updated */
+static long last_rtc_update = 0;
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+static inline void
+do_timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+#ifndef CONFIG_SMP
+ profile_tick(CPU_PROFILING, regs);
+#endif
+ do_timer(regs);
+
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+ * called as close as possible to 500 ms before the new second starts.
+ */
+ if ((time_status & STA_UNSYNC) == 0
+ && xtime.tv_sec > last_rtc_update + 660
+ && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2
+ && (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2)
+ {
+ if (set_rtc_mmss(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else /* do it again in 60 s */
+ last_rtc_update = xtime.tv_sec - 600;
+ }
+ /* As we return to user mode fire off the other CPU schedulers..
+ this is basically because we don't yet share IRQ's around.
+ This message is rigged to be safe on the 386 - basically it's
+ a hack, so don't look closely for now.. */
+
+#ifdef CONFIG_SMP
+ smp_local_timer_interrupt(regs);
+#endif
+}
+
+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ write_seqlock(&xtime_lock);
+ do_timer_interrupt(irq, NULL, regs);
+ write_sequnlock(&xtime_lock);
+
+ return IRQ_HANDLED;
+}
+
+struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE,
+ "MFT2", NULL, NULL };
+
+void __init time_init(void)
+{
+ unsigned int epoch, year, mon, day, hour, min, sec;
+
+ sec = min = hour = day = mon = year = 0;
+ epoch = 0;
+
+ year = 23;
+ mon = 4;
+ day = 17;
+
+ /* Attempt to guess the epoch. This is the same heuristic as in rtc.c
+ so no stupid things will happen to timekeeping. Who knows, maybe
+ Ultrix also uses 1952 as epoch ... */
+ if (year > 10 && year < 44)
+ epoch = 1980;
+ else if (year < 96)
+ epoch = 1952;
+ year += epoch;
+
+ xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
+ xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+
+#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
+ || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
+ || defined(CONFIG_CHIP_OPSP)
+
+ /* M32102 MFT setup */
+ setup_irq(M32R_IRQ_MFT2, &irq0);
+ {
+ unsigned long bus_clock;
+ unsigned short divide;
+
+ bus_clock = boot_cpu_data.bus_clock;
+ divide = boot_cpu_data.timer_divide;
+ latch = (bus_clock/divide + HZ / 2) / HZ;
+
+ printk("Timer start : latch = %ld\n", latch);
+
+ outl((M32R_MFTMOD_CC_MASK | M32R_MFTMOD_TCCR \
+ |M32R_MFTMOD_CSSEL011), M32R_MFT2MOD_PORTL);
+ outl(latch, M32R_MFT2RLD_PORTL);
+ outl(latch, M32R_MFT2CUT_PORTL);
+ outl(0, M32R_MFT2CMPRLD_PORTL);
+ outl((M32R_MFTCR_MFT2MSK|M32R_MFTCR_MFT2EN), M32R_MFTCR_PORTL);
+ }
+
+#elif defined(CONFIG_CHIP_M32310)
+#warning time_init not implemented
+#else
+#error no chip configuration
+#endif
+}
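To make the latch computation concrete (editorial example with assumed hardware values): for a bus clock of 25 MHz, a timer divider of 128 and HZ = 100,

    latch = (25000000 / 128 + 100 / 2) / 100 = (195312 + 50) / 100 = 1953

so MFT2 underflows about HZ times per second, and the same latch value is what do_gettimeoffset() uses to interpolate within a tick.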
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ */
+unsigned long long sched_clock(void)
+{
+ return (unsigned long long)jiffies * (1000000000 / HZ);
+}
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
new file mode 100644
index 00000000000..01922271d17
--- /dev/null
+++ b/arch/m32r/kernel/traps.c
@@ -0,0 +1,332 @@
+/*
+ * linux/arch/m32r/kernel/traps.c
+ *
+ * Copyright (C) 2001, 2002 Hirokazu Takata, Hiroyuki Kondo,
+ * Hitoshi Yamamoto
+ */
+
+/* $Id$ */
+
+/*
+ * 'traps.c' handles hardware traps and faults after we have saved some
+ * state in 'entry.S'.
+ */
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/stddef.h>
+#include <linux/ptrace.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+
+#include <asm/smp.h>
+
+#include <linux/module.h>
+
+asmlinkage void alignment_check(void);
+asmlinkage void ei_handler(void);
+asmlinkage void rie_handler(void);
+asmlinkage void debug_trap(void);
+asmlinkage void cache_flushing_handler(void);
+
+#ifdef CONFIG_SMP
+extern void smp_reschedule_interrupt(void);
+extern void smp_invalidate_interrupt(void);
+extern void smp_call_function_interrupt(void);
+extern void smp_ipi_timer_interrupt(void);
+extern void smp_flush_cache_all_interrupt(void);
+
+/*
+ * for Boot AP function
+ */
+asm (
+ " .section .eit_vector4,\"ax\" \n"
+ " .global _AP_RE \n"
+ " .global startup_AP \n"
+ "_AP_RE: \n"
+ " .fill 32, 4, 0 \n"
+ "_AP_EI: bra startup_AP \n"
+ " .previous \n"
+);
+#endif /* CONFIG_SMP */
+
+extern unsigned long eit_vector[];
+#define BRA_INSN(func, entry) \
+ ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
+ + 0xff000000UL
+
+void set_eit_vector_entries(void)
+{
+ extern void default_eit_handler(void);
+ extern void system_call(void);
+ extern void pie_handler(void);
+ extern void ace_handler(void);
+ extern void tme_handler(void);
+ extern void _flush_cache_copyback_all(void);
+
+ eit_vector[0] = 0xd0c00001; /* seth r0, 0x01 */
+ eit_vector[1] = BRA_INSN(default_eit_handler, 1);
+ eit_vector[4] = 0xd0c00010; /* seth r0, 0x10 */
+ eit_vector[5] = BRA_INSN(default_eit_handler, 5);
+ eit_vector[8] = BRA_INSN(rie_handler, 8);
+ eit_vector[12] = BRA_INSN(alignment_check, 12);
+ eit_vector[16] = 0xff000000UL;
+ eit_vector[17] = BRA_INSN(debug_trap, 17);
+ eit_vector[18] = BRA_INSN(system_call, 18);
+ eit_vector[19] = 0xff000000UL;
+ eit_vector[20] = 0xff000000UL;
+ eit_vector[21] = 0xff000000UL;
+ eit_vector[22] = 0xff000000UL;
+ eit_vector[23] = 0xff000000UL;
+ eit_vector[24] = 0xff000000UL;
+ eit_vector[25] = 0xff000000UL;
+ eit_vector[26] = 0xff000000UL;
+ eit_vector[27] = 0xff000000UL;
+ eit_vector[28] = BRA_INSN(cache_flushing_handler, 28);
+ eit_vector[29] = 0xff000000UL;
+ eit_vector[30] = 0xff000000UL;
+ eit_vector[31] = 0xff000000UL;
+ eit_vector[32] = BRA_INSN(ei_handler, 32);
+ eit_vector[64] = BRA_INSN(pie_handler, 64);
+#ifdef CONFIG_MMU
+ eit_vector[68] = BRA_INSN(ace_handler, 68);
+ eit_vector[72] = BRA_INSN(tme_handler, 72);
+#endif /* CONFIG_MMU */
+#ifdef CONFIG_SMP
+ eit_vector[184] = (unsigned long)smp_reschedule_interrupt;
+ eit_vector[185] = (unsigned long)smp_invalidate_interrupt;
+ eit_vector[186] = (unsigned long)smp_call_function_interrupt;
+ eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
+ eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
+ eit_vector[189] = 0;
+ eit_vector[190] = 0;
+ eit_vector[191] = 0;
+#endif
+ _flush_cache_copyback_all();
+}
+
+void __init trap_init(void)
+{
+ set_eit_vector_entries();
+
+ /*
+ * Should be a barrier for any external CPU state.
+ */
+ cpu_init();
+}
+
+int kstack_depth_to_print = 24;
+
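+/* Scan the stack and print every word that looks like a kernel text address. */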
+void show_trace(struct task_struct *task, unsigned long *stack)
+{
+ unsigned long addr;
+
+ if (!stack)
+		stack = (unsigned long *)&stack;
+
+ printk("Call Trace: ");
+ while (!kstack_end(stack)) {
+ addr = *stack++;
+ if (__kernel_text_address(addr)) {
+ printk("[<%08lx>] ", addr);
+ print_symbol("%s\n", addr);
+ }
+ }
+ printk("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+ unsigned long *stack;
+ int i;
+
+ /*
+ * debugging aid: "show_stack(NULL);" prints the
+ * back trace for this cpu.
+ */
+
+	if (sp == NULL) {
+		if (task)
+			sp = (unsigned long *)task->thread.sp;
+		else
+			sp = (unsigned long *)&sp;
+ }
+
+ stack = sp;
+	for (i = 0; i < kstack_depth_to_print; i++) {
+ if (kstack_end(stack))
+ break;
+ if (i && ((i % 4) == 0))
+ printk("\n ");
+ printk("%08lx ", *stack++);
+ }
+ printk("\n");
+ show_trace(task, sp);
+}
+
+void dump_stack(void)
+{
+ unsigned long stack;
+
+ show_trace(current, &stack);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
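+/*
+ * Dump the register file plus, for kernel-mode faults, the stack and the
+ * code bytes around the faulting PC (bpc).
+ */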
+static void show_registers(struct pt_regs *regs)
+{
+ int i = 0;
+ int in_kernel = 1;
+ unsigned long sp;
+
+ printk("CPU: %d\n", smp_processor_id());
+ show_regs(regs);
+
+ sp = (unsigned long) (1+regs);
+ if (user_mode(regs)) {
+ in_kernel = 0;
+ sp = regs->spu;
+ printk("SPU: %08lx\n", sp);
+ } else {
+ printk("SPI: %08lx\n", sp);
+ }
+ printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
+ current->comm, current->pid, 0xffff & i, 4096+(unsigned long)current);
+
+ /*
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+ if (in_kernel) {
+ printk("\nStack: ");
+ show_stack(current, (unsigned long*) sp);
+
+ printk("\nCode: ");
+ if (regs->bpc < PAGE_OFFSET)
+ goto bad;
+
+		for (i = 0; i < 20; i++) {
+ unsigned char c;
+ if (__get_user(c, &((unsigned char*)regs->bpc)[i])) {
+bad:
+ printk(" Bad PC value.");
+ break;
+ }
+ printk("%02x ", c);
+ }
+ }
+ printk("\n");
+}
+
+DEFINE_SPINLOCK(die_lock);
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+ printk("%s: %04lx\n", str, err & 0xffff);
+ show_registers(regs);
+ bust_spinlocks(0);
+ spin_unlock_irq(&die_lock);
+ do_exit(SIGSEGV);
+}
+
+static __inline__ void die_if_kernel(const char * str,
+ struct pt_regs * regs, long err)
+{
+ if (!user_mode(regs))
+ die(str, regs, err);
+}
+
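+/*
+ * Common trap handling: user-mode traps deliver a signal to the current
+ * task; kernel-mode traps try an exception-table fixup and die() otherwise.
+ */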
+static __inline__ void do_trap(int trapnr, int signr, const char * str,
+ struct pt_regs * regs, long error_code, siginfo_t *info)
+{
+ if (user_mode(regs)) {
+ /* trap_signal */
+ struct task_struct *tsk = current;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
+ if (info)
+ force_sig_info(signr, info, tsk);
+ else
+ force_sig(signr, tsk);
+ return;
+ } else {
+ /* kernel_trap */
+ if (!fixup_exception(regs))
+ die(str, regs, error_code);
+ return;
+ }
+}
+
+#define DO_ERROR(trapnr, signr, str, name) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+	do_trap(trapnr, signr, str, regs, error_code, NULL); \
+}
+
+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ siginfo_t info; \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void __user *)siaddr; \
+ do_trap(trapnr, signr, str, regs, error_code, &info); \
+}
+
+DO_ERROR( 1, SIGTRAP, "debug trap", debug_trap)
+DO_ERROR_INFO(0x20, SIGILL, "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc)
+DO_ERROR_INFO(0x100, SIGILL, "privilege instruction", pie_handler, ILL_PRVOPC, regs->bpc)
+
+extern int handle_unaligned_access(unsigned long, struct pt_regs *);
+
+/* This code taken from arch/sh/kernel/traps.c */
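+/*
+ * Fetch the faulting 32-bit instruction at bpc and let
+ * handle_unaligned_access() (align.c) emulate the access.  Unrecoverable
+ * user-mode faults get SIGSEGV; an unreadable kernel PC is fatal.
+ */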
+asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code)
+{
+ mm_segment_t oldfs;
+ unsigned long insn;
+ int tmp;
+
+ oldfs = get_fs();
+
+ if (user_mode(regs)) {
+ local_irq_enable();
+ current->thread.error_code = error_code;
+ current->thread.trap_no = 0x17;
+
+ set_fs(USER_DS);
+		if (copy_from_user(&insn, (void __user *)regs->bpc, 4)) {
+ set_fs(oldfs);
+ goto uspace_segv;
+ }
+ tmp = handle_unaligned_access(insn, regs);
+ set_fs(oldfs);
+
+ if (!tmp)
+ return;
+
+ uspace_segv:
+ printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
+ "access\n", current->comm);
+ force_sig(SIGSEGV, current);
+ } else {
+ set_fs(KERNEL_DS);
+		if (copy_from_user(&insn, (void __user *)regs->bpc, 4)) {
+ set_fs(oldfs);
+ die("insn faulting in do_address_error", regs, 0);
+ }
+ handle_unaligned_access(insn, regs);
+ set_fs(oldfs);
+ }
+}
+
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
new file mode 100644
index 00000000000..729a2645a03
--- /dev/null
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -0,0 +1,143 @@
+/* ld script to make M32R Linux kernel
+ */
+
+#include <linux/config.h>
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/addrspace.h>
+#include <asm/page.h>
+
+OUTPUT_ARCH(m32r)
+ENTRY(startup_32)
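+/* jiffies aliases the low-order 32 bits of the 64-bit jiffies_64 counter. */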
+#if defined(__LITTLE_ENDIAN__)
+ jiffies = jiffies_64;
+#else
+ jiffies = jiffies_64 + 4;
+#endif
+SECTIONS
+{
+ . = CONFIG_MEMORY_START + __PAGE_OFFSET;
+ eit_vector = .;
+
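+	/* Reserve 4KB starting at eit_vector for the EIT vector table. */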
+ . = . + 0x1000;
+ .empty_zero_page : { *(.empty_zero_page) } = 0
+
+ /* read-only */
+ _text = .; /* Text and read-only data */
+ .boot : { *(.boot) } = 0
+ .text : {
+ *(.text)
+ SCHED_TEXT
+ LOCK_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } = 0x9090
+#ifdef CONFIG_SMP
+ . = ALIGN(65536);
+ .eit_vector4 : { *(.eit_vector4) }
+#endif
+ _etext = .; /* End of text section */
+
+ . = ALIGN(16); /* Exception table */
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
+
+ RODATA
+
+ /* writeable */
+ .data : { /* Data */
+ *(.spu)
+ *(.spi)
+ *(.data)
+ CONSTRUCTORS
+ }
+
+ . = ALIGN(4096);
+ __nosave_begin = .;
+ .data_nosave : { *(.data.nosave) }
+ . = ALIGN(4096);
+ __nosave_end = .;
+
+ . = ALIGN(4096);
+ .data.page_aligned : { *(.data.idt) }
+
+ . = ALIGN(32);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+ _edata = .; /* End of data section */
+
+ . = ALIGN(8192); /* init_task */
+ .data.init_task : { *(.data.init_task) }
+
+ /* will be freed after init */
+ . = ALIGN(4096); /* Init code and data */
+ __init_begin = .;
+ .init.text : {
+ _sinittext = .;
+ *(.init.text)
+ _einittext = .;
+ }
+ .init.data : { *(.init.data) }
+ . = ALIGN(16);
+ __setup_start = .;
+ .init.setup : { *(.init.setup) }
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : {
+ *(.initcall1.init)
+ *(.initcall2.init)
+ *(.initcall3.init)
+ *(.initcall4.init)
+ *(.initcall5.init)
+ *(.initcall6.init)
+ *(.initcall7.init)
+ }
+ __initcall_end = .;
+ __con_initcall_start = .;
+ .con_initcall.init : { *(.con_initcall.init) }
+ __con_initcall_end = .;
+ SECURITY_INIT
+ . = ALIGN(4);
+ __alt_instructions = .;
+ .altinstructions : { *(.altinstructions) }
+ __alt_instructions_end = .;
+ .altinstr_replacement : { *(.altinstr_replacement) }
+  /* .exit.text is discarded at runtime, not link time, to deal with
+     references from .altinstructions and .eh_frame */
+ .exit.text : { *(.exit.text) }
+ .exit.data : { *(.exit.data) }
+ . = ALIGN(4096);
+ __initramfs_start = .;
+ .init.ramfs : { *(.init.ramfs) }
+ __initramfs_end = .;
+ . = ALIGN(32);
+ __per_cpu_start = .;
+ .data.percpu : { *(.data.percpu) }
+ __per_cpu_end = .;
+ . = ALIGN(4096);
+ __init_end = .;
+ /* freed after init ends here */
+
+ __bss_start = .; /* BSS */
+ .bss : { *(.bss) }
+ . = ALIGN(4);
+ __bss_stop = .;
+
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.exit.text)
+ *(.exit.data)
+ *(.exitcall.exit)
+ }
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}