Diffstat (limited to 'arch/ppc/kernel')
38 files changed, 0 insertions, 19797 deletions
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile deleted file mode 100644 index c610ca933a2..00000000000 --- a/arch/ppc/kernel/Makefile +++ /dev/null @@ -1,53 +0,0 @@ -# -# Makefile for the linux kernel. -# -ifneq ($(CONFIG_PPC_MERGE),y) - -extra-$(CONFIG_PPC_STD_MMU) := head.o -extra-$(CONFIG_40x) := head_4xx.o -extra-$(CONFIG_44x) := head_44x.o -extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o -extra-$(CONFIG_8xx) := head_8xx.o -extra-$(CONFIG_6xx) += idle_6xx.o -extra-$(CONFIG_POWER4) += idle_power4.o -extra-y += vmlinux.lds - -obj-y := entry.o traps.o irq.o idle.o time.o misc.o \ - process.o align.o \ - setup.o \ - ppc_htab.o -obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o -obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o -obj-$(CONFIG_POWER4) += cpu_setup_power4.o -obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o -obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o -obj-$(CONFIG_PCI) += pci.o -obj-$(CONFIG_KGDB) += ppc-stub.o -obj-$(CONFIG_SMP) += smp.o smp-tbsync.o -obj-$(CONFIG_TAU) += temp.o -ifndef CONFIG_E200 -obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o -endif -obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o - -ifndef CONFIG_MATH_EMULATION -obj-$(CONFIG_8xx) += softemu8xx.o -endif - -# These are here while we do the architecture merge - -else -obj-y := irq.o idle.o \ - align.o -obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o -obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o -obj-$(CONFIG_MODULES) += module.o -obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o -obj-$(CONFIG_PCI) += pci.o -obj-$(CONFIG_KGDB) += ppc-stub.o -obj-$(CONFIG_TAU) += temp.o -ifndef CONFIG_E200 -obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o -endif -obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o -endif diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c deleted file mode 100644 index ab398c4b70b..00000000000 --- a/arch/ppc/kernel/align.c +++ /dev/null @@ -1,410 +0,0 @@ -/* - * align.c - handle alignment exceptions for the Power PC. - * - * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> - * Copyright (c) 1998-1999 TiVo, Inc. - * PowerPC 403GCX modifications. - * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> - * PowerPC 403GCX/405GP modifications. - */ -#include <linux/config.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <asm/ptrace.h> -#include <asm/processor.h> -#include <asm/uaccess.h> -#include <asm/system.h> -#include <asm/cache.h> - -struct aligninfo { - unsigned char len; - unsigned char flags; -}; - -#if defined(CONFIG_4xx) || defined(CONFIG_POWER4) || defined(CONFIG_BOOKE) -#define OPCD(inst) (((inst) & 0xFC000000) >> 26) -#define RS(inst) (((inst) & 0x03E00000) >> 21) -#define RA(inst) (((inst) & 0x001F0000) >> 16) -#define IS_XFORM(code) ((code) == 31) -#endif - -#define INVALID { 0, 0 } - -#define LD 1 /* load */ -#define ST 2 /* store */ -#define SE 4 /* sign-extend value */ -#define F 8 /* to/from fp regs */ -#define U 0x10 /* update index register */ -#define M 0x20 /* multiple load/store */ -#define S 0x40 /* single-precision fp, or byte-swap value */ -#define SX 0x40 /* byte count in XER */ -#define HARD 0x80 /* string, stwcx. */ - -#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */ - -/* - * The PowerPC stores certain bits of the instruction that caused the - * alignment exception in the DSISR register. This array maps those - * bits to information about the operand length and what the - * instruction would do. 
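For orientation while reading the table that follows: on classic 6xx parts the kernel indexes this array straight from bits of DSISR, exactly as fix_alignment() further down in this file does. A minimal sketch of that decode, reusing the same shifts and masks as the deleted code (illustrative only):

    /* Sketch: turn an alignment-fault DSISR into an aligninfo lookup (6xx case). */
    static void decode_alignment_dsisr(unsigned long dsisr)
    {
            unsigned int reg  = (dsisr >> 5) & 0x1f;   /* source/destination GPR          */
            unsigned int areg = dsisr & 0x1f;          /* GPR updated by the 'U' variants */
            unsigned int idx  = (dsisr >> 10) & 0x7f;  /* 7-bit index into aligninfo[128] */
            unsigned int len  = aligninfo[idx].len;    /* operand size; 0 means "special" */
            unsigned int fl   = aligninfo[idx].flags;  /* LD/ST/SE/F/U/M/S... bits above  */

            (void)reg; (void)areg; (void)len; (void)fl; /* lookup only; handling follows in fix_alignment() */
    }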
- */ -static struct aligninfo aligninfo[128] = { - { 4, LD }, /* 00 0 0000: lwz / lwarx */ - INVALID, /* 00 0 0001 */ - { 4, ST }, /* 00 0 0010: stw */ - INVALID, /* 00 0 0011 */ - { 2, LD }, /* 00 0 0100: lhz */ - { 2, LD+SE }, /* 00 0 0101: lha */ - { 2, ST }, /* 00 0 0110: sth */ - { 4, LD+M }, /* 00 0 0111: lmw */ - { 4, LD+F+S }, /* 00 0 1000: lfs */ - { 8, LD+F }, /* 00 0 1001: lfd */ - { 4, ST+F+S }, /* 00 0 1010: stfs */ - { 8, ST+F }, /* 00 0 1011: stfd */ - INVALID, /* 00 0 1100 */ - INVALID, /* 00 0 1101: ld/ldu/lwa */ - INVALID, /* 00 0 1110 */ - INVALID, /* 00 0 1111: std/stdu */ - { 4, LD+U }, /* 00 1 0000: lwzu */ - INVALID, /* 00 1 0001 */ - { 4, ST+U }, /* 00 1 0010: stwu */ - INVALID, /* 00 1 0011 */ - { 2, LD+U }, /* 00 1 0100: lhzu */ - { 2, LD+SE+U }, /* 00 1 0101: lhau */ - { 2, ST+U }, /* 00 1 0110: sthu */ - { 4, ST+M }, /* 00 1 0111: stmw */ - { 4, LD+F+S+U }, /* 00 1 1000: lfsu */ - { 8, LD+F+U }, /* 00 1 1001: lfdu */ - { 4, ST+F+S+U }, /* 00 1 1010: stfsu */ - { 8, ST+F+U }, /* 00 1 1011: stfdu */ - INVALID, /* 00 1 1100 */ - INVALID, /* 00 1 1101 */ - INVALID, /* 00 1 1110 */ - INVALID, /* 00 1 1111 */ - INVALID, /* 01 0 0000: ldx */ - INVALID, /* 01 0 0001 */ - INVALID, /* 01 0 0010: stdx */ - INVALID, /* 01 0 0011 */ - INVALID, /* 01 0 0100 */ - INVALID, /* 01 0 0101: lwax */ - INVALID, /* 01 0 0110 */ - INVALID, /* 01 0 0111 */ - { 4, LD+M+HARD+SX }, /* 01 0 1000: lswx */ - { 4, LD+M+HARD }, /* 01 0 1001: lswi */ - { 4, ST+M+HARD+SX }, /* 01 0 1010: stswx */ - { 4, ST+M+HARD }, /* 01 0 1011: stswi */ - INVALID, /* 01 0 1100 */ - INVALID, /* 01 0 1101 */ - INVALID, /* 01 0 1110 */ - INVALID, /* 01 0 1111 */ - INVALID, /* 01 1 0000: ldux */ - INVALID, /* 01 1 0001 */ - INVALID, /* 01 1 0010: stdux */ - INVALID, /* 01 1 0011 */ - INVALID, /* 01 1 0100 */ - INVALID, /* 01 1 0101: lwaux */ - INVALID, /* 01 1 0110 */ - INVALID, /* 01 1 0111 */ - INVALID, /* 01 1 1000 */ - INVALID, /* 01 1 1001 */ - INVALID, /* 01 1 1010 */ - INVALID, /* 01 1 1011 */ - INVALID, /* 01 1 1100 */ - INVALID, /* 01 1 1101 */ - INVALID, /* 01 1 1110 */ - INVALID, /* 01 1 1111 */ - INVALID, /* 10 0 0000 */ - INVALID, /* 10 0 0001 */ - { 0, ST+HARD }, /* 10 0 0010: stwcx. 
*/ - INVALID, /* 10 0 0011 */ - INVALID, /* 10 0 0100 */ - INVALID, /* 10 0 0101 */ - INVALID, /* 10 0 0110 */ - INVALID, /* 10 0 0111 */ - { 4, LD+S }, /* 10 0 1000: lwbrx */ - INVALID, /* 10 0 1001 */ - { 4, ST+S }, /* 10 0 1010: stwbrx */ - INVALID, /* 10 0 1011 */ - { 2, LD+S }, /* 10 0 1100: lhbrx */ - INVALID, /* 10 0 1101 */ - { 2, ST+S }, /* 10 0 1110: sthbrx */ - INVALID, /* 10 0 1111 */ - INVALID, /* 10 1 0000 */ - INVALID, /* 10 1 0001 */ - INVALID, /* 10 1 0010 */ - INVALID, /* 10 1 0011 */ - INVALID, /* 10 1 0100 */ - INVALID, /* 10 1 0101 */ - INVALID, /* 10 1 0110 */ - INVALID, /* 10 1 0111 */ - INVALID, /* 10 1 1000 */ - INVALID, /* 10 1 1001 */ - INVALID, /* 10 1 1010 */ - INVALID, /* 10 1 1011 */ - INVALID, /* 10 1 1100 */ - INVALID, /* 10 1 1101 */ - INVALID, /* 10 1 1110 */ - { 0, ST+HARD }, /* 10 1 1111: dcbz */ - { 4, LD }, /* 11 0 0000: lwzx */ - INVALID, /* 11 0 0001 */ - { 4, ST }, /* 11 0 0010: stwx */ - INVALID, /* 11 0 0011 */ - { 2, LD }, /* 11 0 0100: lhzx */ - { 2, LD+SE }, /* 11 0 0101: lhax */ - { 2, ST }, /* 11 0 0110: sthx */ - INVALID, /* 11 0 0111 */ - { 4, LD+F+S }, /* 11 0 1000: lfsx */ - { 8, LD+F }, /* 11 0 1001: lfdx */ - { 4, ST+F+S }, /* 11 0 1010: stfsx */ - { 8, ST+F }, /* 11 0 1011: stfdx */ - INVALID, /* 11 0 1100 */ - INVALID, /* 11 0 1101: lmd */ - INVALID, /* 11 0 1110 */ - INVALID, /* 11 0 1111: stmd */ - { 4, LD+U }, /* 11 1 0000: lwzux */ - INVALID, /* 11 1 0001 */ - { 4, ST+U }, /* 11 1 0010: stwux */ - INVALID, /* 11 1 0011 */ - { 2, LD+U }, /* 11 1 0100: lhzux */ - { 2, LD+SE+U }, /* 11 1 0101: lhaux */ - { 2, ST+U }, /* 11 1 0110: sthux */ - INVALID, /* 11 1 0111 */ - { 4, LD+F+S+U }, /* 11 1 1000: lfsux */ - { 8, LD+F+U }, /* 11 1 1001: lfdux */ - { 4, ST+F+S+U }, /* 11 1 1010: stfsux */ - { 8, ST+F+U }, /* 11 1 1011: stfdux */ - INVALID, /* 11 1 1100 */ - INVALID, /* 11 1 1101 */ - INVALID, /* 11 1 1110 */ - INVALID, /* 11 1 1111 */ -}; - -#define SWAP(a, b) (t = (a), (a) = (b), (b) = t) - -int -fix_alignment(struct pt_regs *regs) -{ - int instr, nb, flags; -#if defined(CONFIG_4xx) || defined(CONFIG_POWER4) || defined(CONFIG_BOOKE) - int opcode, f1, f2, f3; -#endif - int i, t; - int reg, areg; - int offset, nb0; - unsigned char __user *addr; - unsigned char *rptr; - union { - long l; - float f; - double d; - unsigned char v[8]; - } data; - - CHECK_FULL_REGS(regs); - -#if defined(CONFIG_4xx) || defined(CONFIG_POWER4) || defined(CONFIG_BOOKE) - /* The 4xx-family & Book-E processors have no DSISR register, - * so we emulate it. - * The POWER4 has a DSISR register but doesn't set it on - * an alignment fault. -- paulus - */ - - if (__get_user(instr, (unsigned int __user *) regs->nip)) - return 0; - opcode = OPCD(instr); - reg = RS(instr); - areg = RA(instr); - - if (!IS_XFORM(opcode)) { - f1 = 0; - f2 = (instr & 0x04000000) >> 26; - f3 = (instr & 0x78000000) >> 27; - } else { - f1 = (instr & 0x00000006) >> 1; - f2 = (instr & 0x00000040) >> 6; - f3 = (instr & 0x00000780) >> 7; - } - - instr = ((f1 << 5) | (f2 << 4) | f3); -#else - reg = (regs->dsisr >> 5) & 0x1f; /* source/dest register */ - areg = regs->dsisr & 0x1f; /* register to update */ - instr = (regs->dsisr >> 10) & 0x7f; -#endif - - nb = aligninfo[instr].len; - if (nb == 0) { - long __user *p; - int i; - - if (instr != DCBZ) - return 0; /* too hard or invalid instruction */ - /* - * The dcbz (data cache block zero) instruction - * gives an alignment fault if used on non-cacheable - * memory. 
We handle the fault mainly for the - * case when we are running with the cache disabled - * for debugging. - */ - p = (long __user *) (regs->dar & -L1_CACHE_BYTES); - if (user_mode(regs) - && !access_ok(VERIFY_WRITE, p, L1_CACHE_BYTES)) - return -EFAULT; - for (i = 0; i < L1_CACHE_BYTES / sizeof(long); ++i) - if (__put_user(0, p+i)) - return -EFAULT; - return 1; - } - - flags = aligninfo[instr].flags; - if ((flags & (LD|ST)) == 0) - return 0; - - /* For the 4xx-family & Book-E processors, the 'dar' field of the - * pt_regs structure is overloaded and is really from the DEAR. - */ - - addr = (unsigned char __user *)regs->dar; - - if (flags & M) { - /* lmw, stmw, lswi/x, stswi/x */ - nb0 = 0; - if (flags & HARD) { - if (flags & SX) { - nb = regs->xer & 127; - if (nb == 0) - return 1; - } else { - if (__get_user(instr, - (unsigned int __user *)regs->nip)) - return 0; - nb = (instr >> 11) & 0x1f; - if (nb == 0) - nb = 32; - } - if (nb + reg * 4 > 128) { - nb0 = nb + reg * 4 - 128; - nb = 128 - reg * 4; - } - } else { - /* lwm, stmw */ - nb = (32 - reg) * 4; - } - - if (!access_ok((flags & ST? VERIFY_WRITE: VERIFY_READ), addr, nb+nb0)) - return -EFAULT; /* bad address */ - - rptr = (unsigned char *) ®s->gpr[reg]; - if (flags & LD) { - for (i = 0; i < nb; ++i) - if (__get_user(rptr[i], addr+i)) - return -EFAULT; - if (nb0 > 0) { - rptr = (unsigned char *) ®s->gpr[0]; - addr += nb; - for (i = 0; i < nb0; ++i) - if (__get_user(rptr[i], addr+i)) - return -EFAULT; - } - for (; (i & 3) != 0; ++i) - rptr[i] = 0; - } else { - for (i = 0; i < nb; ++i) - if (__put_user(rptr[i], addr+i)) - return -EFAULT; - if (nb0 > 0) { - rptr = (unsigned char *) ®s->gpr[0]; - addr += nb; - for (i = 0; i < nb0; ++i) - if (__put_user(rptr[i], addr+i)) - return -EFAULT; - } - } - return 1; - } - - offset = 0; - if (nb < 4) { - /* read/write the least significant bits */ - data.l = 0; - offset = 4 - nb; - } - - /* Verify the address of the operand */ - if (user_mode(regs)) { - if (!access_ok((flags & ST? VERIFY_WRITE: VERIFY_READ), addr, nb)) - return -EFAULT; /* bad address */ - } - - if (flags & F) { - preempt_disable(); - if (regs->msr & MSR_FP) - giveup_fpu(current); - preempt_enable(); - } - - /* If we read the operand, copy it in, else get register values */ - if (flags & LD) { - for (i = 0; i < nb; ++i) - if (__get_user(data.v[offset+i], addr+i)) - return -EFAULT; - } else if (flags & F) { - data.d = current->thread.fpr[reg]; - } else { - data.l = regs->gpr[reg]; - } - - switch (flags & ~U) { - case LD+SE: /* sign extend */ - if (data.v[2] >= 0x80) - data.v[0] = data.v[1] = -1; - break; - - case LD+S: /* byte-swap */ - case ST+S: - if (nb == 2) { - SWAP(data.v[2], data.v[3]); - } else { - SWAP(data.v[0], data.v[3]); - SWAP(data.v[1], data.v[2]); - } - break; - - /* Single-precision FP load and store require conversions... 
*/ - case LD+F+S: -#ifdef CONFIG_PPC_FPU - preempt_disable(); - enable_kernel_fp(); - cvt_fd(&data.f, &data.d, ¤t->thread); - preempt_enable(); -#else - return 0; -#endif - break; - case ST+F+S: -#ifdef CONFIG_PPC_FPU - preempt_disable(); - enable_kernel_fp(); - cvt_df(&data.d, &data.f, ¤t->thread); - preempt_enable(); -#else - return 0; -#endif - break; - } - - if (flags & ST) { - for (i = 0; i < nb; ++i) - if (__put_user(data.v[offset+i], addr+i)) - return -EFAULT; - } else if (flags & F) { - current->thread.fpr[reg] = data.d; - } else { - regs->gpr[reg] = data.l; - } - - if (flags & U) - regs->gpr[areg] = regs->dar; - - return 1; -} diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c deleted file mode 100644 index 968261d6957..00000000000 --- a/arch/ppc/kernel/asm-offsets.c +++ /dev/null @@ -1,147 +0,0 @@ -/* - * This program is used to generate definitions needed by - * assembly language modules. - * - * We use the technique used in the OSF Mach kernel code: - * generate asm statements containing #defines, - * compile this file to assembler, and then extract the - * #defines from the assembly-language output. - */ - -#include <linux/config.h> -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/types.h> -#include <linux/ptrace.h> -#include <linux/suspend.h> -#include <linux/mman.h> -#include <linux/mm.h> -#include <asm/io.h> -#include <asm/page.h> -#include <asm/pgtable.h> -#include <asm/processor.h> -#include <asm/cputable.h> -#include <asm/thread_info.h> - -#define DEFINE(sym, val) \ - asm volatile("\n->" #sym " %0 " #val : : "i" (val)) - -#define BLANK() asm volatile("\n->" : : ) - -int -main(void) -{ - DEFINE(THREAD, offsetof(struct task_struct, thread)); - DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info)); - DEFINE(MM, offsetof(struct task_struct, mm)); - DEFINE(PTRACE, offsetof(struct task_struct, ptrace)); - DEFINE(KSP, offsetof(struct thread_struct, ksp)); - DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); - DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall)); - DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); - DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); - DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0])); - DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr)); -#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0)); - DEFINE(PT_PTRACED, PT_PTRACED); -#endif -#ifdef CONFIG_ALTIVEC - DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0])); - DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave)); - DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr)); - DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); -#endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_SPE - DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0])); - DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc)); - DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr)); - DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe)); -#endif /* CONFIG_SPE */ - /* Interrupt register frame */ - DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD); - DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); - /* in fact we only use gpr0 - gpr9 and gpr20 - gpr23 */ - DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0])); - DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1])); - DEFINE(GPR2, 
STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2])); - DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3])); - DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4])); - DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5])); - DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6])); - DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7])); - DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8])); - DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9])); - DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10])); - DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11])); - DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12])); - DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13])); - DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14])); - DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15])); - DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16])); - DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17])); - DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18])); - DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19])); - DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20])); - DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21])); - DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22])); - DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23])); - DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24])); - DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25])); - DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26])); - DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27])); - DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28])); - DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29])); - DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30])); - DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31])); - /* Note: these symbols include _ because they overlap with special - * register names - */ - DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip)); - DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr)); - DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr)); - DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link)); - DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr)); - DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq)); - DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer)); - DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); - DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); - /* The PowerPC 400-class & Book-E processors have neither the DAR nor the DSISR - * SPRs. Hence, we overload them to hold the similar DEAR and ESR SPRs - * for such processors. For critical interrupts we use them to - * hold SRR0 and SRR1. 
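The DEFINE() macro at the top of this file never executes; asm-offsets.c is only compiled to assembly so the constants can be scraped back out of the compiler output. A rough illustration of the round trip (the exact Kbuild sed rule and the name of the generated header are assumptions, not part of this diff):

    /* Inside main(), DEFINE(THREAD, offsetof(struct task_struct, thread)) expands to roughly: */
    asm volatile("\n->THREAD %0 offsetof(struct task_struct, thread)"
                 : : "i" (offsetof(struct task_struct, thread)));

    /* Compiling with -S leaves a marker line in the .s output, e.g.
     *     ->THREAD <N> offsetof(struct task_struct, thread)
     * which the build scripts rewrite as
     *     #define THREAD <N>
     * so entry.S and friends can use THREAD as a plain assembler constant. */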
- */ - DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); - DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); - DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3)); - DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result)); - DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap)); - DEFINE(CLONE_VM, CLONE_VM); - DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); - DEFINE(MM_PGD, offsetof(struct mm_struct, pgd)); - - /* About the CPU features table */ - DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec)); - DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask)); - DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value)); - DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); - DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); - - DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror)); - DEFINE(TI_TASK, offsetof(struct thread_info, task)); - DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); - DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); - DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); - DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); - - DEFINE(pbe_address, offsetof(struct pbe, address)); - DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); - DEFINE(pbe_next, offsetof(struct pbe, next)); - - DEFINE(TASK_SIZE, TASK_SIZE); - DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); - return 0; -} diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S deleted file mode 100644 index 55ed7716636..00000000000 --- a/arch/ppc/kernel/cpu_setup_6xx.S +++ /dev/null @@ -1,474 +0,0 @@ -/* - * This file contains low level CPU setup functions. - * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include <linux/config.h> -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/cputable.h> -#include <asm/ppc_asm.h> -#include <asm/asm-offsets.h> -#include <asm/cache.h> - -_GLOBAL(__setup_cpu_603) - b setup_common_caches -_GLOBAL(__setup_cpu_604) - mflr r4 - bl setup_common_caches - bl setup_604_hid0 - mtlr r4 - blr -_GLOBAL(__setup_cpu_750) - mflr r4 - bl __init_fpu_registers - bl setup_common_caches - bl setup_750_7400_hid0 - mtlr r4 - blr -_GLOBAL(__setup_cpu_750cx) - mflr r4 - bl __init_fpu_registers - bl setup_common_caches - bl setup_750_7400_hid0 - bl setup_750cx - mtlr r4 - blr -_GLOBAL(__setup_cpu_750fx) - mflr r4 - bl __init_fpu_registers - bl setup_common_caches - bl setup_750_7400_hid0 - bl setup_750fx - mtlr r4 - blr -_GLOBAL(__setup_cpu_7400) - mflr r4 - bl __init_fpu_registers - bl setup_7400_workarounds - bl setup_common_caches - bl setup_750_7400_hid0 - mtlr r4 - blr -_GLOBAL(__setup_cpu_7410) - mflr r4 - bl __init_fpu_registers - bl setup_7410_workarounds - bl setup_common_caches - bl setup_750_7400_hid0 - li r3,0 - mtspr SPRN_L2CR2,r3 - mtlr r4 - blr -_GLOBAL(__setup_cpu_745x) - mflr r4 - bl setup_common_caches - bl setup_745x_specifics - mtlr r4 - blr - -/* Enable caches for 603's, 604, 750 & 7400 */ -setup_common_caches: - mfspr r11,SPRN_HID0 - andi. 
r0,r11,HID0_DCE - ori r11,r11,HID0_ICE|HID0_DCE - ori r8,r11,HID0_ICFI - bne 1f /* don't invalidate the D-cache */ - ori r8,r8,HID0_DCI /* unless it wasn't enabled */ -1: sync - mtspr SPRN_HID0,r8 /* enable and invalidate caches */ - sync - mtspr SPRN_HID0,r11 /* enable caches */ - sync - isync - blr - -/* 604, 604e, 604ev, ... - * Enable superscalar execution & branch history table - */ -setup_604_hid0: - mfspr r11,SPRN_HID0 - ori r11,r11,HID0_SIED|HID0_BHTE - ori r8,r11,HID0_BTCD - sync - mtspr SPRN_HID0,r8 /* flush branch target address cache */ - sync /* on 604e/604r */ - mtspr SPRN_HID0,r11 - sync - isync - blr - -/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some - * erratas we work around here. - * Moto MPC710CE.pdf describes them, those are errata - * #3, #4 and #5 - * Note that we assume the firmware didn't choose to - * apply other workarounds (there are other ones documented - * in the .pdf). It appear that Apple firmware only works - * around #3 and with the same fix we use. We may want to - * check if the CPU is using 60x bus mode in which case - * the workaround for errata #4 is useless. Also, we may - * want to explicitely clear HID0_NOPDST as this is not - * needed once we have applied workaround #5 (though it's - * not set by Apple's firmware at least). - */ -setup_7400_workarounds: - mfpvr r3 - rlwinm r3,r3,0,20,31 - cmpwi 0,r3,0x0207 - ble 1f - blr -setup_7410_workarounds: - mfpvr r3 - rlwinm r3,r3,0,20,31 - cmpwi 0,r3,0x0100 - bnelr -1: - mfspr r11,SPRN_MSSSR0 - /* Errata #3: Set L1OPQ_SIZE to 0x10 */ - rlwinm r11,r11,0,9,6 - oris r11,r11,0x0100 - /* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */ - oris r11,r11,0x0002 - /* Errata #5: Set DRLT_SIZE to 0x01 */ - rlwinm r11,r11,0,5,2 - oris r11,r11,0x0800 - sync - mtspr SPRN_MSSSR0,r11 - sync - isync - blr - -/* 740/750/7400/7410 - * Enable Store Gathering (SGE), Address Brodcast (ABE), - * Branch History Table (BHTE), Branch Target ICache (BTIC) - * Dynamic Power Management (DPM), Speculative (SPD) - * Clear Instruction cache throttling (ICTC) - */ -setup_750_7400_hid0: - mfspr r11,SPRN_HID0 - ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC - oris r11,r11,HID0_DPM@h -BEGIN_FTR_SECTION - xori r11,r11,HID0_BTIC -END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC) -BEGIN_FTR_SECTION - xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */ -END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) - li r3,HID0_SPD - andc r11,r11,r3 /* clear SPD: enable speculative */ - li r3,0 - mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */ - isync - mtspr SPRN_HID0,r11 - sync - isync - blr - -/* 750cx specific - * Looks like we have to disable NAP feature for some PLL settings... - * (waiting for confirmation) - */ -setup_750cx: - mfspr r10, SPRN_HID1 - rlwinm r10,r10,4,28,31 - cmpwi cr0,r10,7 - cmpwi cr1,r10,9 - cmpwi cr2,r10,11 - cror 4*cr0+eq,4*cr0+eq,4*cr1+eq - cror 4*cr0+eq,4*cr0+eq,4*cr2+eq - bnelr - lwz r6,CPU_SPEC_FEATURES(r5) - li r7,CPU_FTR_CAN_NAP - andc r6,r6,r7 - stw r6,CPU_SPEC_FEATURES(r5) - blr - -/* 750fx specific - */ -setup_750fx: - blr - -/* MPC 745x - * Enable Store Gathering (SGE), Branch Folding (FOLD) - * Branch History Table (BHTE), Branch Target ICache (BTIC) - * Dynamic Power Management (DPM), Speculative (SPD) - * Ensure our data cache instructions really operate. - * Timebase has to be running or we wouldn't have made it here, - * just ensure we don't disable it. 
- * Clear Instruction cache throttling (ICTC) - * Enable L2 HW prefetch - */ -setup_745x_specifics: - /* We check for the presence of an L3 cache setup by - * the firmware. If any, we disable NAP capability as - * it's known to be bogus on rev 2.1 and earlier - */ - mfspr r11,SPRN_L3CR - andis. r11,r11,L3CR_L3E@h - beq 1f - lwz r6,CPU_SPEC_FEATURES(r5) - andi. r0,r6,CPU_FTR_L3_DISABLE_NAP - beq 1f - li r7,CPU_FTR_CAN_NAP - andc r6,r6,r7 - stw r6,CPU_SPEC_FEATURES(r5) -1: - mfspr r11,SPRN_HID0 - - /* All of the bits we have to set..... - */ - ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE - ori r11,r11,HID0_LRSTK | HID0_BTIC - oris r11,r11,HID0_DPM@h -BEGIN_FTR_SECTION - xori r11,r11,HID0_BTIC -END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC) -BEGIN_FTR_SECTION - xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */ -END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) - - /* All of the bits we have to clear.... - */ - li r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI - andc r11,r11,r3 /* clear SPD: enable speculative */ - li r3,0 - - mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */ - isync - mtspr SPRN_HID0,r11 - sync - isync - - /* Enable L2 HW prefetch, if L2 is enabled - */ - mfspr r3,SPRN_L2CR - andis. r3,r3,L2CR_L2E@h - beqlr - mfspr r3,SPRN_MSSCR0 - ori r3,r3,3 - sync - mtspr SPRN_MSSCR0,r3 - sync - isync - blr - -/* - * Initialize the FPU registers. This is needed to work around an errata - * in some 750 cpus where using a not yet initialized FPU register after - * power on reset may hang the CPU - */ -_GLOBAL(__init_fpu_registers) - mfmsr r10 - ori r11,r10,MSR_FP - mtmsr r11 - isync - addis r9,r3,empty_zero_page@ha - addi r9,r9,empty_zero_page@l - REST_32FPRS(0,r9) - sync - mtmsr r10 - isync - blr - - -/* Definitions for the table use to save CPU states */ -#define CS_HID0 0 -#define CS_HID1 4 -#define CS_HID2 8 -#define CS_MSSCR0 12 -#define CS_MSSSR0 16 -#define CS_ICTRL 20 -#define CS_LDSTCR 24 -#define CS_LDSTDB 28 -#define CS_SIZE 32 - - .data - .balign L1_CACHE_BYTES -cpu_state_storage: - .space CS_SIZE - .balign L1_CACHE_BYTES,0 - .text - -/* Called in normal context to backup CPU 0 state. This - * does not include cache settings. This function is also - * called for machine sleep. This does not include the MMU - * setup, BATs, etc... but rather the "special" registers - * like HID0, HID1, MSSCR0, etc... 
- */ -_GLOBAL(__save_cpu_setup) - /* Some CR fields are volatile, we back it up all */ - mfcr r7 - - /* Get storage ptr */ - lis r5,cpu_state_storage@h - ori r5,r5,cpu_state_storage@l - - /* Save HID0 (common to all CONFIG_6xx cpus) */ - mfspr r3,SPRN_HID0 - stw r3,CS_HID0(r5) - - /* Now deal with CPU type dependent registers */ - mfspr r3,SPRN_PVR - srwi r3,r3,16 - cmplwi cr0,r3,0x8000 /* 7450 */ - cmplwi cr1,r3,0x000c /* 7400 */ - cmplwi cr2,r3,0x800c /* 7410 */ - cmplwi cr3,r3,0x8001 /* 7455 */ - cmplwi cr4,r3,0x8002 /* 7457 */ - cmplwi cr5,r3,0x8003 /* 7447A */ - cmplwi cr6,r3,0x7000 /* 750FX */ - cmplwi cr7,r3,0x8004 /* 7448 */ - /* cr1 is 7400 || 7410 */ - cror 4*cr1+eq,4*cr1+eq,4*cr2+eq - /* cr0 is 74xx */ - cror 4*cr0+eq,4*cr0+eq,4*cr3+eq - cror 4*cr0+eq,4*cr0+eq,4*cr4+eq - cror 4*cr0+eq,4*cr0+eq,4*cr1+eq - cror 4*cr0+eq,4*cr0+eq,4*cr5+eq - cror 4*cr0+eq,4*cr0+eq,4*cr7+eq - bne 1f - /* Backup 74xx specific regs */ - mfspr r4,SPRN_MSSCR0 - stw r4,CS_MSSCR0(r5) - mfspr r4,SPRN_MSSSR0 - stw r4,CS_MSSSR0(r5) - beq cr1,1f - /* Backup 745x specific registers */ - mfspr r4,SPRN_HID1 - stw r4,CS_HID1(r5) - mfspr r4,SPRN_ICTRL - stw r4,CS_ICTRL(r5) - mfspr r4,SPRN_LDSTCR - stw r4,CS_LDSTCR(r5) - mfspr r4,SPRN_LDSTDB - stw r4,CS_LDSTDB(r5) -1: - bne cr6,1f - /* Backup 750FX specific registers */ - mfspr r4,SPRN_HID1 - stw r4,CS_HID1(r5) - /* If rev 2.x, backup HID2 */ - mfspr r3,SPRN_PVR - andi. r3,r3,0xff00 - cmpwi cr0,r3,0x0200 - bne 1f - mfspr r4,SPRN_HID2 - stw r4,CS_HID2(r5) -1: - mtcr r7 - blr - -/* Called with no MMU context (typically MSR:IR/DR off) to - * restore CPU state as backed up by the previous - * function. This does not include cache setting - */ -_GLOBAL(__restore_cpu_setup) - /* Some CR fields are volatile, we back it up all */ - mfcr r7 - - /* Get storage ptr */ - lis r5,(cpu_state_storage-KERNELBASE)@h - ori r5,r5,cpu_state_storage@l - - /* Restore HID0 */ - lwz r3,CS_HID0(r5) - sync - isync - mtspr SPRN_HID0,r3 - sync - isync - - /* Now deal with CPU type dependent registers */ - mfspr r3,SPRN_PVR - srwi r3,r3,16 - cmplwi cr0,r3,0x8000 /* 7450 */ - cmplwi cr1,r3,0x000c /* 7400 */ - cmplwi cr2,r3,0x800c /* 7410 */ - cmplwi cr3,r3,0x8001 /* 7455 */ - cmplwi cr4,r3,0x8002 /* 7457 */ - cmplwi cr5,r3,0x8003 /* 7447A */ - cmplwi cr6,r3,0x7000 /* 750FX */ - cmplwi cr7,r3,0x8004 /* 7448 */ - /* cr1 is 7400 || 7410 */ - cror 4*cr1+eq,4*cr1+eq,4*cr2+eq - /* cr0 is 74xx */ - cror 4*cr0+eq,4*cr0+eq,4*cr3+eq - cror 4*cr0+eq,4*cr0+eq,4*cr4+eq - cror 4*cr0+eq,4*cr0+eq,4*cr1+eq - cror 4*cr0+eq,4*cr0+eq,4*cr5+eq - cror 4*cr0+eq,4*cr0+eq,4*cr7+eq - bne 2f - /* Restore 74xx specific regs */ - lwz r4,CS_MSSCR0(r5) - sync - mtspr SPRN_MSSCR0,r4 - sync - isync - lwz r4,CS_MSSSR0(r5) - sync - mtspr SPRN_MSSSR0,r4 - sync - isync - bne cr2,1f - /* Clear 7410 L2CR2 */ - li r4,0 - mtspr SPRN_L2CR2,r4 -1: beq cr1,2f - /* Restore 745x specific registers */ - lwz r4,CS_HID1(r5) - sync - mtspr SPRN_HID1,r4 - isync - sync - lwz r4,CS_ICTRL(r5) - sync - mtspr SPRN_ICTRL,r4 - isync - sync - lwz r4,CS_LDSTCR(r5) - sync - mtspr SPRN_LDSTCR,r4 - isync - sync - lwz r4,CS_LDSTDB(r5) - sync - mtspr SPRN_LDSTDB,r4 - isync - sync -2: bne cr6,1f - /* Restore 750FX specific registers - * that is restore HID2 on rev 2.x and PLL config & switch - * to PLL 0 on all - */ - /* If rev 2.x, restore HID2 with low voltage bit cleared */ - mfspr r3,SPRN_PVR - andi. 
r3,r3,0xff00 - cmpwi cr0,r3,0x0200 - bne 4f - lwz r4,CS_HID2(r5) - rlwinm r4,r4,0,19,17 - mtspr SPRN_HID2,r4 - sync -4: - lwz r4,CS_HID1(r5) - rlwinm r5,r4,0,16,14 - mtspr SPRN_HID1,r5 - /* Wait for PLL to stabilize */ - mftbl r5 -3: mftbl r6 - sub r6,r6,r5 - cmplwi cr0,r6,10000 - ble 3b - /* Setup final PLL */ - mtspr SPRN_HID1,r4 -1: - mtcr r7 - blr - diff --git a/arch/ppc/kernel/cpu_setup_power4.S b/arch/ppc/kernel/cpu_setup_power4.S deleted file mode 100644 index d7bfd60e21f..00000000000 --- a/arch/ppc/kernel/cpu_setup_power4.S +++ /dev/null @@ -1,198 +0,0 @@ -/* - * This file contains low level CPU setup functions. - * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include <linux/config.h> -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/ppc_asm.h> -#include <asm/cputable.h> -#include <asm/asm-offsets.h> -#include <asm/cache.h> - -_GLOBAL(__970_cpu_preinit) - /* - * Deal only with PPC970 and PPC970FX. - */ - mfspr r0,SPRN_PVR - srwi r0,r0,16 - cmpwi cr0,r0,0x39 - cmpwi cr1,r0,0x3c - cror 4*cr0+eq,4*cr0+eq,4*cr1+eq - bnelr - - /* Make sure HID4:rm_ci is off before MMU is turned off, that large - * pages are enabled with HID4:61 and clear HID5:DCBZ_size and - * HID5:DCBZ32_ill - */ - li r0,0 - mfspr r11,SPRN_HID4 - rldimi r11,r0,40,23 /* clear bit 23 (rm_ci) */ - rldimi r11,r0,2,61 /* clear bit 61 (lg_pg_en) */ - sync - mtspr SPRN_HID4,r11 - isync - sync - mfspr r11,SPRN_HID5 - rldimi r11,r0,6,56 /* clear bits 56 & 57 (DCBZ*) */ - sync - mtspr SPRN_HID5,r11 - isync - sync - - /* Setup some basic HID1 features */ - mfspr r0,SPRN_HID1 - li r11,0x1200 /* enable i-fetch cacheability */ - sldi r11,r11,44 /* and prefetch */ - or r0,r0,r11 - mtspr SPRN_HID1,r0 - mtspr SPRN_HID1,r0 - isync - - /* Clear HIOR */ - li r0,0 - sync - mtspr SPRN_HIOR,0 /* Clear interrupt prefix */ - isync - blr - -_GLOBAL(__setup_cpu_ppc970) - mfspr r0,SPRN_HID0 - li r11,5 /* clear DOZE and SLEEP */ - rldimi r0,r11,52,8 /* set NAP and DPM */ - mtspr SPRN_HID0,r0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - sync - isync - blr - -/* Definitions for the table use to save CPU states */ -#define CS_HID0 0 -#define CS_HID1 8 -#define CS_HID4 16 -#define CS_HID5 24 -#define CS_SIZE 32 - - .data - .balign L1_CACHE_BYTES -cpu_state_storage: - .space CS_SIZE - .balign L1_CACHE_BYTES,0 - .text - -/* Called in normal context to backup CPU 0 state. This - * does not include cache settings. This function is also - * called for machine sleep. This does not include the MMU - * setup, BATs, etc... but rather the "special" registers - * like HID0, HID1, HID4, etc... 
- */ -_GLOBAL(__save_cpu_setup) - /* Some CR fields are volatile, we back it up all */ - mfcr r7 - - /* Get storage ptr */ - lis r5,cpu_state_storage@h - ori r5,r5,cpu_state_storage@l - - /* We only deal with 970 for now */ - mfspr r0,SPRN_PVR - srwi r0,r0,16 - cmpwi cr0,r0,0x39 - cmpwi cr1,r0,0x3c - cror 4*cr0+eq,4*cr0+eq,4*cr1+eq - bne 1f - - /* Save HID0,1,4 and 5 */ - mfspr r3,SPRN_HID0 - std r3,CS_HID0(r5) - mfspr r3,SPRN_HID1 - std r3,CS_HID1(r5) - mfspr r3,SPRN_HID4 - std r3,CS_HID4(r5) - mfspr r3,SPRN_HID5 - std r3,CS_HID5(r5) - -1: - mtcr r7 - blr - -/* Called with no MMU context (typically MSR:IR/DR off) to - * restore CPU state as backed up by the previous - * function. This does not include cache setting - */ -_GLOBAL(__restore_cpu_setup) - /* Some CR fields are volatile, we back it up all */ - mfcr r7 - - /* Get storage ptr */ - lis r5,(cpu_state_storage-KERNELBASE)@h - ori r5,r5,cpu_state_storage@l - - /* We only deal with 970 for now */ - mfspr r0,SPRN_PVR - srwi r0,r0,16 - cmpwi cr0,r0,0x39 - cmpwi cr1,r0,0x3c - cror 4*cr0+eq,4*cr0+eq,4*cr1+eq - bne 1f - - /* Clear interrupt prefix */ - li r0,0 - sync - mtspr SPRN_HIOR,0 - isync - - /* Restore HID0 */ - ld r3,CS_HID0(r5) - sync - isync - mtspr SPRN_HID0,r3 - mfspr r3,SPRN_HID0 - mfspr r3,SPRN_HID0 - mfspr r3,SPRN_HID0 - mfspr r3,SPRN_HID0 - mfspr r3,SPRN_HID0 - mfspr r3,SPRN_HID0 - sync - isync - - /* Restore HID1 */ - ld r3,CS_HID1(r5) - sync - isync - mtspr SPRN_HID1,r3 - mtspr SPRN_HID1,r3 - sync - isync - - /* Restore HID4 */ - ld r3,CS_HID4(r5) - sync - isync - mtspr SPRN_HID4,r3 - sync - isync - - /* Restore HID5 */ - ld r3,CS_HID5(r5) - sync - isync - mtspr SPRN_HID5,r3 - sync - isync -1: - mtcr r7 - blr - diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c deleted file mode 100644 index 685fd0defe2..00000000000 --- a/arch/ppc/kernel/dma-mapping.c +++ /dev/null @@ -1,443 +0,0 @@ -/* - * PowerPC version derived from arch/arm/mm/consistent.c - * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) - * - * Copyright (C) 2000 Russell King - * - * Consistent memory allocators. Used for DMA devices that want to - * share uncached memory with the processor core. The function return - * is the virtual address and 'dma_handle' is the physical address. - * Mostly stolen from the ARM port, with some changes for PowerPC. - * -- Dan - * - * Reorganized to get rid of the arch-specific consistent_* functions - * and provide non-coherent implementations for the DMA API. -Matt - * - * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent() - * implementation. This is pulled straight from ARM and barely - * modified. -Matt - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include <linux/config.h> -#include <linux/module.h> -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/types.h> -#include <linux/ptrace.h> -#include <linux/mman.h> -#include <linux/mm.h> -#include <linux/swap.h> -#include <linux/stddef.h> -#include <linux/vmalloc.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/bootmem.h> -#include <linux/highmem.h> -#include <linux/dma-mapping.h> -#include <linux/hardirq.h> - -#include <asm/pgalloc.h> -#include <asm/prom.h> -#include <asm/io.h> -#include <asm/mmu_context.h> -#include <asm/pgtable.h> -#include <asm/mmu.h> -#include <asm/uaccess.h> -#include <asm/smp.h> -#include <asm/machdep.h> - -int map_page(unsigned long va, phys_addr_t pa, int flags); - -#include <asm/tlbflush.h> - -/* - * This address range defaults to a value that is safe for all - * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It - * can be further configured for specific applications under - * the "Advanced Setup" menu. -Matt - */ -#define CONSISTENT_BASE (CONFIG_CONSISTENT_START) -#define CONSISTENT_END (CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE) -#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) - -/* - * This is the page table (2MB) covering uncached, DMA consistent allocations - */ -static pte_t *consistent_pte; -static DEFINE_SPINLOCK(consistent_lock); - -/* - * VM region handling support. - * - * This should become something generic, handling VM region allocations for - * vmalloc and similar (ioremap, module space, etc). - * - * I envisage vmalloc()'s supporting vm_struct becoming: - * - * struct vm_struct { - * struct vm_region region; - * unsigned long flags; - * struct page **pages; - * unsigned int nr_pages; - * unsigned long phys_addr; - * }; - * - * get_vm_area() would then call vm_region_alloc with an appropriate - * struct vm_region head (eg): - * - * struct vm_region vmalloc_head = { - * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), - * .vm_start = VMALLOC_START, - * .vm_end = VMALLOC_END, - * }; - * - * However, vmalloc_head.vm_start is variable (typically, it is dependent on - * the amount of RAM found at boot time.) I would imagine that get_vm_area() - * would have to initialise this each time prior to calling vm_region_alloc(). - */ -struct vm_region { - struct list_head vm_list; - unsigned long vm_start; - unsigned long vm_end; -}; - -static struct vm_region consistent_head = { - .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), - .vm_start = CONSISTENT_BASE, - .vm_end = CONSISTENT_END, -}; - -static struct vm_region * -vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp) -{ - unsigned long addr = head->vm_start, end = head->vm_end - size; - unsigned long flags; - struct vm_region *c, *new; - - new = kmalloc(sizeof(struct vm_region), gfp); - if (!new) - goto out; - - spin_lock_irqsave(&consistent_lock, flags); - - list_for_each_entry(c, &head->vm_list, vm_list) { - if ((addr + size) < addr) - goto nospc; - if ((addr + size) <= c->vm_start) - goto found; - addr = c->vm_end; - if (addr > end) - goto nospc; - } - - found: - /* - * Insert this entry _before_ the one we found. 
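Worth noting for the call just below: list_add_tail(&new->vm_list, &c->vm_list) links the new region immediately before 'c', and when the scan above runs off the end of the list 'c' effectively refers back to the list head, so the new region lands on the tail. Either way the region list stays ordered by vm_start, which is what this first-fit scan relies on.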
- */ - list_add_tail(&new->vm_list, &c->vm_list); - new->vm_start = addr; - new->vm_end = addr + size; - - spin_unlock_irqrestore(&consistent_lock, flags); - return new; - - nospc: - spin_unlock_irqrestore(&consistent_lock, flags); - kfree(new); - out: - return NULL; -} - -static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr) -{ - struct vm_region *c; - - list_for_each_entry(c, &head->vm_list, vm_list) { - if (c->vm_start == addr) - goto out; - } - c = NULL; - out: - return c; -} - -/* - * Allocate DMA-coherent memory space and return both the kernel remapped - * virtual and bus address for that space. - */ -void * -__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp) -{ - struct page *page; - struct vm_region *c; - unsigned long order; - u64 mask = 0x00ffffff, limit; /* ISA default */ - - if (!consistent_pte) { - printk(KERN_ERR "%s: not initialised\n", __func__); - dump_stack(); - return NULL; - } - - size = PAGE_ALIGN(size); - limit = (mask + 1) & ~mask; - if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) { - printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n", - size, mask); - return NULL; - } - - order = get_order(size); - - if (mask != 0xffffffff) - gfp |= GFP_DMA; - - page = alloc_pages(gfp, order); - if (!page) - goto no_page; - - /* - * Invalidate any data that might be lurking in the - * kernel direct-mapped region for device DMA. - */ - { - unsigned long kaddr = (unsigned long)page_address(page); - memset(page_address(page), 0, size); - flush_dcache_range(kaddr, kaddr + size); - } - - /* - * Allocate a virtual address in the consistent mapping region. - */ - c = vm_region_alloc(&consistent_head, size, - gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); - if (c) { - unsigned long vaddr = c->vm_start; - pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr); - struct page *end = page + (1 << order); - - /* - * Set the "dma handle" - */ - *handle = page_to_bus(page); - - do { - BUG_ON(!pte_none(*pte)); - - set_page_count(page, 1); - SetPageReserved(page); - set_pte_at(&init_mm, vaddr, - pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL))); - page++; - pte++; - vaddr += PAGE_SIZE; - } while (size -= PAGE_SIZE); - - /* - * Free the otherwise unused pages. - */ - while (page < end) { - set_page_count(page, 1); - __free_page(page); - page++; - } - - return (void *)c->vm_start; - } - - if (page) - __free_pages(page, order); - no_page: - return NULL; -} -EXPORT_SYMBOL(__dma_alloc_coherent); - -/* - * free a page as defined by the above mapping. 
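Together with __dma_alloc_coherent() above, this forms a matched pair: the caller keeps both the uncached CPU pointer and the bus handle, and must hand the same size back on free. A minimal usage sketch (the driver context, 'ioaddr' and 'RING_BASE' are hypothetical; most drivers reach these through the generic dma_alloc_coherent() wrappers on CONFIG_NOT_COHERENT_CACHE platforms):

    dma_addr_t handle;
    size_t size = PAGE_ALIGN(sizeof(u32) * 256);

    u32 *ring = __dma_alloc_coherent(size, &handle, GFP_KERNEL);
    if (!ring)
            return -ENOMEM;                     /* sketch assumes a driver probe routine */

    /* CPU accesses go through the uncached mapping; the device is given 'handle'. */
    ring[0] = 0;
    writel(handle, ioaddr + RING_BASE);         /* 'ioaddr' and 'RING_BASE' are hypothetical */

    __dma_free_coherent(size, ring);            /* must pass back the same size as allocated */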
- */ -void __dma_free_coherent(size_t size, void *vaddr) -{ - struct vm_region *c; - unsigned long flags, addr; - pte_t *ptep; - - size = PAGE_ALIGN(size); - - spin_lock_irqsave(&consistent_lock, flags); - - c = vm_region_find(&consistent_head, (unsigned long)vaddr); - if (!c) - goto no_area; - - if ((c->vm_end - c->vm_start) != size) { - printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", - __func__, c->vm_end - c->vm_start, size); - dump_stack(); - size = c->vm_end - c->vm_start; - } - - ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start); - addr = c->vm_start; - do { - pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); - unsigned long pfn; - - ptep++; - addr += PAGE_SIZE; - - if (!pte_none(pte) && pte_present(pte)) { - pfn = pte_pfn(pte); - - if (pfn_valid(pfn)) { - struct page *page = pfn_to_page(pfn); - ClearPageReserved(page); - - __free_page(page); - continue; - } - } - - printk(KERN_CRIT "%s: bad page in kernel page table\n", - __func__); - } while (size -= PAGE_SIZE); - - flush_tlb_kernel_range(c->vm_start, c->vm_end); - - list_del(&c->vm_list); - - spin_unlock_irqrestore(&consistent_lock, flags); - - kfree(c); - return; - - no_area: - spin_unlock_irqrestore(&consistent_lock, flags); - printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", - __func__, vaddr); - dump_stack(); -} -EXPORT_SYMBOL(__dma_free_coherent); - -/* - * Initialise the consistent memory allocation. - */ -static int __init dma_alloc_init(void) -{ - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte; - int ret = 0; - - do { - pgd = pgd_offset(&init_mm, CONSISTENT_BASE); - pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE); - if (!pmd) { - printk(KERN_ERR "%s: no pmd tables\n", __func__); - ret = -ENOMEM; - break; - } - WARN_ON(!pmd_none(*pmd)); - - pte = pte_alloc_kernel(pmd, CONSISTENT_BASE); - if (!pte) { - printk(KERN_ERR "%s: no pte tables\n", __func__); - ret = -ENOMEM; - break; - } - - consistent_pte = pte; - } while (0); - - return ret; -} - -core_initcall(dma_alloc_init); - -/* - * make an area consistent. - */ -void __dma_sync(void *vaddr, size_t size, int direction) -{ - unsigned long start = (unsigned long)vaddr; - unsigned long end = start + size; - - switch (direction) { - case DMA_NONE: - BUG(); - case DMA_FROM_DEVICE: /* invalidate only */ - invalidate_dcache_range(start, end); - break; - case DMA_TO_DEVICE: /* writeback only */ - clean_dcache_range(start, end); - break; - case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - flush_dcache_range(start, end); - break; - } -} -EXPORT_SYMBOL(__dma_sync); - -#ifdef CONFIG_HIGHMEM -/* - * __dma_sync_page() implementation for systems using highmem. - * In this case, each page of a buffer must be kmapped/kunmapped - * in order to have a virtual address for __dma_sync(). This must - * not sleep so kmap_atomic()/kunmap_atomic() are used. - * - * Note: yes, it is possible and correct to have a buffer extend - * beyond the first page. 
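The helper that follows walks the buffer one page-sized segment at a time, with a possibly partial first segment when 'offset' is non-zero. A worked example of the arithmetic it uses (numbers illustrative, PAGE_SIZE taken as 4096):

    /* offset = 3000, size = 6000:
     *   seg_size = min(PAGE_SIZE - offset, size) = min(1096, 6000) = 1096
     *   nr_segs  = 1 + ((size - seg_size) + PAGE_SIZE - 1) / PAGE_SIZE
     *            = 1 + (4904 + 4095) / 4096 = 3
     * so the loop kmaps and syncs three segments: 1096 + 4096 + 808 = 6000 bytes. */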
- */ -static inline void __dma_sync_page_highmem(struct page *page, - unsigned long offset, size_t size, int direction) -{ - size_t seg_size = min((size_t)(PAGE_SIZE - offset), size); - size_t cur_size = seg_size; - unsigned long flags, start, seg_offset = offset; - int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE; - int seg_nr = 0; - - local_irq_save(flags); - - do { - start = (unsigned long)kmap_atomic(page + seg_nr, - KM_PPC_SYNC_PAGE) + seg_offset; - - /* Sync this buffer segment */ - __dma_sync((void *)start, seg_size, direction); - kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE); - seg_nr++; - - /* Calculate next buffer segment size */ - seg_size = min((size_t)PAGE_SIZE, size - cur_size); - - /* Add the segment size to our running total */ - cur_size += seg_size; - seg_offset = 0; - } while (seg_nr < nr_segs); - - local_irq_restore(flags); -} -#endif /* CONFIG_HIGHMEM */ - -/* - * __dma_sync_page makes memory consistent. identical to __dma_sync, but - * takes a struct page instead of a virtual address - */ -void __dma_sync_page(struct page *page, unsigned long offset, - size_t size, int direction) -{ -#ifdef CONFIG_HIGHMEM - __dma_sync_page_highmem(page, offset, size, direction); -#else - unsigned long start = (unsigned long)page_address(page) + offset; - __dma_sync((void *)start, size, direction); -#endif -} -EXPORT_SYMBOL(__dma_sync_page); diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S deleted file mode 100644 index f044edbb454..00000000000 --- a/arch/ppc/kernel/entry.S +++ /dev/null @@ -1,1002 +0,0 @@ -/* - * PowerPC version - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP - * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com> - * Adapted for Power Macintosh by Paul Mackerras. - * Low-level exception handlers and MMU support - * rewritten by Paul Mackerras. - * Copyright (C) 1996 Paul Mackerras. - * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). - * - * This file contains the system call entry code, context switch - * code, and exception/interrupt return code for PowerPC. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include <linux/config.h> -#include <linux/errno.h> -#include <linux/sys.h> -#include <linux/threads.h> -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/mmu.h> -#include <asm/cputable.h> -#include <asm/thread_info.h> -#include <asm/ppc_asm.h> -#include <asm/asm-offsets.h> -#include <asm/unistd.h> - -#undef SHOW_SYSCALLS -#undef SHOW_SYSCALLS_TASK - -/* - * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE. 
- */ -#if MSR_KERNEL >= 0x10000 -#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l -#else -#define LOAD_MSR_KERNEL(r, x) li r,(x) -#endif - -#ifdef CONFIG_BOOKE -#include "head_booke.h" -#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \ - mtspr exc_level##_SPRG,r8; \ - BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \ - lwz r0,GPR10-INT_FRAME_SIZE(r8); \ - stw r0,GPR10(r11); \ - lwz r0,GPR11-INT_FRAME_SIZE(r8); \ - stw r0,GPR11(r11); \ - mfspr r8,exc_level##_SPRG - - .globl mcheck_transfer_to_handler -mcheck_transfer_to_handler: - TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK) - b transfer_to_handler_full - - .globl debug_transfer_to_handler -debug_transfer_to_handler: - TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG) - b transfer_to_handler_full - - .globl crit_transfer_to_handler -crit_transfer_to_handler: - TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT) - /* fall through */ -#endif - -#ifdef CONFIG_40x - .globl crit_transfer_to_handler -crit_transfer_to_handler: - lwz r0,crit_r10@l(0) - stw r0,GPR10(r11) - lwz r0,crit_r11@l(0) - stw r0,GPR11(r11) - /* fall through */ -#endif - -/* - * This code finishes saving the registers to the exception frame - * and jumps to the appropriate handler for the exception, turning - * on address translation. - * Note that we rely on the caller having set cr0.eq iff the exception - * occurred in kernel mode (i.e. MSR:PR = 0). - */ - .globl transfer_to_handler_full -transfer_to_handler_full: - SAVE_NVGPRS(r11) - /* fall through */ - - .globl transfer_to_handler -transfer_to_handler: - stw r2,GPR2(r11) - stw r12,_NIP(r11) - stw r9,_MSR(r11) - andi. r2,r9,MSR_PR - mfctr r12 - mfspr r2,SPRN_XER - stw r12,_CTR(r11) - stw r2,_XER(r11) - mfspr r12,SPRN_SPRG3 - addi r2,r12,-THREAD - tovirt(r2,r2) /* set r2 to current */ - beq 2f /* if from user, fix up THREAD.regs */ - addi r11,r1,STACK_FRAME_OVERHEAD - stw r11,PT_REGS(r12) -#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) - /* Check to see if the dbcr0 register is set up to debug. Use the - single-step bit to do this. */ - lwz r12,THREAD_DBCR0(r12) - andis. r12,r12,DBCR0_IC@h - beq+ 3f - /* From user and task is ptraced - load up global dbcr0 */ - li r12,-1 /* clear all pending debug events */ - mtspr SPRN_DBSR,r12 - lis r11,global_dbcr0@ha - tophys(r11,r11) - addi r11,r11,global_dbcr0@l - lwz r12,0(r11) - mtspr SPRN_DBCR0,r12 - lwz r12,4(r11) - addi r12,r12,-1 - stw r12,4(r11) -#endif - b 3f -2: /* if from kernel, check interrupted DOZE/NAP mode and - * check for stack overflow - */ -#ifdef CONFIG_6xx - mfspr r11,SPRN_HID0 - mtcr r11 -BEGIN_FTR_SECTION - bt- 8,power_save_6xx_restore /* Check DOZE */ -END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) -BEGIN_FTR_SECTION - bt- 9,power_save_6xx_restore /* Check NAP */ -END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) -#endif /* CONFIG_6xx */ - .globl transfer_to_handler_cont -transfer_to_handler_cont: - lwz r11,THREAD_INFO-THREAD(r12) - cmplw r1,r11 /* if r1 <= current->thread_info */ - ble- stack_ovf /* then the kernel stack overflowed */ -3: - mflr r9 - lwz r11,0(r9) /* virtual address of handler */ - lwz r9,4(r9) /* where to go when done */ - FIX_SRR1(r10,r12) - mtspr SPRN_SRR0,r11 - mtspr SPRN_SRR1,r10 - mtlr r9 - SYNC - RFI /* jump to handler, enable MMU */ - -/* - * On kernel stack overflow, load up an initial stack pointer - * and call StackOverflow(regs), which should not return. - */ -stack_ovf: - /* sometimes we use a statically-allocated stack, which is OK. 
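On the 4xx/Book-E side, MSR_KERNEL no longer fits the 16-bit signed immediate of a single li, which is why the LOAD_MSR_KERNEL macro earlier in this file falls back to a lis/ori pair. In C terms the two-instruction form simply rebuilds the value from halves (sketch only; the concrete MSR_KERNEL value depends on the configuration):

    unsigned long hi = (MSR_KERNEL >> 16) & 0xffff;  /* what (x)@h feeds to lis */
    unsigned long lo =  MSR_KERNEL        & 0xffff;  /* what (x)@l feeds to ori */
    unsigned long v  = (hi << 16) | lo;              /* == MSR_KERNEL; > 0x10000 once MSR_CE is included */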
*/ - lis r11,_end@h - ori r11,r11,_end@l - cmplw r1,r11 - ble 3b /* r1 <= &_end is OK */ - SAVE_NVGPRS(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - lis r1,init_thread_union@ha - addi r1,r1,init_thread_union@l - addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD - lis r9,StackOverflow@ha - addi r9,r9,StackOverflow@l - LOAD_MSR_KERNEL(r10,MSR_KERNEL) - FIX_SRR1(r10,r12) - mtspr SPRN_SRR0,r9 - mtspr SPRN_SRR1,r10 - SYNC - RFI - -/* - * Handle a system call. - */ - .stabs "arch/ppc/kernel/",N_SO,0,0,0f - .stabs "entry.S",N_SO,0,0,0f -0: - -_GLOBAL(DoSyscall) - stw r0,THREAD+LAST_SYSCALL(r2) - stw r3,ORIG_GPR3(r1) - li r12,0 - stw r12,RESULT(r1) - lwz r11,_CCR(r1) /* Clear SO bit in CR */ - rlwinm r11,r11,0,4,2 - stw r11,_CCR(r1) -#ifdef SHOW_SYSCALLS - bl do_show_syscall -#endif /* SHOW_SYSCALLS */ - rlwinm r10,r1,0,0,18 /* current_thread_info() */ - li r11,0 - stb r11,TI_SC_NOERR(r10) - lwz r11,TI_FLAGS(r10) - andi. r11,r11,_TIF_SYSCALL_T_OR_A - bne- syscall_dotrace -syscall_dotrace_cont: - cmplwi 0,r0,NR_syscalls - lis r10,sys_call_table@h - ori r10,r10,sys_call_table@l - slwi r0,r0,2 - bge- 66f - lwzx r10,r10,r0 /* Fetch system call handler [ptr] */ - mtlr r10 - addi r9,r1,STACK_FRAME_OVERHEAD - PPC440EP_ERR42 - blrl /* Call handler */ - .globl ret_from_syscall -ret_from_syscall: -#ifdef SHOW_SYSCALLS - bl do_show_syscall_exit -#endif - mr r6,r3 - li r11,-_LAST_ERRNO - cmplw 0,r3,r11 - rlwinm r12,r1,0,0,18 /* current_thread_info() */ - blt+ 30f - lbz r11,TI_SC_NOERR(r12) - cmpwi r11,0 - bne 30f - neg r3,r3 - lwz r10,_CCR(r1) /* Set SO bit in CR */ - oris r10,r10,0x1000 - stw r10,_CCR(r1) - - /* disable interrupts so current_thread_info()->flags can't change */ -30: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ - SYNC - MTMSRD(r10) - lwz r9,TI_FLAGS(r12) - andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED) - bne- syscall_exit_work -syscall_exit_cont: -#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - /* If the process has its own DBCR0 value, load it up. The single - step bit tells us that dbcr0 should be loaded. */ - lwz r0,THREAD+THREAD_DBCR0(r2) - andis. r10,r0,DBCR0_IC@h - bnel- load_dbcr0 -#endif - stwcx. r0,0,r1 /* to clear the reservation */ - lwz r4,_LINK(r1) - lwz r5,_CCR(r1) - mtlr r4 - mtcr r5 - lwz r7,_NIP(r1) - lwz r8,_MSR(r1) - FIX_SRR1(r8, r0) - lwz r2,GPR2(r1) - lwz r1,GPR1(r1) - mtspr SPRN_SRR0,r7 - mtspr SPRN_SRR1,r8 - SYNC - RFI - -66: li r3,-ENOSYS - b ret_from_syscall - - .globl ret_from_fork -ret_from_fork: - REST_NVGPRS(r1) - bl schedule_tail - li r3,0 - b ret_from_syscall - -/* Traced system call support */ -syscall_dotrace: - SAVE_NVGPRS(r1) - li r0,0xc00 - stw r0,TRAP(r1) - addi r3,r1,STACK_FRAME_OVERHEAD - bl do_syscall_trace_enter - lwz r0,GPR0(r1) /* Restore original registers */ - lwz r3,GPR3(r1) - lwz r4,GPR4(r1) - lwz r5,GPR5(r1) - lwz r6,GPR6(r1) - lwz r7,GPR7(r1) - lwz r8,GPR8(r1) - REST_NVGPRS(r1) - b syscall_dotrace_cont - -syscall_exit_work: - stw r6,RESULT(r1) /* Save result */ - stw r3,GPR3(r1) /* Update return value */ - andi. r0,r9,_TIF_SYSCALL_T_OR_A - beq 5f - ori r10,r10,MSR_EE - SYNC - MTMSRD(r10) /* re-enable interrupts */ - lwz r4,TRAP(r1) - andi. r4,r4,1 - beq 4f - SAVE_NVGPRS(r1) - li r4,0xc00 - stw r4,TRAP(r1) -4: - addi r3,r1,STACK_FRAME_OVERHEAD - bl do_syscall_trace_leave - REST_NVGPRS(r1) -2: - lwz r3,GPR3(r1) - LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ - SYNC - MTMSRD(r10) /* disable interrupts again */ - rlwinm r12,r1,0,0,18 /* current_thread_info() */ - lwz r9,TI_FLAGS(r12) -5: - andi. 
r0,r9,_TIF_NEED_RESCHED - bne 1f - lwz r5,_MSR(r1) - andi. r5,r5,MSR_PR - beq syscall_exit_cont - andi. r0,r9,_TIF_SIGPENDING - beq syscall_exit_cont - b do_user_signal -1: - ori r10,r10,MSR_EE - SYNC - MTMSRD(r10) /* re-enable interrupts */ - bl schedule - b 2b - -#ifdef SHOW_SYSCALLS -do_show_syscall: -#ifdef SHOW_SYSCALLS_TASK - lis r11,show_syscalls_task@ha - lwz r11,show_syscalls_task@l(r11) - cmp 0,r2,r11 - bnelr -#endif - stw r31,GPR31(r1) - mflr r31 - lis r3,7f@ha - addi r3,r3,7f@l - lwz r4,GPR0(r1) - lwz r5,GPR3(r1) - lwz r6,GPR4(r1) - lwz r7,GPR5(r1) - lwz r8,GPR6(r1) - lwz r9,GPR7(r1) - bl printk - lis r3,77f@ha - addi r3,r3,77f@l - lwz r4,GPR8(r1) - mr r5,r2 - bl printk - lwz r0,GPR0(r1) - lwz r3,GPR3(r1) - lwz r4,GPR4(r1) - lwz r5,GPR5(r1) - lwz r6,GPR6(r1) - lwz r7,GPR7(r1) - lwz r8,GPR8(r1) - mtlr r31 - lwz r31,GPR31(r1) - blr - -do_show_syscall_exit: -#ifdef SHOW_SYSCALLS_TASK - lis r11,show_syscalls_task@ha - lwz r11,show_syscalls_task@l(r11) - cmp 0,r2,r11 - bnelr -#endif - stw r31,GPR31(r1) - mflr r31 - stw r3,RESULT(r1) /* Save result */ - mr r4,r3 - lis r3,79f@ha - addi r3,r3,79f@l - bl printk - lwz r3,RESULT(r1) - mtlr r31 - lwz r31,GPR31(r1) - blr - -7: .string "syscall %d(%x, %x, %x, %x, %x, " -77: .string "%x), current=%p\n" -79: .string " -> %x\n" - .align 2,0 - -#ifdef SHOW_SYSCALLS_TASK - .data - .globl show_syscalls_task -show_syscalls_task: - .long -1 - .text -#endif -#endif /* SHOW_SYSCALLS */ - -/* - * The sigsuspend and rt_sigsuspend system calls can call do_signal - * and thus put the process into the stopped state where we might - * want to examine its user state with ptrace. Therefore we need - * to save all the nonvolatile registers (r13 - r31) before calling - * the C code. - */ - .globl ppc_sigsuspend -ppc_sigsuspend: - SAVE_NVGPRS(r1) - lwz r0,TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,TRAP(r1) /* register set saved */ - b sys_sigsuspend - - .globl ppc_rt_sigsuspend -ppc_rt_sigsuspend: - SAVE_NVGPRS(r1) - lwz r0,TRAP(r1) - rlwinm r0,r0,0,0,30 - stw r0,TRAP(r1) - b sys_rt_sigsuspend - - .globl ppc_fork -ppc_fork: - SAVE_NVGPRS(r1) - lwz r0,TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,TRAP(r1) /* register set saved */ - b sys_fork - - .globl ppc_vfork -ppc_vfork: - SAVE_NVGPRS(r1) - lwz r0,TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,TRAP(r1) /* register set saved */ - b sys_vfork - - .globl ppc_clone -ppc_clone: - SAVE_NVGPRS(r1) - lwz r0,TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,TRAP(r1) /* register set saved */ - b sys_clone - - .globl ppc_swapcontext -ppc_swapcontext: - SAVE_NVGPRS(r1) - lwz r0,TRAP(r1) - rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ - stw r0,TRAP(r1) /* register set saved */ - b sys_swapcontext - -/* - * Top-level page fault handling. - * This is in assembler because if do_page_fault tells us that - * it is a bad kernel page fault, we want to save the non-volatile - * registers before calling bad_page_fault. - */ - .globl handle_page_fault -handle_page_fault: - stw r4,_DAR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD - bl do_page_fault - cmpwi r3,0 - beq+ ret_from_except - SAVE_NVGPRS(r1) - lwz r0,TRAP(r1) - clrrwi r0,r0,1 - stw r0,TRAP(r1) - mr r5,r3 - addi r3,r1,STACK_FRAME_OVERHEAD - lwz r4,_DAR(r1) - bl bad_page_fault - b ret_from_except_full - -/* - * This routine switches between two different tasks. The process - * state of one is saved on its kernel stack. 
Then the state - * of the other is restored from its kernel stack. The memory - * management hardware is updated to the second process's state. - * Finally, we can return to the second process. - * On entry, r3 points to the THREAD for the current task, r4 - * points to the THREAD for the new task. - * - * This routine is always called with interrupts disabled. - * - * Note: there are two ways to get to the "going out" portion - * of this code; either by coming in via the entry (_switch) - * or via "fork" which must set up an environment equivalent - * to the "_switch" path. If you change this , you'll have to - * change the fork code also. - * - * The code which creates the new task context is in 'copy_thread' - * in arch/ppc/kernel/process.c - */ -_GLOBAL(_switch) - stwu r1,-INT_FRAME_SIZE(r1) - mflr r0 - stw r0,INT_FRAME_SIZE+4(r1) - /* r3-r12 are caller saved -- Cort */ - SAVE_NVGPRS(r1) - stw r0,_NIP(r1) /* Return to switch caller */ - mfmsr r11 - li r0,MSR_FP /* Disable floating-point */ -#ifdef CONFIG_ALTIVEC -BEGIN_FTR_SECTION - oris r0,r0,MSR_VEC@h /* Disable altivec */ - mfspr r12,SPRN_VRSAVE /* save vrsave register value */ - stw r12,THREAD+THREAD_VRSAVE(r2) -END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) -#endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_SPE - oris r0,r0,MSR_SPE@h /* Disable SPE */ - mfspr r12,SPRN_SPEFSCR /* save spefscr register value */ - stw r12,THREAD+THREAD_SPEFSCR(r2) -#endif /* CONFIG_SPE */ - and. r0,r0,r11 /* FP or altivec or SPE enabled? */ - beq+ 1f - andc r11,r11,r0 - MTMSRD(r11) - isync -1: stw r11,_MSR(r1) - mfcr r10 - stw r10,_CCR(r1) - stw r1,KSP(r3) /* Set old stack pointer */ - -#ifdef CONFIG_SMP - /* We need a sync somewhere here to make sure that if the - * previous task gets rescheduled on another CPU, it sees all - * stores it has performed on this one. - */ - sync -#endif /* CONFIG_SMP */ - - tophys(r0,r4) - CLR_TOP32(r0) - mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */ - lwz r1,KSP(r4) /* Load new stack pointer */ - - /* save the old current 'last' for return value */ - mr r3,r2 - addi r2,r4,-THREAD /* Update current */ - -#ifdef CONFIG_ALTIVEC -BEGIN_FTR_SECTION - lwz r0,THREAD+THREAD_VRSAVE(r2) - mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */ -END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) -#endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_SPE - lwz r0,THREAD+THREAD_SPEFSCR(r2) - mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */ -#endif /* CONFIG_SPE */ - - lwz r0,_CCR(r1) - mtcrf 0xFF,r0 - /* r3-r12 are destroyed -- Cort */ - REST_NVGPRS(r1) - - lwz r4,_NIP(r1) /* Return to _switch caller in new task */ - mtlr r4 - addi r1,r1,INT_FRAME_SIZE - blr - - .globl fast_exception_return -fast_exception_return: -#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) - andi. 
r10,r9,MSR_RI /* check for recoverable interrupt */ - beq 1f /* if not, we've got problems */ -#endif - -2: REST_4GPRS(3, r11) - lwz r10,_CCR(r11) - REST_GPR(1, r11) - mtcr r10 - lwz r10,_LINK(r11) - mtlr r10 - REST_GPR(10, r11) - mtspr SPRN_SRR1,r9 - mtspr SPRN_SRR0,r12 - REST_GPR(9, r11) - REST_GPR(12, r11) - lwz r11,GPR11(r11) - SYNC - RFI - -#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) -/* check if the exception happened in a restartable section */ -1: lis r3,exc_exit_restart_end@ha - addi r3,r3,exc_exit_restart_end@l - cmplw r12,r3 - bge 3f - lis r4,exc_exit_restart@ha - addi r4,r4,exc_exit_restart@l - cmplw r12,r4 - blt 3f - lis r3,fee_restarts@ha - tophys(r3,r3) - lwz r5,fee_restarts@l(r3) - addi r5,r5,1 - stw r5,fee_restarts@l(r3) - mr r12,r4 /* restart at exc_exit_restart */ - b 2b - - .comm fee_restarts,4 - -/* aargh, a nonrecoverable interrupt, panic */ -/* aargh, we don't know which trap this is */ -/* but the 601 doesn't implement the RI bit, so assume it's OK */ -3: -BEGIN_FTR_SECTION - b 2b -END_FTR_SECTION_IFSET(CPU_FTR_601) - li r10,-1 - stw r10,TRAP(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - lis r10,MSR_KERNEL@h - ori r10,r10,MSR_KERNEL@l - bl transfer_to_handler_full - .long nonrecoverable_exception - .long ret_from_except -#endif - - .globl sigreturn_exit -sigreturn_exit: - subi r1,r3,STACK_FRAME_OVERHEAD - rlwinm r12,r1,0,0,18 /* current_thread_info() */ - lwz r9,TI_FLAGS(r12) - andi. r0,r9,_TIF_SYSCALL_T_OR_A - beq+ ret_from_except_full - bl do_syscall_trace_leave - /* fall through */ - - .globl ret_from_except_full -ret_from_except_full: - REST_NVGPRS(r1) - /* fall through */ - - .globl ret_from_except -ret_from_except: - /* Hard-disable interrupts so that current_thread_info()->flags - * can't change between when we test it and when we return - * from the interrupt. */ - LOAD_MSR_KERNEL(r10,MSR_KERNEL) - SYNC /* Some chip revs have problems here... */ - MTMSRD(r10) /* disable interrupts */ - - lwz r3,_MSR(r1) /* Returning to user mode? */ - andi. r0,r3,MSR_PR - beq resume_kernel - -user_exc_return: /* r10 contains MSR_KERNEL here */ - /* Check current_thread_info()->flags */ - rlwinm r9,r1,0,0,18 - lwz r9,TI_FLAGS(r9) - andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED) - bne do_work - -restore_user: -#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - /* Check whether this process has its own DBCR0 value. The single - step bit tells us that dbcr0 should be loaded. */ - lwz r0,THREAD+THREAD_DBCR0(r2) - andis. r10,r0,DBCR0_IC@h - bnel- load_dbcr0 -#endif - -#ifdef CONFIG_PREEMPT - b restore - -/* N.B. the only way to get here is from the beq following ret_from_except. */ -resume_kernel: - /* check current_thread_info->preempt_count */ - rlwinm r9,r1,0,0,18 - lwz r0,TI_PREEMPT(r9) - cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ - bne restore - lwz r0,TI_FLAGS(r9) - andi. r0,r0,_TIF_NEED_RESCHED - beq+ restore - andi. r0,r3,MSR_EE /* interrupts off? */ - beq restore /* don't schedule if so */ -1: bl preempt_schedule_irq - rlwinm r9,r1,0,0,18 - lwz r3,TI_FLAGS(r9) - andi. r0,r3,_TIF_NEED_RESCHED - bne- 1b -#else -resume_kernel: -#endif /* CONFIG_PREEMPT */ - - /* interrupts are hard-disabled at this point */ -restore: - lwz r0,GPR0(r1) - lwz r2,GPR2(r1) - REST_4GPRS(3, r1) - REST_2GPRS(7, r1) - - lwz r10,_XER(r1) - lwz r11,_CTR(r1) - mtspr SPRN_XER,r10 - mtctr r11 - - PPC405_ERR77(0,r1) - stwcx. r0,0,r1 /* to clear the reservation */ - -#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) - lwz r9,_MSR(r1) - andi. 
r10,r9,MSR_RI /* check if this exception occurred */ - beql nonrecoverable /* at a bad place (MSR:RI = 0) */ - - lwz r10,_CCR(r1) - lwz r11,_LINK(r1) - mtcrf 0xFF,r10 - mtlr r11 - - /* - * Once we put values in SRR0 and SRR1, we are in a state - * where exceptions are not recoverable, since taking an - * exception will trash SRR0 and SRR1. Therefore we clear the - * MSR:RI bit to indicate this. If we do take an exception, - * we can't return to the point of the exception but we - * can restart the exception exit path at the label - * exc_exit_restart below. -- paulus - */ - LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI) - SYNC - MTMSRD(r10) /* clear the RI bit */ - .globl exc_exit_restart -exc_exit_restart: - lwz r9,_MSR(r1) - lwz r12,_NIP(r1) - FIX_SRR1(r9,r10) - mtspr SPRN_SRR0,r12 - mtspr SPRN_SRR1,r9 - REST_4GPRS(9, r1) - lwz r1,GPR1(r1) - .globl exc_exit_restart_end -exc_exit_restart_end: - SYNC - RFI - -#else /* !(CONFIG_4xx || CONFIG_BOOKE) */ - /* - * This is a bit different on 4xx/Book-E because it doesn't have - * the RI bit in the MSR. - * The TLB miss handler checks if we have interrupted - * the exception exit path and restarts it if so - * (well maybe one day it will... :). - */ - lwz r11,_LINK(r1) - mtlr r11 - lwz r10,_CCR(r1) - mtcrf 0xff,r10 - REST_2GPRS(9, r1) - .globl exc_exit_restart -exc_exit_restart: - lwz r11,_NIP(r1) - lwz r12,_MSR(r1) -exc_exit_start: - mtspr SPRN_SRR0,r11 - mtspr SPRN_SRR1,r12 - REST_2GPRS(11, r1) - lwz r1,GPR1(r1) - .globl exc_exit_restart_end -exc_exit_restart_end: - PPC405_ERR77_SYNC - rfi - b . /* prevent prefetch past rfi */ - -/* - * Returning from a critical interrupt in user mode doesn't need - * to be any different from a normal exception. For a critical - * interrupt in the kernel, we just return (without checking for - * preemption) since the interrupt may have happened at some crucial - * place (e.g. inside the TLB miss handler), and because we will be - * running with r1 pointing into critical_stack, not the current - * process's kernel stack (and therefore current_thread_info() will - * give the wrong answer). - * We have to restore various SPRs that may have been in use at the - * time of the critical interrupt. - * - */ -#ifdef CONFIG_40x -#define PPC_40x_TURN_OFF_MSR_DR \ - /* avoid any possible TLB misses here by turning off MSR.DR, we \ - * assume the instructions here are mapped by a pinned TLB entry */ \ - li r10,MSR_IR; \ - mtmsr r10; \ - isync; \ - tophys(r1, r1); -#else -#define PPC_40x_TURN_OFF_MSR_DR -#endif - -#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \ - REST_NVGPRS(r1); \ - lwz r3,_MSR(r1); \ - andi. r3,r3,MSR_PR; \ - LOAD_MSR_KERNEL(r10,MSR_KERNEL); \ - bne user_exc_return; \ - lwz r0,GPR0(r1); \ - lwz r2,GPR2(r1); \ - REST_4GPRS(3, r1); \ - REST_2GPRS(7, r1); \ - lwz r10,_XER(r1); \ - lwz r11,_CTR(r1); \ - mtspr SPRN_XER,r10; \ - mtctr r11; \ - PPC405_ERR77(0,r1); \ - stwcx. 
r0,0,r1; /* to clear the reservation */ \ - lwz r11,_LINK(r1); \ - mtlr r11; \ - lwz r10,_CCR(r1); \ - mtcrf 0xff,r10; \ - PPC_40x_TURN_OFF_MSR_DR; \ - lwz r9,_DEAR(r1); \ - lwz r10,_ESR(r1); \ - mtspr SPRN_DEAR,r9; \ - mtspr SPRN_ESR,r10; \ - lwz r11,_NIP(r1); \ - lwz r12,_MSR(r1); \ - mtspr exc_lvl_srr0,r11; \ - mtspr exc_lvl_srr1,r12; \ - lwz r9,GPR9(r1); \ - lwz r12,GPR12(r1); \ - lwz r10,GPR10(r1); \ - lwz r11,GPR11(r1); \ - lwz r1,GPR1(r1); \ - PPC405_ERR77_SYNC; \ - exc_lvl_rfi; \ - b .; /* prevent prefetch past exc_lvl_rfi */ - - .globl ret_from_crit_exc -ret_from_crit_exc: - RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI) - -#ifdef CONFIG_BOOKE - .globl ret_from_debug_exc -ret_from_debug_exc: - RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI) - - .globl ret_from_mcheck_exc -ret_from_mcheck_exc: - RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI) -#endif /* CONFIG_BOOKE */ - -/* - * Load the DBCR0 value for a task that is being ptraced, - * having first saved away the global DBCR0. Note that r0 - * has the dbcr0 value to set upon entry to this. - */ -load_dbcr0: - mfmsr r10 /* first disable debug exceptions */ - rlwinm r10,r10,0,~MSR_DE - mtmsr r10 - isync - mfspr r10,SPRN_DBCR0 - lis r11,global_dbcr0@ha - addi r11,r11,global_dbcr0@l - stw r10,0(r11) - mtspr SPRN_DBCR0,r0 - lwz r10,4(r11) - addi r10,r10,1 - stw r10,4(r11) - li r11,-1 - mtspr SPRN_DBSR,r11 /* clear all pending debug events */ - blr - - .comm global_dbcr0,8 -#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ - -do_work: /* r10 contains MSR_KERNEL here */ - andi. r0,r9,_TIF_NEED_RESCHED - beq do_user_signal - -do_resched: /* r10 contains MSR_KERNEL here */ - ori r10,r10,MSR_EE - SYNC - MTMSRD(r10) /* hard-enable interrupts */ - bl schedule -recheck: - LOAD_MSR_KERNEL(r10,MSR_KERNEL) - SYNC - MTMSRD(r10) /* disable interrupts */ - rlwinm r9,r1,0,0,18 - lwz r9,TI_FLAGS(r9) - andi. r0,r9,_TIF_NEED_RESCHED - bne- do_resched - andi. r0,r9,_TIF_SIGPENDING - beq restore_user -do_user_signal: /* r10 contains MSR_KERNEL here */ - ori r10,r10,MSR_EE - SYNC - MTMSRD(r10) /* hard-enable interrupts */ - /* save r13-r31 in the exception frame, if not already done */ - lwz r3,TRAP(r1) - andi. r0,r3,1 - beq 2f - SAVE_NVGPRS(r1) - rlwinm r3,r3,0,0,30 - stw r3,TRAP(r1) -2: li r3,0 - addi r4,r1,STACK_FRAME_OVERHEAD - bl do_signal - REST_NVGPRS(r1) - b recheck - -/* - * We come here when we are at the end of handling an exception - * that occurred at a place where taking an exception will lose - * state information, such as the contents of SRR0 and SRR1. - */ -nonrecoverable: - lis r10,exc_exit_restart_end@ha - addi r10,r10,exc_exit_restart_end@l - cmplw r12,r10 - bge 3f - lis r11,exc_exit_restart@ha - addi r11,r11,exc_exit_restart@l - cmplw r12,r11 - blt 3f - lis r10,ee_restarts@ha - lwz r12,ee_restarts@l(r10) - addi r12,r12,1 - stw r12,ee_restarts@l(r10) - mr r12,r11 /* restart at exc_exit_restart */ - blr -3: /* OK, we can't recover, kill this process */ - /* but the 601 doesn't implement the RI bit, so assume it's OK */ -BEGIN_FTR_SECTION - blr -END_FTR_SECTION_IFSET(CPU_FTR_601) - lwz r3,TRAP(r1) - andi. r0,r3,1 - beq 4f - SAVE_NVGPRS(r1) - rlwinm r3,r3,0,0,30 - stw r3,TRAP(r1) -4: addi r3,r1,STACK_FRAME_OVERHEAD - bl nonrecoverable_exception - /* shouldn't return */ - b 4b - - .comm ee_restarts,4 - -/* - * PROM code for specific machines follows. Put it - * here so it's easy to add arch-specific sections later. 
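
A self-contained C model of the do_work/recheck loop above: thread flags are re-checked with interrupts disabled, interrupts are re-enabled around schedule() and signal delivery, and the loop repeats until no work is left. Every helper below is a stub standing in for the real kernel code, which is the assembly shown here.

#include <stdio.h>

#define TIF_NEED_RESCHED 0x1u
#define TIF_SIGPENDING   0x2u

static unsigned int ti_flags = TIF_NEED_RESCHED | TIF_SIGPENDING;

static void irq_disable(void) { }                         /* stands in for MTMSRD(MSR_KERNEL) */
static void irq_enable(void)  { }                         /* stands in for ori r10,r10,MSR_EE */
static void schedule(void)  { ti_flags &= ~TIF_NEED_RESCHED; puts("schedule()"); }
static void do_signal(void) { ti_flags &= ~TIF_SIGPENDING;   puts("do_signal()"); }

int main(void)
{
    for (;;) {
        irq_disable();                                    /* recheck with interrupts off */
        unsigned int flags = ti_flags;
        if (!(flags & (TIF_NEED_RESCHED | TIF_SIGPENDING)))
            break;                                        /* nothing left: restore_user */
        irq_enable();
        if (flags & TIF_NEED_RESCHED)
            schedule();                                   /* do_resched path */
        else
            do_signal();                                  /* do_user_signal path */
    }
    puts("restore registers and RFI back to user space");
    return 0;
}
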
- * -- Cort - */ -#ifdef CONFIG_PPC_OF -/* - * On CHRP, the Run-Time Abstraction Services (RTAS) have to be - * called with the MMU off. - */ -_GLOBAL(enter_rtas) - stwu r1,-INT_FRAME_SIZE(r1) - mflr r0 - stw r0,INT_FRAME_SIZE+4(r1) - lis r4,rtas_data@ha - lwz r4,rtas_data@l(r4) - lis r6,1f@ha /* physical return address for rtas */ - addi r6,r6,1f@l - tophys(r6,r6) - tophys(r7,r1) - lis r8,rtas_entry@ha - lwz r8,rtas_entry@l(r8) - mfmsr r9 - stw r9,8(r1) - LOAD_MSR_KERNEL(r0,MSR_KERNEL) - SYNC /* disable interrupts so SRR0/1 */ - MTMSRD(r0) /* don't get trashed */ - li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR) - mtlr r6 - CLR_TOP32(r7) - mtspr SPRN_SPRG2,r7 - mtspr SPRN_SRR0,r8 - mtspr SPRN_SRR1,r9 - RFI -1: tophys(r9,r1) - lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */ - lwz r9,8(r9) /* original msr value */ - FIX_SRR1(r9,r0) - addi r1,r1,INT_FRAME_SIZE - li r0,0 - mtspr SPRN_SPRG2,r0 - mtspr SPRN_SRR0,r8 - mtspr SPRN_SRR1,r9 - RFI /* return to caller */ - - .globl machine_check_in_rtas -machine_check_in_rtas: - twi 31,0,0 - /* XXX load up BATs and panic */ - -#endif /* CONFIG_PPC_OF */ diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S deleted file mode 100644 index c5a890dca9c..00000000000 --- a/arch/ppc/kernel/head.S +++ /dev/null @@ -1,1539 +0,0 @@ -/* - * PowerPC version - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * - * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP - * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> - * Adapted for Power Macintosh by Paul Mackerras. - * Low-level exception handlers and MMU support - * rewritten by Paul Mackerras. - * Copyright (C) 1996 Paul Mackerras. - * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). - * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). - * - * This file contains the low-level support and setup for the - * PowerPC platform, including trap and interrupt dispatch. - * (The PPC 8xx embedded CPUs use head_8xx.S instead.) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- * - */ - -#include <linux/config.h> -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/mmu.h> -#include <asm/pgtable.h> -#include <asm/cputable.h> -#include <asm/cache.h> -#include <asm/thread_info.h> -#include <asm/ppc_asm.h> -#include <asm/asm-offsets.h> - -#ifdef CONFIG_APUS -#include <asm/amigappc.h> -#endif - -#ifdef CONFIG_PPC64BRIDGE -#define LOAD_BAT(n, reg, RA, RB) \ - ld RA,(n*32)+0(reg); \ - ld RB,(n*32)+8(reg); \ - mtspr SPRN_IBAT##n##U,RA; \ - mtspr SPRN_IBAT##n##L,RB; \ - ld RA,(n*32)+16(reg); \ - ld RB,(n*32)+24(reg); \ - mtspr SPRN_DBAT##n##U,RA; \ - mtspr SPRN_DBAT##n##L,RB; \ - -#else /* CONFIG_PPC64BRIDGE */ - -/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */ -#define LOAD_BAT(n, reg, RA, RB) \ - /* see the comment for clear_bats() -- Cort */ \ - li RA,0; \ - mtspr SPRN_IBAT##n##U,RA; \ - mtspr SPRN_DBAT##n##U,RA; \ - lwz RA,(n*16)+0(reg); \ - lwz RB,(n*16)+4(reg); \ - mtspr SPRN_IBAT##n##U,RA; \ - mtspr SPRN_IBAT##n##L,RB; \ - beq 1f; \ - lwz RA,(n*16)+8(reg); \ - lwz RB,(n*16)+12(reg); \ - mtspr SPRN_DBAT##n##U,RA; \ - mtspr SPRN_DBAT##n##L,RB; \ -1: -#endif /* CONFIG_PPC64BRIDGE */ - - .text - .stabs "arch/ppc/kernel/",N_SO,0,0,0f - .stabs "head.S",N_SO,0,0,0f -0: - .globl _stext -_stext: - -/* - * _start is defined this way because the XCOFF loader in the OpenFirmware - * on the powermac expects the entry point to be a procedure descriptor. - */ - .text - .globl _start -_start: - /* - * These are here for legacy reasons, the kernel used to - * need to look like a coff function entry for the pmac - * but we're always started by some kind of bootloader now. - * -- Cort - */ - nop /* used by __secondary_hold on prep (mtx) and chrp smp */ - nop /* used by __secondary_hold on prep (mtx) and chrp smp */ - nop - -/* PMAC - * Enter here with the kernel text, data and bss loaded starting at - * 0, running with virtual == physical mapping. - * r5 points to the prom entry point (the client interface handler - * address). Address translation is turned on, with the prom - * managing the hash table. Interrupts are disabled. The stack - * pointer (r1) points to just below the end of the half-meg region - * from 0x380000 - 0x400000, which is mapped in already. - * - * If we are booted from MacOS via BootX, we enter with the kernel - * image loaded somewhere, and the following values in registers: - * r3: 'BooX' (0x426f6f58) - * r4: virtual address of boot_infos_t - * r5: 0 - * - * APUS - * r3: 'APUS' - * r4: physical address of memory base - * Linux/m68k style BootInfo structure at &_end. - * - * PREP - * This is jumped to on prep systems right after the kernel is relocated - * to its proper place in memory by the boot loader. The expected layout - * of the regs is: - * r3: ptr to residual data - * r4: initrd_start or if no initrd then 0 - * r5: initrd_end - unused if r4 is 0 - * r6: Start of command line string - * r7: End of command line string - * - * This just gets a minimal mmu environment setup so we can call - * start_here() to do the real work. - * -- Cort - */ - - .globl __start -__start: -/* - * We have to do any OF calls before we map ourselves to KERNELBASE, - * because OF may have I/O devices mapped into that area - * (particularly on CHRP). 
- */ - mr r31,r3 /* save parameters */ - mr r30,r4 - mr r29,r5 - mr r28,r6 - mr r27,r7 - li r24,0 /* cpu # */ - -/* - * early_init() does the early machine identification and does - * the necessary low-level setup and clears the BSS - * -- Cort <cort@fsmlabs.com> - */ - bl early_init - -/* - * On POWER4, we first need to tweak some CPU configuration registers - * like real mode cache inhibit or exception base - */ -#ifdef CONFIG_POWER4 - bl __970_cpu_preinit -#endif /* CONFIG_POWER4 */ - -#ifdef CONFIG_APUS -/* On APUS the __va/__pa constants need to be set to the correct - * values before continuing. - */ - mr r4,r30 - bl fix_mem_constants -#endif /* CONFIG_APUS */ - -/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains - * the physical address we are running at, returned by early_init() - */ - bl mmu_off -__after_mmu_off: -#ifndef CONFIG_POWER4 - bl clear_bats - bl flush_tlbs - - bl initial_bats -#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) - bl setup_disp_bat -#endif -#else /* CONFIG_POWER4 */ - bl reloc_offset - bl initial_mm_power4 -#endif /* CONFIG_POWER4 */ - -/* - * Call setup_cpu for CPU 0 and initialize 6xx Idle - */ - bl reloc_offset - li r24,0 /* cpu# */ - bl call_setup_cpu /* Call setup_cpu for this CPU */ -#ifdef CONFIG_6xx - bl reloc_offset - bl init_idle_6xx -#endif /* CONFIG_6xx */ -#ifdef CONFIG_POWER4 - bl reloc_offset - bl init_idle_power4 -#endif /* CONFIG_POWER4 */ - - -#ifndef CONFIG_APUS -/* - * We need to run with _start at physical address 0. - * On CHRP, we are loaded at 0x10000 since OF on CHRP uses - * the exception vectors at 0 (and therefore this copy - * overwrites OF's exception vectors with our own). - * If the MMU is already turned on, we copy stuff to KERNELBASE, - * otherwise we copy it to 0. - */ - bl reloc_offset - mr r26,r3 - addis r4,r3,KERNELBASE@h /* current address of _start */ - cmpwi 0,r4,0 /* are we already running at 0? */ - bne relocate_kernel -#endif /* CONFIG_APUS */ -/* - * we now have the 1st 16M of ram mapped with the bats. - * prep needs the mmu to be turned on here, but pmac already has it on. - * this shouldn't bother the pmac since it just gets turned on again - * as we jump to our code at KERNELBASE. -- Cort - * Actually no, pmac doesn't have it on any more. BootX enters with MMU - * off, and in other cases, we now turn it off before changing BATs above. - */ -turn_on_mmu: - mfmsr r0 - ori r0,r0,MSR_DR|MSR_IR - mtspr SPRN_SRR1,r0 - lis r0,start_here@h - ori r0,r0,start_here@l - mtspr SPRN_SRR0,r0 - SYNC - RFI /* enables MMU */ - -/* - * We need __secondary_hold as a place to hold the other cpus on - * an SMP machine, even when we are running a UP kernel. - */ - . = 0xc0 /* for prep bootloader */ - li r3,1 /* MTX only has 1 cpu */ - .globl __secondary_hold -__secondary_hold: - /* tell the master we're here */ - stw r3,4(0) -#ifdef CONFIG_SMP -100: lwz r4,0(0) - /* wait until we're told to start */ - cmpw 0,r4,r3 - bne 100b - /* our cpu # was at addr 0 - go */ - mr r24,r3 /* cpu # */ - b __secondary_start -#else - b . -#endif /* CONFIG_SMP */ - -/* - * Exception entry code. This code runs with address translation - * turned off, i.e. using physical addresses. - * We assume sprg3 has the physical address of the current - * task's thread_struct. - */ -#define EXCEPTION_PROLOG \ - mtspr SPRN_SPRG0,r10; \ - mtspr SPRN_SPRG1,r11; \ - mfcr r10; \ - EXCEPTION_PROLOG_1; \ - EXCEPTION_PROLOG_2 - -#define EXCEPTION_PROLOG_1 \ - mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \ - andi. 
r11,r11,MSR_PR; \ - tophys(r11,r1); /* use tophys(r1) if kernel */ \ - beq 1f; \ - mfspr r11,SPRN_SPRG3; \ - lwz r11,THREAD_INFO-THREAD(r11); \ - addi r11,r11,THREAD_SIZE; \ - tophys(r11,r11); \ -1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */ - - -#define EXCEPTION_PROLOG_2 \ - CLR_TOP32(r11); \ - stw r10,_CCR(r11); /* save registers */ \ - stw r12,GPR12(r11); \ - stw r9,GPR9(r11); \ - mfspr r10,SPRN_SPRG0; \ - stw r10,GPR10(r11); \ - mfspr r12,SPRN_SPRG1; \ - stw r12,GPR11(r11); \ - mflr r10; \ - stw r10,_LINK(r11); \ - mfspr r12,SPRN_SRR0; \ - mfspr r9,SPRN_SRR1; \ - stw r1,GPR1(r11); \ - stw r1,0(r11); \ - tovirt(r1,r11); /* set new kernel sp */ \ - li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ - MTMSRD(r10); /* (except for mach check in rtas) */ \ - stw r0,GPR0(r11); \ - SAVE_4GPRS(3, r11); \ - SAVE_2GPRS(7, r11) - -/* - * Note: code which follows this uses cr0.eq (set if from kernel), - * r11, r12 (SRR0), and r9 (SRR1). - * - * Note2: once we have set r1 we are in a position to take exceptions - * again, and we could thus set MSR:RI at that point. - */ - -/* - * Exception vectors. - */ -#define EXCEPTION(n, label, hdlr, xfer) \ - . = n; \ -label: \ - EXCEPTION_PROLOG; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - xfer(n, hdlr) - -#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \ - li r10,trap; \ - stw r10,TRAP(r11); \ - li r10,MSR_KERNEL; \ - copyee(r10, r9); \ - bl tfer; \ -i##n: \ - .long hdlr; \ - .long ret - -#define COPY_EE(d, s) rlwimi d,s,0,16,16 -#define NOCOPY(d, s) - -#define EXC_XFER_STD(n, hdlr) \ - EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \ - ret_from_except_full) - -#define EXC_XFER_LITE(n, hdlr) \ - EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \ - ret_from_except) - -#define EXC_XFER_EE(n, hdlr) \ - EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \ - ret_from_except_full) - -#define EXC_XFER_EE_LITE(n, hdlr) \ - EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \ - ret_from_except) - -/* System reset */ -/* core99 pmac starts the seconary here by changing the vector, and - putting it back to what it was (unknown_exception) when done. */ -#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP) - . = 0x100 - b __secondary_start_gemini -#else - EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD) -#endif - -/* Machine check */ -/* - * On CHRP, this is complicated by the fact that we could get a - * machine check inside RTAS, and we have no guarantee that certain - * critical registers will have the values we expect. The set of - * registers that might have bad values includes all the GPRs - * and all the BATs. We indicate that we are in RTAS by putting - * a non-zero value, the address of the exception frame to use, - * in SPRG2. The machine check handler checks SPRG2 and uses its - * value if it is non-zero. If we ever needed to free up SPRG2, - * we could use a field in the thread_info or thread_struct instead. - * (Other exception handlers assume that r1 is a valid kernel stack - * pointer when we take an exception from supervisor mode.) - * -- paulus. - */ - . 
= 0x200 - mtspr SPRN_SPRG0,r10 - mtspr SPRN_SPRG1,r11 - mfcr r10 -#ifdef CONFIG_PPC_CHRP - mfspr r11,SPRN_SPRG2 - cmpwi 0,r11,0 - bne 7f -#endif /* CONFIG_PPC_CHRP */ - EXCEPTION_PROLOG_1 -7: EXCEPTION_PROLOG_2 - addi r3,r1,STACK_FRAME_OVERHEAD -#ifdef CONFIG_PPC_CHRP - mfspr r4,SPRN_SPRG2 - cmpwi cr1,r4,0 - bne cr1,1f -#endif - EXC_XFER_STD(0x200, machine_check_exception) -#ifdef CONFIG_PPC_CHRP -1: b machine_check_in_rtas -#endif - -/* Data access exception. */ - . = 0x300 -#ifdef CONFIG_PPC64BRIDGE - b DataAccess -DataAccessCont: -#else -DataAccess: - EXCEPTION_PROLOG -#endif /* CONFIG_PPC64BRIDGE */ - mfspr r10,SPRN_DSISR - andis. r0,r10,0xa470 /* weird error? */ - bne 1f /* if not, try to put a PTE */ - mfspr r4,SPRN_DAR /* into the hash table */ - rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ - bl hash_page -1: stw r10,_DSISR(r11) - mr r5,r10 - mfspr r4,SPRN_DAR - EXC_XFER_EE_LITE(0x300, handle_page_fault) - -#ifdef CONFIG_PPC64BRIDGE -/* SLB fault on data access. */ - . = 0x380 - b DataSegment -#endif /* CONFIG_PPC64BRIDGE */ - -/* Instruction access exception. */ - . = 0x400 -#ifdef CONFIG_PPC64BRIDGE - b InstructionAccess -InstructionAccessCont: -#else -InstructionAccess: - EXCEPTION_PROLOG -#endif /* CONFIG_PPC64BRIDGE */ - andis. r0,r9,0x4000 /* no pte found? */ - beq 1f /* if so, try to put a PTE */ - li r3,0 /* into the hash table */ - mr r4,r12 /* SRR0 is fault address */ - bl hash_page -1: mr r4,r12 - mr r5,r9 - EXC_XFER_EE_LITE(0x400, handle_page_fault) - -#ifdef CONFIG_PPC64BRIDGE -/* SLB fault on instruction access. */ - . = 0x480 - b InstructionSegment -#endif /* CONFIG_PPC64BRIDGE */ - -/* External interrupt */ - EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) - -/* Alignment exception */ - . = 0x600 -Alignment: - EXCEPTION_PROLOG - mfspr r4,SPRN_DAR - stw r4,_DAR(r11) - mfspr r5,SPRN_DSISR - stw r5,_DSISR(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_EE(0x600, alignment_exception) - -/* Program check exception */ - EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD) - -/* Floating-point unavailable */ - . = 0x800 -FPUnavailable: - EXCEPTION_PROLOG - bne load_up_fpu /* if from user, just load it up */ - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception) - -/* Decrementer */ - EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) - - EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE) - EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE) - -/* System call */ - . = 0xc00 -SystemCall: - EXCEPTION_PROLOG - EXC_XFER_EE_LITE(0xc00, DoSyscall) - -/* Single step - not used on 601 */ - EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD) - EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE) - -/* - * The Altivec unavailable trap is at 0x0f20. Foo. - * We effectively remap it to 0x3000. - * We include an altivec unavailable exception vector even if - * not configured for Altivec, so that you can't panic a - * non-altivec kernel running on a machine with altivec just - * by executing an altivec instruction. - */ - . = 0xf00 - b Trap_0f - - . = 0xf20 - b AltiVecUnavailable - -Trap_0f: - EXCEPTION_PROLOG - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_EE(0xf00, unknown_exception) - -/* - * Handle TLB miss for instruction on 603/603e. - * Note: we get an alternate set of r0 - r3 to use automatically. - */ - . 
= 0x1000 -InstructionTLBMiss: -/* - * r0: stored ctr - * r1: linux style pte ( later becomes ppc hardware pte ) - * r2: ptr to linux-style pte - * r3: scratch - */ - mfctr r0 - /* Get PTE (linux-style) and check access */ - mfspr r3,SPRN_IMISS - lis r1,KERNELBASE@h /* check if kernel address */ - cmplw 0,r3,r1 - mfspr r2,SPRN_SPRG3 - li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ - lwz r2,PGDIR(r2) - blt+ 112f - lis r2,swapper_pg_dir@ha /* if kernel address, use */ - addi r2,r2,swapper_pg_dir@l /* kernel page table */ - mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ - rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ -112: tophys(r2,r2) - rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ - lwz r2,0(r2) /* get pmd entry */ - rlwinm. r2,r2,0,0,19 /* extract address of pte page */ - beq- InstructionAddressInvalid /* return if no mapping */ - rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ - lwz r3,0(r2) /* get linux-style pte */ - andc. r1,r1,r3 /* check access & ~permission */ - bne- InstructionAddressInvalid /* return if access not permitted */ - ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ - /* - * NOTE! We are assuming this is not an SMP system, otherwise - * we would need to update the pte atomically with lwarx/stwcx. - */ - stw r3,0(r2) /* update PTE (accessed bit) */ - /* Convert linux-style PTE to low word of PPC-style PTE */ - rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ - rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ - and r1,r1,r2 /* writable if _RW and _DIRTY */ - rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ - rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ - ori r1,r1,0xe14 /* clear out reserved bits and M */ - andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ - mtspr SPRN_RPA,r1 - mfspr r3,SPRN_IMISS - tlbli r3 - mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ - mtcrf 0x80,r3 - rfi -InstructionAddressInvalid: - mfspr r3,SPRN_SRR1 - rlwinm r1,r3,9,6,6 /* Get load/store bit */ - - addis r1,r1,0x2000 - mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */ - mtctr r0 /* Restore CTR */ - andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ - or r2,r2,r1 - mtspr SPRN_SRR1,r2 - mfspr r1,SPRN_IMISS /* Get failing address */ - rlwinm. r2,r2,0,31,31 /* Check for little endian access */ - rlwimi r2,r2,1,30,30 /* change 1 -> 3 */ - xor r1,r1,r2 - mtspr SPRN_DAR,r1 /* Set fault address */ - mfmsr r0 /* Restore "normal" registers */ - xoris r0,r0,MSR_TGPR>>16 - mtcrf 0x80,r3 /* Restore CR0 */ - mtmsr r0 - b InstructionAccess - -/* - * Handle TLB miss for DATA Load operation on 603/603e - */ - . = 0x1100 -DataLoadTLBMiss: -/* - * r0: stored ctr - * r1: linux style pte ( later becomes ppc hardware pte ) - * r2: ptr to linux-style pte - * r3: scratch - */ - mfctr r0 - /* Get PTE (linux-style) and check access */ - mfspr r3,SPRN_DMISS - lis r1,KERNELBASE@h /* check if kernel address */ - cmplw 0,r3,r1 - mfspr r2,SPRN_SPRG3 - li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ - lwz r2,PGDIR(r2) - blt+ 112f - lis r2,swapper_pg_dir@ha /* if kernel address, use */ - addi r2,r2,swapper_pg_dir@l /* kernel page table */ - mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ - rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ -112: tophys(r2,r2) - rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ - lwz r2,0(r2) /* get pmd entry */ - rlwinm. 
r2,r2,0,0,19 /* extract address of pte page */ - beq- DataAddressInvalid /* return if no mapping */ - rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ - lwz r3,0(r2) /* get linux-style pte */ - andc. r1,r1,r3 /* check access & ~permission */ - bne- DataAddressInvalid /* return if access not permitted */ - ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ - /* - * NOTE! We are assuming this is not an SMP system, otherwise - * we would need to update the pte atomically with lwarx/stwcx. - */ - stw r3,0(r2) /* update PTE (accessed bit) */ - /* Convert linux-style PTE to low word of PPC-style PTE */ - rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ - rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ - and r1,r1,r2 /* writable if _RW and _DIRTY */ - rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ - rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ - ori r1,r1,0xe14 /* clear out reserved bits and M */ - andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ - mtspr SPRN_RPA,r1 - mfspr r3,SPRN_DMISS - tlbld r3 - mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ - mtcrf 0x80,r3 - rfi -DataAddressInvalid: - mfspr r3,SPRN_SRR1 - rlwinm r1,r3,9,6,6 /* Get load/store bit */ - addis r1,r1,0x2000 - mtspr SPRN_DSISR,r1 - mtctr r0 /* Restore CTR */ - andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ - mtspr SPRN_SRR1,r2 - mfspr r1,SPRN_DMISS /* Get failing address */ - rlwinm. r2,r2,0,31,31 /* Check for little endian access */ - beq 20f /* Jump if big endian */ - xori r1,r1,3 -20: mtspr SPRN_DAR,r1 /* Set fault address */ - mfmsr r0 /* Restore "normal" registers */ - xoris r0,r0,MSR_TGPR>>16 - mtcrf 0x80,r3 /* Restore CR0 */ - mtmsr r0 - b DataAccess - -/* - * Handle TLB miss for DATA Store on 603/603e - */ - . = 0x1200 -DataStoreTLBMiss: -/* - * r0: stored ctr - * r1: linux style pte ( later becomes ppc hardware pte ) - * r2: ptr to linux-style pte - * r3: scratch - */ - mfctr r0 - /* Get PTE (linux-style) and check access */ - mfspr r3,SPRN_DMISS - lis r1,KERNELBASE@h /* check if kernel address */ - cmplw 0,r3,r1 - mfspr r2,SPRN_SPRG3 - li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */ - lwz r2,PGDIR(r2) - blt+ 112f - lis r2,swapper_pg_dir@ha /* if kernel address, use */ - addi r2,r2,swapper_pg_dir@l /* kernel page table */ - mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ - rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ -112: tophys(r2,r2) - rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ - lwz r2,0(r2) /* get pmd entry */ - rlwinm. r2,r2,0,0,19 /* extract address of pte page */ - beq- DataAddressInvalid /* return if no mapping */ - rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ - lwz r3,0(r2) /* get linux-style pte */ - andc. r1,r1,r3 /* check access & ~permission */ - bne- DataAddressInvalid /* return if access not permitted */ - ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY - /* - * NOTE! We are assuming this is not an SMP system, otherwise - * we would need to update the pte atomically with lwarx/stwcx. - */ - stw r3,0(r2) /* update PTE (accessed/dirty bits) */ - /* Convert linux-style PTE to low word of PPC-style PTE */ - rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ - li r1,0xe15 /* clear out reserved bits and M */ - andc r1,r3,r1 /* PP = user? 
2: 0 */ - mtspr SPRN_RPA,r1 - mfspr r3,SPRN_DMISS - tlbld r3 - mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ - mtcrf 0x80,r3 - rfi - -#ifndef CONFIG_ALTIVEC -#define altivec_assist_exception unknown_exception -#endif - - EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE) - EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE) - EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE) -#ifdef CONFIG_POWER4 - EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1700, Trap_17, altivec_assist_exception, EXC_XFER_EE) - EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD) -#else /* !CONFIG_POWER4 */ - EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE) - EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD) - EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE) -#endif /* CONFIG_POWER4 */ - EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE) - EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE) - - .globl mol_trampoline - .set mol_trampoline, i0x2f00 - - . = 0x3000 - -AltiVecUnavailable: - EXCEPTION_PROLOG -#ifdef CONFIG_ALTIVEC - bne load_up_altivec /* if from user, just load it up */ -#endif /* CONFIG_ALTIVEC */ - EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception) - -#ifdef CONFIG_PPC64BRIDGE -DataAccess: - EXCEPTION_PROLOG - b DataAccessCont - -InstructionAccess: - EXCEPTION_PROLOG - b InstructionAccessCont - -DataSegment: - EXCEPTION_PROLOG - addi r3,r1,STACK_FRAME_OVERHEAD - mfspr r4,SPRN_DAR - stw r4,_DAR(r11) - EXC_XFER_STD(0x380, unknown_exception) - -InstructionSegment: - EXCEPTION_PROLOG - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_STD(0x480, unknown_exception) -#endif /* CONFIG_PPC64BRIDGE */ - -#ifdef CONFIG_ALTIVEC -/* Note that the AltiVec support is closely modeled after the FP - * support. Changes to one are likely to be applicable to the - * other! */ -load_up_altivec: -/* - * Disable AltiVec for the task which had AltiVec previously, - * and save its AltiVec registers in its thread_struct. - * Enables AltiVec for use in the kernel on return. - * On SMP we know the AltiVec units are free, since we give it up every - * switch. 
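
A loose C model of the lazy, non-SMP AltiVec hand-off that this comment and the load_up_altivec/giveup_altivec code below implement: the unit stays owned by last_task_used_altivec until another task traps on a vector instruction, at which point the old owner's registers are saved and the new owner's are loaded. The struct and the flat array standing in for the 32 vector registers are placeholders, not kernel structures.

struct vtask {
    int          owns_unit;        /* models the MSR_VEC bit in the task's saved MSR */
    unsigned int vr_state[32];     /* stands in for the 32 128-bit vector registers */
};

static struct vtask *last_task_used_altivec;

static void load_up_altivec_model(struct vtask *current, unsigned int live_vrs[32])
{
    struct vtask *prev = last_task_used_altivec;

    if (prev && prev != current) {
        for (int i = 0; i < 32; i++)
            prev->vr_state[i] = live_vrs[i];   /* SAVE_32VRS for the old owner */
        prev->owns_unit = 0;                   /* clear MSR_VEC in its saved state */
    }
    for (int i = 0; i < 32; i++)
        live_vrs[i] = current->vr_state[i];    /* REST_32VRS for the new owner */
    current->owns_unit = 1;                    /* AltiVec usable on return */
    last_task_used_altivec = current;
}
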
-- Kumar - */ - mfmsr r5 - oris r5,r5,MSR_VEC@h - MTMSRD(r5) /* enable use of AltiVec now */ - isync -/* - * For SMP, we don't do lazy AltiVec switching because it just gets too - * horrendously complex, especially when a task switches from one CPU - * to another. Instead we call giveup_altivec in switch_to. - */ -#ifndef CONFIG_SMP - tophys(r6,0) - addis r3,r6,last_task_used_altivec@ha - lwz r4,last_task_used_altivec@l(r3) - cmpwi 0,r4,0 - beq 1f - add r4,r4,r6 - addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */ - SAVE_32VRS(0,r10,r4) - mfvscr vr0 - li r10,THREAD_VSCR - stvx vr0,r10,r4 - lwz r5,PT_REGS(r4) - add r5,r5,r6 - lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) - lis r10,MSR_VEC@h - andc r4,r4,r10 /* disable altivec for previous task */ - stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) -1: -#endif /* CONFIG_SMP */ - /* enable use of AltiVec after return */ - oris r9,r9,MSR_VEC@h - mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ - li r4,1 - li r10,THREAD_VSCR - stw r4,THREAD_USED_VR(r5) - lvx vr0,r10,r5 - mtvscr vr0 - REST_32VRS(0,r10,r5) -#ifndef CONFIG_SMP - subi r4,r5,THREAD - sub r4,r4,r6 - stw r4,last_task_used_altivec@l(r3) -#endif /* CONFIG_SMP */ - /* restore registers and return */ - /* we haven't used ctr or xer or lr */ - b fast_exception_return - -/* - * AltiVec unavailable trap from kernel - print a message, but let - * the task use AltiVec in the kernel until it returns to user mode. - */ -KernelAltiVec: - lwz r3,_MSR(r1) - oris r3,r3,MSR_VEC@h - stw r3,_MSR(r1) /* enable use of AltiVec after return */ - lis r3,87f@h - ori r3,r3,87f@l - mr r4,r2 /* current */ - lwz r5,_NIP(r1) - bl printk - b ret_from_except -87: .string "AltiVec used in kernel (task=%p, pc=%x) \n" - .align 4,0 - -/* - * giveup_altivec(tsk) - * Disable AltiVec for the task given as the argument, - * and save the AltiVec registers in its thread_struct. - * Enables AltiVec for use in the kernel on return. - */ - - .globl giveup_altivec -giveup_altivec: - mfmsr r5 - oris r5,r5,MSR_VEC@h - SYNC - MTMSRD(r5) /* enable use of AltiVec now */ - isync - cmpwi 0,r3,0 - beqlr- /* if no previous owner, done */ - addi r3,r3,THREAD /* want THREAD of task */ - lwz r5,PT_REGS(r3) - cmpwi 0,r5,0 - SAVE_32VRS(0, r4, r3) - mfvscr vr0 - li r4,THREAD_VSCR - stvx vr0,r4,r3 - beq 1f - lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) - lis r3,MSR_VEC@h - andc r4,r4,r3 /* disable AltiVec for previous task */ - stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) -1: -#ifndef CONFIG_SMP - li r5,0 - lis r4,last_task_used_altivec@ha - stw r5,last_task_used_altivec@l(r4) -#endif /* CONFIG_SMP */ - blr -#endif /* CONFIG_ALTIVEC */ - -/* - * This code is jumped to from the startup code to copy - * the kernel image to physical address 0. - */ -relocate_kernel: - addis r9,r26,klimit@ha /* fetch klimit */ - lwz r25,klimit@l(r9) - addis r25,r25,-KERNELBASE@h - li r3,0 /* Destination base address */ - li r6,0 /* Destination offset */ - li r5,0x4000 /* # bytes of memory to copy */ - bl copy_and_flush /* copy the first 0x4000 bytes */ - addi r0,r3,4f@l /* jump to the address of 4f */ - mtctr r0 /* in copy and do the rest. */ - bctr /* jump to the copy */ -4: mr r5,r25 - bl copy_and_flush /* copy the rest */ - b turn_on_mmu - -/* - * Copy routine used to copy the kernel to start at physical address 0 - * and flush and invalidate the caches as needed. - * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset - * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. 
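
A C-level sketch of the copy_and_flush routine that follows: the image is copied one cache line at a time, and each line is then pushed out of the data cache and invalidated in the instruction cache so the copied code can be fetched correctly. Here __builtin___clear_cache stands in for the dcbst/sync/icbi sequence, and the 32-byte line size is an assumption matching L1_CACHE_BYTES on these parts.

#include <stddef.h>
#include <string.h>

#define LINE_BYTES 32                       /* assumed L1_CACHE_BYTES */

/* mirrors the register contract: dst = r3, src = r4, limit = r5, offset = r6 */
static void copy_and_flush_model(char *dst, const char *src, size_t limit, size_t offset)
{
    while (offset < limit) {
        memcpy(dst + offset, src + offset, LINE_BYTES);          /* copy one line */
        __builtin___clear_cache(dst + offset, dst + offset + LINE_BYTES);
        offset += LINE_BYTES;                                    /* next cache line */
    }
}
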
- */ -copy_and_flush: - addi r5,r5,-4 - addi r6,r6,-4 -4: li r0,L1_CACHE_BYTES/4 - mtctr r0 -3: addi r6,r6,4 /* copy a cache line */ - lwzx r0,r6,r4 - stwx r0,r6,r3 - bdnz 3b - dcbst r6,r3 /* write it to memory */ - sync - icbi r6,r3 /* flush the icache line */ - cmplw 0,r6,r5 - blt 4b - sync /* additional sync needed on g4 */ - isync - addi r5,r5,4 - addi r6,r6,4 - blr - -#ifdef CONFIG_APUS -/* - * On APUS the physical base address of the kernel is not known at compile - * time, which means the __pa/__va constants used are incorrect. In the - * __init section is recorded the virtual addresses of instructions using - * these constants, so all that has to be done is fix these before - * continuing the kernel boot. - * - * r4 = The physical address of the kernel base. - */ -fix_mem_constants: - mr r10,r4 - addis r10,r10,-KERNELBASE@h /* virt_to_phys constant */ - neg r11,r10 /* phys_to_virt constant */ - - lis r12,__vtop_table_begin@h - ori r12,r12,__vtop_table_begin@l - add r12,r12,r10 /* table begin phys address */ - lis r13,__vtop_table_end@h - ori r13,r13,__vtop_table_end@l - add r13,r13,r10 /* table end phys address */ - subi r12,r12,4 - subi r13,r13,4 -1: lwzu r14,4(r12) /* virt address of instruction */ - add r14,r14,r10 /* phys address of instruction */ - lwz r15,0(r14) /* instruction, now insert top */ - rlwimi r15,r10,16,16,31 /* half of vp const in low half */ - stw r15,0(r14) /* of instruction and restore. */ - dcbst r0,r14 /* write it to memory */ - sync - icbi r0,r14 /* flush the icache line */ - cmpw r12,r13 - bne 1b - sync /* additional sync needed on g4 */ - isync - -/* - * Map the memory where the exception handlers will - * be copied to when hash constants have been patched. - */ -#ifdef CONFIG_APUS_FAST_EXCEPT - lis r8,0xfff0 -#else - lis r8,0 -#endif - ori r8,r8,0x2 /* 128KB, supervisor */ - mtspr SPRN_DBAT3U,r8 - mtspr SPRN_DBAT3L,r8 - - lis r12,__ptov_table_begin@h - ori r12,r12,__ptov_table_begin@l - add r12,r12,r10 /* table begin phys address */ - lis r13,__ptov_table_end@h - ori r13,r13,__ptov_table_end@l - add r13,r13,r10 /* table end phys address */ - subi r12,r12,4 - subi r13,r13,4 -1: lwzu r14,4(r12) /* virt address of instruction */ - add r14,r14,r10 /* phys address of instruction */ - lwz r15,0(r14) /* instruction, now insert top */ - rlwimi r15,r11,16,16,31 /* half of pv const in low half*/ - stw r15,0(r14) /* of instruction and restore. */ - dcbst r0,r14 /* write it to memory */ - sync - icbi r0,r14 /* flush the icache line */ - cmpw r12,r13 - bne 1b - - sync /* additional sync needed on g4 */ - isync /* No speculative loading until now */ - blr - -/*********************************************************************** - * Please note that on APUS the exception handlers are located at the - * physical address 0xfff0000. For this reason, the exception handlers - * cannot use relative branches to access the code below. - ***********************************************************************/ -#endif /* CONFIG_APUS */ - -#ifdef CONFIG_SMP -#ifdef CONFIG_GEMINI - .globl __secondary_start_gemini -__secondary_start_gemini: - mfspr r4,SPRN_HID0 - ori r4,r4,HID0_ICFI - li r3,0 - ori r3,r3,HID0_ICE - andc r4,r4,r3 - mtspr SPRN_HID0,r4 - sync - b __secondary_start -#endif /* CONFIG_GEMINI */ - - .globl __secondary_start_pmac_0 -__secondary_start_pmac_0: - /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. 
*/ - li r24,0 - b 1f - li r24,1 - b 1f - li r24,2 - b 1f - li r24,3 -1: - /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0 - set to map the 0xf0000000 - 0xffffffff region */ - mfmsr r0 - rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ - SYNC - mtmsr r0 - isync - - .globl __secondary_start -__secondary_start: -#ifdef CONFIG_PPC64BRIDGE - mfmsr r0 - clrldi r0,r0,1 /* make sure it's in 32-bit mode */ - SYNC - MTMSRD(r0) - isync -#endif - /* Copy some CPU settings from CPU 0 */ - bl __restore_cpu_setup - - lis r3,-KERNELBASE@h - mr r4,r24 - bl call_setup_cpu /* Call setup_cpu for this CPU */ -#ifdef CONFIG_6xx - lis r3,-KERNELBASE@h - bl init_idle_6xx -#endif /* CONFIG_6xx */ -#ifdef CONFIG_POWER4 - lis r3,-KERNELBASE@h - bl init_idle_power4 -#endif /* CONFIG_POWER4 */ - - /* get current_thread_info and current */ - lis r1,secondary_ti@ha - tophys(r1,r1) - lwz r1,secondary_ti@l(r1) - tophys(r2,r1) - lwz r2,TI_TASK(r2) - - /* stack */ - addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD - li r0,0 - tophys(r3,r1) - stw r0,0(r3) - - /* load up the MMU */ - bl load_up_mmu - - /* ptr to phys current thread */ - tophys(r4,r2) - addi r4,r4,THREAD /* phys address of our thread_struct */ - CLR_TOP32(r4) - mtspr SPRN_SPRG3,r4 - li r3,0 - mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */ - - /* enable MMU and jump to start_secondary */ - li r4,MSR_KERNEL - FIX_SRR1(r4,r5) - lis r3,start_secondary@h - ori r3,r3,start_secondary@l - mtspr SPRN_SRR0,r3 - mtspr SPRN_SRR1,r4 - SYNC - RFI -#endif /* CONFIG_SMP */ - -/* - * Those generic dummy functions are kept for CPUs not - * included in CONFIG_6xx - */ -#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) -_GLOBAL(__save_cpu_setup) - blr -_GLOBAL(__restore_cpu_setup) - blr -#endif /* !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) */ - - -/* - * Load stuff into the MMU. Intended to be called with - * IR=0 and DR=0. - */ -load_up_mmu: - sync /* Force all PTE updates to finish */ - isync - tlbia /* Clear all TLB entries */ - sync /* wait for tlbia/tlbie to finish */ - TLBSYNC /* ... on all CPUs */ - /* Load the SDR1 register (hash table base & size) */ - lis r6,_SDR1@ha - tophys(r6,r6) - lwz r6,_SDR1@l(r6) - mtspr SPRN_SDR1,r6 -#ifdef CONFIG_PPC64BRIDGE - /* clear the ASR so we only use the pseudo-segment registers. */ - li r6,0 - mtasr r6 -#endif /* CONFIG_PPC64BRIDGE */ - li r0,16 /* load up segment register values */ - mtctr r0 /* for context 0 */ - lis r3,0x2000 /* Ku = 1, VSID = 0 */ - li r4,0 -3: mtsrin r3,r4 - addi r3,r3,0x111 /* increment VSID */ - addis r4,r4,0x1000 /* address of next segment */ - bdnz 3b -#ifndef CONFIG_POWER4 -/* Load the BAT registers with the values set up by MMU_init. - MMU_init takes care of whether we're on a 601 or not. */ - mfpvr r3 - srwi r3,r3,16 - cmpwi r3,1 - lis r3,BATS@ha - addi r3,r3,BATS@l - tophys(r3,r3) - LOAD_BAT(0,r3,r4,r5) - LOAD_BAT(1,r3,r4,r5) - LOAD_BAT(2,r3,r4,r5) - LOAD_BAT(3,r3,r4,r5) -#endif /* CONFIG_POWER4 */ - blr - -/* - * This is where the main kernel code starts. - */ -start_here: - /* ptr to current */ - lis r2,init_task@h - ori r2,r2,init_task@l - /* Set up for using our exception vectors */ - /* ptr to phys current thread */ - tophys(r4,r2) - addi r4,r4,THREAD /* init task's THREAD */ - CLR_TOP32(r4) - mtspr SPRN_SPRG3,r4 - li r3,0 - mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */ - - /* stack */ - lis r1,init_thread_union@ha - addi r1,r1,init_thread_union@l - li r0,0 - stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) -/* - * Do early bootinfo parsing, platform-specific initialization, - * and set up the MMU. 
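
The segment-register loop in load_up_mmu above can be checked with a few lines of C: sixteen registers are programmed for context 0, starting from 0x20000000 (Ku = 1, VSID = 0), with the VSID bumped by 0x111 per register and each register covering one 256MB segment. The program only prints the values the loop would write with mtsrin; it is an illustration, not kernel code.

#include <stdio.h>

int main(void)
{
    unsigned int val = 0x20000000;          /* Ku = 1, VSID = 0 */
    unsigned int ea  = 0;                   /* start of the segment */

    for (int sr = 0; sr < 16; sr++) {
        printf("SR%-2d covers 0x%08x..0x%08x -> 0x%08x\n",
               sr, ea, ea + 0x0fffffffu, val);
        val += 0x111;                       /* increment VSID */
        ea  += 0x10000000;                  /* next 256MB segment */
    }
    return 0;
}
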
- */ - mr r3,r31 - mr r4,r30 - mr r5,r29 - mr r6,r28 - mr r7,r27 - bl machine_init - bl MMU_init - -#ifdef CONFIG_APUS - /* Copy exception code to exception vector base on APUS. */ - lis r4,KERNELBASE@h -#ifdef CONFIG_APUS_FAST_EXCEPT - lis r3,0xfff0 /* Copy to 0xfff00000 */ -#else - lis r3,0 /* Copy to 0x00000000 */ -#endif - li r5,0x4000 /* # bytes of memory to copy */ - li r6,0 - bl copy_and_flush /* copy the first 0x4000 bytes */ -#endif /* CONFIG_APUS */ - -/* - * Go back to running unmapped so we can load up new values - * for SDR1 (hash table pointer) and the segment registers - * and change to using our exception vectors. - */ - lis r4,2f@h - ori r4,r4,2f@l - tophys(r4,r4) - li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) - FIX_SRR1(r3,r5) - mtspr SPRN_SRR0,r4 - mtspr SPRN_SRR1,r3 - SYNC - RFI -/* Load up the kernel context */ -2: bl load_up_mmu - -#ifdef CONFIG_BDI_SWITCH - /* Add helper information for the Abatron bdiGDB debugger. - * We do this here because we know the mmu is disabled, and - * will be enabled for real in just a few instructions. - */ - lis r5, abatron_pteptrs@h - ori r5, r5, abatron_pteptrs@l - stw r5, 0xf0(r0) /* This much match your Abatron config */ - lis r6, swapper_pg_dir@h - ori r6, r6, swapper_pg_dir@l - tophys(r5, r5) - stw r6, 0(r5) -#endif /* CONFIG_BDI_SWITCH */ - -/* Now turn on the MMU for real! */ - li r4,MSR_KERNEL - FIX_SRR1(r4,r5) - lis r3,start_kernel@h - ori r3,r3,start_kernel@l - mtspr SPRN_SRR0,r3 - mtspr SPRN_SRR1,r4 - SYNC - RFI - -/* - * Set up the segment registers for a new context. - */ -_GLOBAL(set_context) - mulli r3,r3,897 /* multiply context by skew factor */ - rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */ - addis r3,r3,0x6000 /* Set Ks, Ku bits */ - li r0,NUM_USER_SEGMENTS - mtctr r0 - -#ifdef CONFIG_BDI_SWITCH - /* Context switch the PTE pointer for the Abatron BDI2000. - * The PGDIR is passed as second argument. - */ - lis r5, KERNELBASE@h - lwz r5, 0xf0(r5) - stw r4, 0x4(r5) -#endif - li r4,0 - isync -3: -#ifdef CONFIG_PPC64BRIDGE - slbie r4 -#endif /* CONFIG_PPC64BRIDGE */ - mtsrin r3,r4 - addi r3,r3,0x111 /* next VSID */ - rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */ - addis r4,r4,0x1000 /* address of next segment */ - bdnz 3b - sync - isync - blr - -/* - * An undocumented "feature" of 604e requires that the v bit - * be cleared before changing BAT values. - * - * Also, newer IBM firmware does not clear bat3 and 4 so - * this makes sure it's done. - * -- Cort - */ -clear_bats: - li r10,0 - mfspr r9,SPRN_PVR - rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ - cmpwi r9, 1 - beq 1f - - mtspr SPRN_DBAT0U,r10 - mtspr SPRN_DBAT0L,r10 - mtspr SPRN_DBAT1U,r10 - mtspr SPRN_DBAT1L,r10 - mtspr SPRN_DBAT2U,r10 - mtspr SPRN_DBAT2L,r10 - mtspr SPRN_DBAT3U,r10 - mtspr SPRN_DBAT3L,r10 -1: - mtspr SPRN_IBAT0U,r10 - mtspr SPRN_IBAT0L,r10 - mtspr SPRN_IBAT1U,r10 - mtspr SPRN_IBAT1L,r10 - mtspr SPRN_IBAT2U,r10 - mtspr SPRN_IBAT2L,r10 - mtspr SPRN_IBAT3U,r10 - mtspr SPRN_IBAT3L,r10 -BEGIN_FTR_SECTION - /* Here's a tweak: at this point, CPU setup have - * not been called yet, so HIGH_BAT_EN may not be - * set in HID0 for the 745x processors. However, it - * seems that doesn't affect our ability to actually - * write to these SPRs. 
- */ - mtspr SPRN_DBAT4U,r10 - mtspr SPRN_DBAT4L,r10 - mtspr SPRN_DBAT5U,r10 - mtspr SPRN_DBAT5L,r10 - mtspr SPRN_DBAT6U,r10 - mtspr SPRN_DBAT6L,r10 - mtspr SPRN_DBAT7U,r10 - mtspr SPRN_DBAT7L,r10 - mtspr SPRN_IBAT4U,r10 - mtspr SPRN_IBAT4L,r10 - mtspr SPRN_IBAT5U,r10 - mtspr SPRN_IBAT5L,r10 - mtspr SPRN_IBAT6U,r10 - mtspr SPRN_IBAT6L,r10 - mtspr SPRN_IBAT7U,r10 - mtspr SPRN_IBAT7L,r10 -END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) - blr - -flush_tlbs: - lis r10, 0x40 -1: addic. r10, r10, -0x1000 - tlbie r10 - blt 1b - sync - blr - -mmu_off: - addi r4, r3, __after_mmu_off - _start - mfmsr r3 - andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */ - beqlr - andc r3,r3,r0 - mtspr SPRN_SRR0,r4 - mtspr SPRN_SRR1,r3 - sync - RFI - -#ifndef CONFIG_POWER4 -/* - * Use the first pair of BAT registers to map the 1st 16MB - * of RAM to KERNELBASE. From this point on we can't safely - * call OF any more. - */ -initial_bats: - lis r11,KERNELBASE@h -#ifndef CONFIG_PPC64BRIDGE - mfspr r9,SPRN_PVR - rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ - cmpwi 0,r9,1 - bne 4f - ori r11,r11,4 /* set up BAT registers for 601 */ - li r8,0x7f /* valid, block length = 8MB */ - oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */ - oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */ - mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */ - mtspr SPRN_IBAT0L,r8 /* lower BAT register */ - mtspr SPRN_IBAT1U,r9 - mtspr SPRN_IBAT1L,r10 - isync - blr -#endif /* CONFIG_PPC64BRIDGE */ - -4: tophys(r8,r11) -#ifdef CONFIG_SMP - ori r8,r8,0x12 /* R/W access, M=1 */ -#else - ori r8,r8,2 /* R/W access */ -#endif /* CONFIG_SMP */ -#ifdef CONFIG_APUS - ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */ -#else - ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */ -#endif /* CONFIG_APUS */ - -#ifdef CONFIG_PPC64BRIDGE - /* clear out the high 32 bits in the BAT */ - clrldi r11,r11,32 - clrldi r8,r8,32 -#endif /* CONFIG_PPC64BRIDGE */ - mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */ - mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */ - mtspr SPRN_IBAT0L,r8 - mtspr SPRN_IBAT0U,r11 - isync - blr - -#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) -setup_disp_bat: - /* - * setup the display bat prepared for us in prom.c - */ - mflr r8 - bl reloc_offset - mtlr r8 - addis r8,r3,disp_BAT@ha - addi r8,r8,disp_BAT@l - lwz r11,0(r8) - lwz r8,4(r8) - mfspr r9,SPRN_PVR - rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ - cmpwi 0,r9,1 - beq 1f - mtspr SPRN_DBAT3L,r8 - mtspr SPRN_DBAT3U,r11 - blr -1: mtspr SPRN_IBAT3L,r8 - mtspr SPRN_IBAT3U,r11 - blr - -#endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */ - -#else /* CONFIG_POWER4 */ -/* - * Load up the SDR1 and segment register values now - * since we don't have the BATs. - * Also make sure we are running in 32-bit mode. - */ - -initial_mm_power4: - addis r14,r3,_SDR1@ha /* get the value from _SDR1 */ - lwz r14,_SDR1@l(r14) /* assume hash table below 4GB */ - mtspr SPRN_SDR1,r14 - slbia - lis r4,0x2000 /* set pseudo-segment reg 12 */ - ori r5,r4,0x0ccc - mtsr 12,r5 -#if 0 - ori r5,r4,0x0888 /* set pseudo-segment reg 8 */ - mtsr 8,r5 /* (for access to serial port) */ -#endif -#ifdef CONFIG_BOOTX_TEXT - ori r5,r4,0x0999 /* set pseudo-segment reg 9 */ - mtsr 9,r5 /* (for access to screen) */ -#endif - mfmsr r0 - clrldi r0,r0,1 - sync - mtmsr r0 - isync - blr - -#endif /* CONFIG_POWER4 */ - -#ifdef CONFIG_8260 -/* Jump into the system reset for the rom. - * We first disable the MMU, and then jump to the ROM reset address. 
- * - * r3 is the board info structure, r4 is the location for starting. - * I use this for building a small kernel that can load other kernels, - * rather than trying to write or rely on a rom monitor that can tftp load. - */ - .globl m8260_gorom -m8260_gorom: - mfmsr r0 - rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */ - sync - mtmsr r0 - sync - mfspr r11, SPRN_HID0 - lis r10, 0 - ori r10,r10,HID0_ICE|HID0_DCE - andc r11, r11, r10 - mtspr SPRN_HID0, r11 - isync - li r5, MSR_ME|MSR_RI - lis r6,2f@h - addis r6,r6,-KERNELBASE@h - ori r6,r6,2f@l - mtspr SPRN_SRR0,r6 - mtspr SPRN_SRR1,r5 - isync - sync - rfi -2: - mtlr r4 - blr -#endif - - -/* - * We put a few things here that have to be page-aligned. - * This stuff goes at the beginning of the data segment, - * which is page-aligned. - */ - .data - .globl sdata -sdata: - .globl empty_zero_page -empty_zero_page: - .space 4096 - - .globl swapper_pg_dir -swapper_pg_dir: - .space 4096 - -/* - * This space gets a copy of optional info passed to us by the bootstrap - * Used to pass parameters into the kernel like root=/dev/sda1, etc. - */ - .globl cmd_line -cmd_line: - .space 512 - - .globl intercept_table -intercept_table: - .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700 - .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0 - .long 0, 0, 0, i0x1300, 0, 0, 0, 0 - .long 0, 0, 0, 0, 0, 0, 0, 0 - .long 0, 0, 0, 0, 0, 0, 0, 0 - .long 0, 0, 0, 0, 0, 0, 0, 0 - -/* Room for two PTE pointers, usually the kernel and current user pointers - * to their respective root page table. - */ -abatron_pteptrs: - .space 8 diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S deleted file mode 100644 index 677c571aa27..00000000000 --- a/arch/ppc/kernel/head_44x.S +++ /dev/null @@ -1,782 +0,0 @@ -/* - * arch/ppc/kernel/head_44x.S - * - * Kernel execution entry point code. - * - * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> - * Initial PowerPC version. - * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> - * Rewritten for PReP - * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> - * Low-level exception handers, MMU support, and rewrite. - * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> - * PowerPC 8xx modifications. - * Copyright (c) 1998-1999 TiVo, Inc. - * PowerPC 403GCX modifications. - * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> - * PowerPC 403GCX/405GP modifications. - * Copyright 2000 MontaVista Software Inc. - * PPC405 modifications - * PowerPC 403GCX/405GP modifications. - * Author: MontaVista Software, Inc. - * frank_rowand@mvista.com or source@mvista.com - * debbie_chu@mvista.com - * Copyright 2002-2005 MontaVista Software, Inc. - * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <linux/config.h> -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/mmu.h> -#include <asm/pgtable.h> -#include <asm/ibm4xx.h> -#include <asm/ibm44x.h> -#include <asm/cputable.h> -#include <asm/thread_info.h> -#include <asm/ppc_asm.h> -#include <asm/asm-offsets.h> -#include "head_booke.h" - - -/* As with the other PowerPC ports, it is expected that when code - * execution begins here, the following registers contain valid, yet - * optional, information: - * - * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) 
- * r4 - Starting address of the init RAM disk - * r5 - Ending address of the init RAM disk - * r6 - Start of kernel command line string (e.g. "mem=128") - * r7 - End of kernel command line string - * - */ - .text -_GLOBAL(_stext) -_GLOBAL(_start) - /* - * Reserve a word at a fixed location to store the address - * of abatron_pteptrs - */ - nop -/* - * Save parameters we are passed - */ - mr r31,r3 - mr r30,r4 - mr r29,r5 - mr r28,r6 - mr r27,r7 - li r24,0 /* CPU number */ - -/* - * Set up the initial MMU state - * - * We are still executing code at the virtual address - * mappings set by the firmware for the base of RAM. - * - * We first invalidate all TLB entries but the one - * we are running from. We then load the KERNELBASE - * mappings so we can begin to use kernel addresses - * natively and so the interrupt vector locations are - * permanently pinned (necessary since Book E - * implementations always have translation enabled). - * - * TODO: Use the known TLB entry we are running from to - * determine which physical region we are located - * in. This can be used to determine where in RAM - * (on a shared CPU system) or PCI memory space - * (on a DRAMless system) we are located. - * For now, we assume a perfect world which means - * we are located at the base of DRAM (physical 0). - */ - -/* - * Search TLB for entry that we are currently using. - * Invalidate all entries but the one we are using. - */ - /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */ - mfspr r3,SPRN_PID /* Get PID */ - mfmsr r4 /* Get MSR */ - andi. r4,r4,MSR_IS@l /* TS=1? */ - beq wmmucr /* If not, leave STS=0 */ - oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */ -wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */ - sync - - bl invstr /* Find our address */ -invstr: mflr r5 /* Make it accessible */ - tlbsx r23,0,r5 /* Find entry we are in */ - li r4,0 /* Start at TLB entry 0 */ - li r3,0 /* Set PAGEID inval value */ -1: cmpw r23,r4 /* Is this our entry? */ - beq skpinv /* If so, skip the inval */ - tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */ -skpinv: addi r4,r4,1 /* Increment */ - cmpwi r4,64 /* Are we done? */ - bne 1b /* If not, repeat */ - isync /* If so, context change */ - -/* - * Configure and load pinned entry into TLB slot 63. 
- */ - - lis r3,KERNELBASE@h /* Load the kernel virtual address */ - ori r3,r3,KERNELBASE@l - - /* Kernel is at the base of RAM */ - li r4, 0 /* Load the kernel physical address */ - - /* Load the kernel PID = 0 */ - li r0,0 - mtspr SPRN_PID,r0 - sync - - /* Initialize MMUCR */ - li r5,0 - mtspr SPRN_MMUCR,r5 - sync - - /* pageid fields */ - clrrwi r3,r3,10 /* Mask off the effective page number */ - ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M - - /* xlat fields */ - clrrwi r4,r4,10 /* Mask off the real page number */ - /* ERPN is 0 for first 4GB page */ - - /* attrib fields */ - /* Added guarded bit to protect against speculative loads/stores */ - li r5,0 - ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G) - - li r0,63 /* TLB slot 63 */ - - tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ - tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ - tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ - - /* Force context change */ - mfmsr r0 - mtspr SPRN_SRR1, r0 - lis r0,3f@h - ori r0,r0,3f@l - mtspr SPRN_SRR0,r0 - sync - rfi - - /* If necessary, invalidate original entry we used */ -3: cmpwi r23,63 - beq 4f - li r6,0 - tlbwe r6,r23,PPC44x_TLB_PAGEID - isync - -4: -#ifdef CONFIG_SERIAL_TEXT_DEBUG - /* - * Add temporary UART mapping for early debug. - * We can map UART registers wherever we want as long as they don't - * interfere with other system mappings (e.g. with pinned entries). - * For an example of how we handle this - see ocotea.h. --ebs - */ - /* pageid fields */ - lis r3,UART0_IO_BASE@h - ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K - - /* xlat fields */ - lis r4,UART0_PHYS_IO_BASE@h /* RPN depends on SoC */ -#ifdef UART0_PHYS_ERPN - ori r4,r4,UART0_PHYS_ERPN /* Add ERPN if above 4GB */ -#endif - - /* attrib fields */ - li r5,0 - ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G) - - li r0,0 /* TLB slot 0 */ - - tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ - tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ - tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ - - /* Force context change */ - isync -#endif /* CONFIG_SERIAL_TEXT_DEBUG */ - - /* Establish the interrupt vector offsets */ - SET_IVOR(0, CriticalInput); - SET_IVOR(1, MachineCheck); - SET_IVOR(2, DataStorage); - SET_IVOR(3, InstructionStorage); - SET_IVOR(4, ExternalInput); - SET_IVOR(5, Alignment); - SET_IVOR(6, Program); - SET_IVOR(7, FloatingPointUnavailable); - SET_IVOR(8, SystemCall); - SET_IVOR(9, AuxillaryProcessorUnavailable); - SET_IVOR(10, Decrementer); - SET_IVOR(11, FixedIntervalTimer); - SET_IVOR(12, WatchdogTimer); - SET_IVOR(13, DataTLBError); - SET_IVOR(14, InstructionTLBError); - SET_IVOR(15, Debug); - - /* Establish the interrupt vector base */ - lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ - mtspr SPRN_IVPR,r4 - -#ifdef CONFIG_440EP - /* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */ - mfspr r2,SPRN_CCR0 - lis r3,0xffef - ori r3,r3,0xffff - and r2,r2,r3 - mtspr SPRN_CCR0,r2 - isync -#endif - - /* - * This is where the main kernel code starts. - */ - - /* ptr to current */ - lis r2,init_task@h - ori r2,r2,init_task@l - - /* ptr to current thread */ - addi r4,r2,THREAD /* init task's THREAD */ - mtspr SPRN_SPRG3,r4 - - /* stack */ - lis r1,init_thread_union@h - ori r1,r1,init_thread_union@l - li r0,0 - stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) - - bl early_init - -/* - * Decide what sort of machine this is and initialize the MMU. 
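A minimal C sketch of the three words the tlbwe sequence above writes for pinned slot 63. The names tlb44x_entry and make_pinned_entry are illustrative only, and the PPC44x_TLB_* flag values are passed in symbolically rather than restated, since only <asm/mmu.h> defines them.

    #include <stdint.h>

    struct tlb44x_entry {
        uint32_t pageid;   /* EPN | valid | page size            */
        uint32_t xlat;     /* RPN | ERPN (ERPN = 0 below 4 GiB)  */
        uint32_t attrib;   /* storage attributes + permissions   */
    };

    /* virt/phys get their low 10 bits cleared, as clrrwi ...,10 does above;
     * valid_size = PPC44x_TLB_VALID | PPC44x_TLB_256M for the kernel entry,
     * perms = PPC44x_TLB_SW | _SR | _SX | _G (supervisor RWX, guarded). */
    static struct tlb44x_entry make_pinned_entry(uint32_t virt, uint32_t phys,
                                                 uint32_t valid_size,
                                                 uint32_t perms)
    {
        struct tlb44x_entry e;
        e.pageid = (virt & ~0x3ffu) | valid_size;
        e.xlat   = (phys & ~0x3ffu);     /* ERPN field left at zero */
        e.attrib = perms;
        return e;
    }

Using the topmost slot keeps this entry out of reach of the round-robin replacement in finish_tlb_load further down, which is bounded by tlb_44x_hwater.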
- */ - mr r3,r31 - mr r4,r30 - mr r5,r29 - mr r6,r28 - mr r7,r27 - bl machine_init - bl MMU_init - - /* Setup PTE pointers for the Abatron bdiGDB */ - lis r6, swapper_pg_dir@h - ori r6, r6, swapper_pg_dir@l - lis r5, abatron_pteptrs@h - ori r5, r5, abatron_pteptrs@l - lis r4, KERNELBASE@h - ori r4, r4, KERNELBASE@l - stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ - stw r6, 0(r5) - - /* Let's move on */ - lis r4,start_kernel@h - ori r4,r4,start_kernel@l - lis r3,MSR_KERNEL@h - ori r3,r3,MSR_KERNEL@l - mtspr SPRN_SRR0,r4 - mtspr SPRN_SRR1,r3 - rfi /* change context and jump to start_kernel */ - -/* - * Interrupt vector entry code - * - * The Book E MMUs are always on so we don't need to handle - * interrupts in real mode as with previous PPC processors. In - * this case we handle interrupts in the kernel virtual address - * space. - * - * Interrupt vectors are dynamically placed relative to the - * interrupt prefix as determined by the address of interrupt_base. - * The interrupt vectors offsets are programmed using the labels - * for each interrupt vector entry. - * - * Interrupt vectors must be aligned on a 16 byte boundary. - * We align on a 32 byte cache line boundary for good measure. - */ - -interrupt_base: - /* Critical Input Interrupt */ - CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception) - - /* Machine Check Interrupt */ -#ifdef CONFIG_440A - MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception) -#else - CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception) -#endif - - /* Data Storage Interrupt */ - START_EXCEPTION(DataStorage) - mtspr SPRN_SPRG0, r10 /* Save some working registers */ - mtspr SPRN_SPRG1, r11 - mtspr SPRN_SPRG4W, r12 - mtspr SPRN_SPRG5W, r13 - mfcr r11 - mtspr SPRN_SPRG7W, r11 - - /* - * Check if it was a store fault, if not then bail - * because a user tried to access a kernel or - * read-protected page. Otherwise, get the - * offending address and handle it. - */ - mfspr r10, SPRN_ESR - andis. r10, r10, ESR_ST@h - beq 2f - - mfspr r10, SPRN_DEAR /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, TASK_SIZE@h - cmplw r10, r11 - blt+ 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - - mfspr r12,SPRN_MMUCR - rlwinm r12,r12,0,0,23 /* Clear TID */ - - b 4f - - /* Get the PGD for the current thread */ -3: - mfspr r11,SPRN_SPRG3 - lwz r11,PGDIR(r11) - - /* Load PID into MMUCR TID */ - mfspr r12,SPRN_MMUCR /* Get MMUCR */ - mfspr r13,SPRN_PID /* Get PID */ - rlwimi r12,r13,0,24,31 /* Set TID */ - -4: - mtspr SPRN_MMUCR,r12 - - rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ - lwzx r11, r12, r11 /* Get pgd/pmd entry */ - rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ - beq 2f /* Bail if no table */ - - rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ - lwz r11, 4(r12) /* Get pte entry */ - - andi. r13, r11, _PAGE_RW /* Is it writeable? */ - beq 2f /* Bail if not */ - - /* Update 'changed'. 
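The rlwinm/rlwimi arithmetic in the walk just above is easier to follow in C. This is a sketch under the layout implied by the surrounding code (64-bit PTEs on 44x, so a 4 KiB PTE page holds 512 entries and each PGD entry covers 2 MiB); pte_low_word is an illustrative name, pgd is whichever directory was selected above, and ea is the faulting address.

    #include <stdint.h>

    static uint32_t pte_low_word(uint32_t *pgd, uint32_t ea)
    {
        uint32_t l1 = pgd[ea >> 21];          /* rlwinm r12,r10,13,19,29 + lwzx */
        if (!(l1 & ~0x7ffu))                  /* rlwinm. r12,r11,0,0,20; beq    */
            return 0;                         /* 0 stands in for the bailout    */
        uint64_t *pte_page = (uint64_t *)(uintptr_t)(l1 & ~0x7ffu);
        uint32_t idx = (ea >> 12) & 0x1ff;    /* rlwimi r12,r10,23,20,28        */
        return (uint32_t)pte_page[idx];       /* lwz r11,4(r12): LS word on BE  */
    }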
- */ - ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE - stw r11, 4(r12) /* Update Linux page table */ - - li r13, PPC44x_TLB_SR@l /* Set SR */ - rlwimi r13, r11, 29, 29, 29 /* SX = _PAGE_HWEXEC */ - rlwimi r13, r11, 0, 30, 30 /* SW = _PAGE_RW */ - rlwimi r13, r11, 29, 28, 28 /* UR = _PAGE_USER */ - rlwimi r12, r11, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ - rlwimi r12, r11, 29, 30, 30 /* (_PAGE_USER>>3)->r12 */ - and r12, r12, r11 /* HWEXEC/RW & USER */ - rlwimi r13, r12, 0, 26, 26 /* UX = HWEXEC & USER */ - rlwimi r13, r12, 3, 27, 27 /* UW = RW & USER */ - - rlwimi r11,r13,0,26,31 /* Insert static perms */ - - rlwinm r11,r11,0,20,15 /* Clear U0-U3 */ - - /* find the TLB index that caused the fault. It has to be here. */ - tlbsx r10, 0, r10 - - tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */ - - /* Done...restore registers and get out of here. - */ - mfspr r11, SPRN_SPRG7R - mtcr r11 - mfspr r13, SPRN_SPRG5R - mfspr r12, SPRN_SPRG4R - - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - rfi /* Force context change */ - -2: - /* - * The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ - mfspr r11, SPRN_SPRG7R - mtcr r11 - mfspr r13, SPRN_SPRG5R - mfspr r12, SPRN_SPRG4R - - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - b data_access - - /* Instruction Storage Interrupt */ - INSTRUCTION_STORAGE_EXCEPTION - - /* External Input Interrupt */ - EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) - - /* Alignment Interrupt */ - ALIGNMENT_EXCEPTION - - /* Program Interrupt */ - PROGRAM_EXCEPTION - - /* Floating Point Unavailable Interrupt */ -#ifdef CONFIG_PPC_FPU - FP_UNAVAILABLE_EXCEPTION -#else - EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE) -#endif - - /* System Call Interrupt */ - START_EXCEPTION(SystemCall) - NORMAL_EXCEPTION_PROLOG - EXC_XFER_EE_LITE(0x0c00, DoSyscall) - - /* Auxillary Processor Unavailable Interrupt */ - EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) - - /* Decrementer Interrupt */ - DECREMENTER_EXCEPTION - - /* Fixed Internal Timer Interrupt */ - /* TODO: Add FIT support */ - EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE) - - /* Watchdog Timer Interrupt */ - /* TODO: Add watchdog support */ -#ifdef CONFIG_BOOKE_WDT - CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException) -#else - CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception) -#endif - - /* Data TLB Error Interrupt */ - START_EXCEPTION(DataTLBError) - mtspr SPRN_SPRG0, r10 /* Save some working registers */ - mtspr SPRN_SPRG1, r11 - mtspr SPRN_SPRG4W, r12 - mtspr SPRN_SPRG5W, r13 - mfcr r11 - mtspr SPRN_SPRG7W, r11 - mfspr r10, SPRN_DEAR /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, TASK_SIZE@h - cmplw r10, r11 - blt+ 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - - mfspr r12,SPRN_MMUCR - rlwinm r12,r12,0,0,23 /* Clear TID */ - - b 4f - - /* Get the PGD for the current thread */ -3: - mfspr r11,SPRN_SPRG3 - lwz r11,PGDIR(r11) - - /* Load PID into MMUCR TID */ - mfspr r12,SPRN_MMUCR - mfspr r13,SPRN_PID /* Get PID */ - rlwimi r12,r13,0,24,31 /* Set TID */ - -4: - mtspr SPRN_MMUCR,r12 - - rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ - lwzx r11, r12, r11 /* Get pgd/pmd entry */ - rlwinm. 
r12, r11, 0, 0, 20 /* Extract pt base address */ - beq 2f /* Bail if no table */ - - rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ - lwz r11, 4(r12) /* Get pte entry */ - andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ - beq 2f /* Bail if not present */ - - ori r11, r11, _PAGE_ACCESSED - stw r11, 4(r12) - - /* Jump to common tlb load */ - b finish_tlb_load - -2: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ - mfspr r11, SPRN_SPRG7R - mtcr r11 - mfspr r13, SPRN_SPRG5R - mfspr r12, SPRN_SPRG4R - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - b data_access - - /* Instruction TLB Error Interrupt */ - /* - * Nearly the same as above, except we get our - * information from different registers and bailout - * to a different point. - */ - START_EXCEPTION(InstructionTLBError) - mtspr SPRN_SPRG0, r10 /* Save some working registers */ - mtspr SPRN_SPRG1, r11 - mtspr SPRN_SPRG4W, r12 - mtspr SPRN_SPRG5W, r13 - mfcr r11 - mtspr SPRN_SPRG7W, r11 - mfspr r10, SPRN_SRR0 /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, TASK_SIZE@h - cmplw r10, r11 - blt+ 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - - mfspr r12,SPRN_MMUCR - rlwinm r12,r12,0,0,23 /* Clear TID */ - - b 4f - - /* Get the PGD for the current thread */ -3: - mfspr r11,SPRN_SPRG3 - lwz r11,PGDIR(r11) - - /* Load PID into MMUCR TID */ - mfspr r12,SPRN_MMUCR - mfspr r13,SPRN_PID /* Get PID */ - rlwimi r12,r13,0,24,31 /* Set TID */ - -4: - mtspr SPRN_MMUCR,r12 - - rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ - lwzx r11, r12, r11 /* Get pgd/pmd entry */ - rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ - beq 2f /* Bail if no table */ - - rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ - lwz r11, 4(r12) /* Get pte entry */ - andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ - beq 2f /* Bail if not present */ - - ori r11, r11, _PAGE_ACCESSED - stw r11, 4(r12) - - /* Jump to common TLB load point */ - b finish_tlb_load - -2: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ - mfspr r11, SPRN_SPRG7R - mtcr r11 - mfspr r13, SPRN_SPRG5R - mfspr r12, SPRN_SPRG4R - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - b InstructionStorage - - /* Debug Interrupt */ - DEBUG_EXCEPTION - -/* - * Local functions - */ - /* - * Data TLB exceptions will bail out to this point - * if they can't resolve the lightweight TLB fault. - */ -data_access: - NORMAL_EXCEPTION_PROLOG - mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ - stw r5,_ESR(r11) - mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ - EXC_XFER_EE_LITE(0x0300, handle_page_fault) - -/* - - * Both the instruction and data TLB miss get to this - * point to load the TLB. - * r10 - EA of fault - * r11 - available to use - * r12 - Pointer to the 64-bit PTE - * r13 - available to use - * MMUCR - loaded with proper value when we get here - * Upon exit, we reload everything and RFI. - */ -finish_tlb_load: - /* - * We set execute, because we don't have the granularity to - * properly set this at the page level (Linux problem). - * If shared is set, we cause a zero PID->TID load. - * Many of these bits are software only. Bits we don't set - * here we (properly should) assume have the appropriate value. 
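A sketch of the permission mapping that the rlwimi sequences implement, as written out in the DataStorage fast path earlier (finish_tlb_load below performs a subset of the same mapping). This only restates the handlers' own comments: SR is always granted, SW/SX track the kernel-visible PTE bits, and the user rights additionally require _PAGE_USER. The flag values are parameters rather than restated, and tlb_perms is an illustrative name.

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t tlb_perms(bool rw, bool hwexec, bool user,
                              uint32_t SR, uint32_t SW, uint32_t SX,
                              uint32_t UR, uint32_t UW, uint32_t UX)
    {
        uint32_t p = SR;                 /* supervisor read, always granted */
        if (rw)             p |= SW;     /* SW = _PAGE_RW                   */
        if (hwexec)         p |= SX;     /* SX = _PAGE_HWEXEC               */
        if (user)           p |= UR;     /* UR = _PAGE_USER                 */
        if (user && rw)     p |= UW;     /* UW = _PAGE_RW & _PAGE_USER      */
        if (user && hwexec) p |= UX;     /* UX = _PAGE_HWEXEC & _PAGE_USER  */
        return p;
    }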
- */ - - /* Load the next available TLB index */ - lis r13, tlb_44x_index@ha - lwz r13, tlb_44x_index@l(r13) - /* Load the TLB high watermark */ - lis r11, tlb_44x_hwater@ha - lwz r11, tlb_44x_hwater@l(r11) - - /* Increment, rollover, and store TLB index */ - addi r13, r13, 1 - cmpw 0, r13, r11 /* reserve entries */ - ble 7f - li r13, 0 -7: - /* Store the next available TLB index */ - lis r11, tlb_44x_index@ha - stw r13, tlb_44x_index@l(r11) - - lwz r11, 0(r12) /* Get MS word of PTE */ - lwz r12, 4(r12) /* Get LS word of PTE */ - rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */ - tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */ - - /* - * Create PAGEID. This is the faulting address, - * page size, and valid flag. - */ - li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K - rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */ - tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */ - - li r10, PPC44x_TLB_SR@l /* Set SR */ - rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */ - rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */ - rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */ - rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ - and r11, r12, r11 /* HWEXEC & USER */ - rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */ - - rlwimi r12, r10, 0, 26, 31 /* Insert static perms */ - rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */ - tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */ - - /* Done...restore registers and get out of here. - */ - mfspr r11, SPRN_SPRG7R - mtcr r11 - mfspr r13, SPRN_SPRG5R - mfspr r12, SPRN_SPRG4R - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - rfi /* Force context change */ - -/* - * Global functions - */ - -/* - * extern void giveup_altivec(struct task_struct *prev) - * - * The 44x core does not have an AltiVec unit. - */ -_GLOBAL(giveup_altivec) - blr - -/* - * extern void giveup_fpu(struct task_struct *prev) - * - * The 44x core does not have an FPU. - */ -#ifndef CONFIG_PPC_FPU -_GLOBAL(giveup_fpu) - blr -#endif - -/* - * extern void abort(void) - * - * At present, this routine just applies a system reset. - */ -_GLOBAL(abort) - mfspr r13,SPRN_DBCR0 - oris r13,r13,DBCR0_RST_SYSTEM@h - mtspr SPRN_DBCR0,r13 - -_GLOBAL(set_context) - -#ifdef CONFIG_BDI_SWITCH - /* Context switch the PTE pointer for the Abatron BDI2000. - * The PGDIR is the second parameter. - */ - lis r5, abatron_pteptrs@h - ori r5, r5, abatron_pteptrs@l - stw r4, 0x4(r5) -#endif - mtspr SPRN_PID,r3 - isync /* Force context change */ - blr - -/* - * We put a few things here that have to be page-aligned. This stuff - * goes at the beginning of the data segment, which is page-aligned. - */ - .data - .align 12 - .globl sdata -sdata: - .globl empty_zero_page -empty_zero_page: - .space 4096 - -/* - * To support >32-bit physical addresses, we use an 8KB pgdir. - */ - .globl swapper_pg_dir -swapper_pg_dir: - .space 8192 - -/* Reserved 4k for the critical exception stack & 4k for the machine - * check stack per CPU for kernel mode exceptions */ - .section .bss - .align 12 -exception_stack_bottom: - .space BOOKE_EXCEPTION_STACK_SIZE - .globl exception_stack_top -exception_stack_top: - -/* - * This space gets a copy of optional info passed to us by the bootstrap - * which is used to pass parameters into the kernel like root=/dev/sda1, etc. - */ - .globl cmd_line -cmd_line: - .space 512 - -/* - * Room for two PTE pointers, usually the kernel and current user pointers - * to their respective root page table. 
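The replacement policy in finish_tlb_load above fits in a few lines of C: tlb_44x_hwater marks the last replaceable slot, so the pinned entries at the top of the TLB (slot 63 for the kernel) are never chosen. next_tlb_slot is an illustrative name for a sketch of that logic.

    static unsigned int next_tlb_slot(unsigned int *tlb_44x_index,
                                      unsigned int tlb_44x_hwater)
    {
        unsigned int slot = *tlb_44x_index + 1;   /* addi r13,r13,1            */
        if (slot > tlb_44x_hwater)                /* cmpw/ble: wrap past hwater */
            slot = 0;
        *tlb_44x_index = slot;                    /* stw back to tlb_44x_index  */
        return slot;                              /* slot used for the tlbwe trio */
    }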
- */ -abatron_pteptrs: - .space 8 diff --git a/arch/ppc/kernel/head_4xx.S b/arch/ppc/kernel/head_4xx.S deleted file mode 100644 index 10c261c6702..00000000000 --- a/arch/ppc/kernel/head_4xx.S +++ /dev/null @@ -1,1022 +0,0 @@ -/* - * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> - * Initial PowerPC version. - * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> - * Rewritten for PReP - * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> - * Low-level exception handers, MMU support, and rewrite. - * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> - * PowerPC 8xx modifications. - * Copyright (c) 1998-1999 TiVo, Inc. - * PowerPC 403GCX modifications. - * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> - * PowerPC 403GCX/405GP modifications. - * Copyright 2000 MontaVista Software Inc. - * PPC405 modifications - * PowerPC 403GCX/405GP modifications. - * Author: MontaVista Software, Inc. - * frank_rowand@mvista.com or source@mvista.com - * debbie_chu@mvista.com - * - * - * Module name: head_4xx.S - * - * Description: - * Kernel execution entry point code. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include <linux/config.h> -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/mmu.h> -#include <asm/pgtable.h> -#include <asm/ibm4xx.h> -#include <asm/cputable.h> -#include <asm/thread_info.h> -#include <asm/ppc_asm.h> -#include <asm/asm-offsets.h> - -/* As with the other PowerPC ports, it is expected that when code - * execution begins here, the following registers contain valid, yet - * optional, information: - * - * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) - * r4 - Starting address of the init RAM disk - * r5 - Ending address of the init RAM disk - * r6 - Start of kernel command line string (e.g. "mem=96m") - * r7 - End of kernel command line string - * - * This is all going to change RSN when we add bi_recs....... -- Dan - */ - .text -_GLOBAL(_stext) -_GLOBAL(_start) - - /* Save parameters we are passed. - */ - mr r31,r3 - mr r30,r4 - mr r29,r5 - mr r28,r6 - mr r27,r7 - - /* We have to turn on the MMU right away so we get cache modes - * set correctly. - */ - bl initial_mmu - -/* We now have the lower 16 Meg mapped into TLB entries, and the caches - * ready to work. - */ -turn_on_mmu: - lis r0,MSR_KERNEL@h - ori r0,r0,MSR_KERNEL@l - mtspr SPRN_SRR1,r0 - lis r0,start_here@h - ori r0,r0,start_here@l - mtspr SPRN_SRR0,r0 - SYNC - rfi /* enables MMU */ - b . /* prevent prefetch past rfi */ - -/* - * This area is used for temporarily saving registers during the - * critical exception prolog. - */ - . = 0xc0 -crit_save: -_GLOBAL(crit_r10) - .space 4 -_GLOBAL(crit_r11) - .space 4 - -/* - * Exception vector entry code. This code runs with address translation - * turned off (i.e. using physical addresses). We assume SPRG3 has the - * physical address of the current task thread_struct. - * Note that we have to have decremented r1 before we write to any fields - * of the exception frame, since a critical interrupt could occur at any - * time, and it will write to the area immediately below the current r1. 
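Before the prolog macros that follow, it may help to see the stack selection they perform written out in C. This is a sketch only: exception_frame is an illustrative name, thread_info is the (virtual) thread_info pointer read from the thread_struct, MSR_PR is the standard 0x4000 bit, and KERNELBASE/tophys assume the usual arch/ppc case of RAM starting at physical 0.

    #include <stdint.h>

    #define MSR_PR     0x4000u       /* set in SRR1 if the exception came from user mode */
    #define KERNELBASE 0xc0000000u   /* usual value; tophys(v) is then just v - KERNELBASE */

    static uint32_t exception_frame(uint32_t srr1, uint32_t r1,
                                    uint32_t thread_info, uint32_t thread_size,
                                    uint32_t int_frame_size)
    {
        /* from user mode: start at the top of the task's kernel stack;
         * from kernel mode: push a frame below the interrupted r1. */
        uint32_t sp = (srr1 & MSR_PR) ? thread_info + thread_size : r1;
        return (sp - int_frame_size) - KERNELBASE;  /* physical pointer, MMU is off */
    }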
- */ -#define NORMAL_EXCEPTION_PROLOG \ - mtspr SPRN_SPRG0,r10; /* save two registers to work with */\ - mtspr SPRN_SPRG1,r11; \ - mtspr SPRN_SPRG2,r1; \ - mfcr r10; /* save CR in r10 for now */\ - mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ - andi. r11,r11,MSR_PR; \ - beq 1f; \ - mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\ - lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\ - addi r1,r1,THREAD_SIZE; \ -1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\ - tophys(r11,r1); \ - stw r10,_CCR(r11); /* save various registers */\ - stw r12,GPR12(r11); \ - stw r9,GPR9(r11); \ - mfspr r10,SPRN_SPRG0; \ - stw r10,GPR10(r11); \ - mfspr r12,SPRN_SPRG1; \ - stw r12,GPR11(r11); \ - mflr r10; \ - stw r10,_LINK(r11); \ - mfspr r10,SPRN_SPRG2; \ - mfspr r12,SPRN_SRR0; \ - stw r10,GPR1(r11); \ - mfspr r9,SPRN_SRR1; \ - stw r10,0(r11); \ - rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ - stw r0,GPR0(r11); \ - SAVE_4GPRS(3, r11); \ - SAVE_2GPRS(7, r11) - -/* - * Exception prolog for critical exceptions. This is a little different - * from the normal exception prolog above since a critical exception - * can potentially occur at any point during normal exception processing. - * Thus we cannot use the same SPRG registers as the normal prolog above. - * Instead we use a couple of words of memory at low physical addresses. - * This is OK since we don't support SMP on these processors. - */ -#define CRITICAL_EXCEPTION_PROLOG \ - stw r10,crit_r10@l(0); /* save two registers to work with */\ - stw r11,crit_r11@l(0); \ - mfcr r10; /* save CR in r10 for now */\ - mfspr r11,SPRN_SRR3; /* check whether user or kernel */\ - andi. r11,r11,MSR_PR; \ - lis r11,critical_stack_top@h; \ - ori r11,r11,critical_stack_top@l; \ - beq 1f; \ - /* COMING FROM USER MODE */ \ - mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\ - lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ - addi r11,r11,THREAD_SIZE; \ -1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ - tophys(r11,r11); \ - stw r10,_CCR(r11); /* save various registers */\ - stw r12,GPR12(r11); \ - stw r9,GPR9(r11); \ - mflr r10; \ - stw r10,_LINK(r11); \ - mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ - stw r12,_DEAR(r11); /* since they may have had stuff */\ - mfspr r9,SPRN_ESR; /* in them at the point where the */\ - stw r9,_ESR(r11); /* exception was taken */\ - mfspr r12,SPRN_SRR2; \ - stw r1,GPR1(r11); \ - mfspr r9,SPRN_SRR3; \ - stw r1,0(r11); \ - tovirt(r1,r11); \ - rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ - stw r0,GPR0(r11); \ - SAVE_4GPRS(3, r11); \ - SAVE_2GPRS(7, r11) - - /* - * State at this point: - * r9 saved in stack frame, now saved SRR3 & ~MSR_WE - * r10 saved in crit_r10 and in stack frame, trashed - * r11 saved in crit_r11 and in stack frame, - * now phys stack/exception frame pointer - * r12 saved in stack frame, now saved SRR2 - * CR saved in stack frame, CR0.EQ = !SRR3.PR - * LR, DEAR, ESR in stack frame - * r1 saved in stack frame, now virt stack/excframe pointer - * r0, r3-r8 saved in stack frame - */ - -/* - * Exception vectors. - */ -#define START_EXCEPTION(n, label) \ - . 
= n; \ -label: - -#define EXCEPTION(n, label, hdlr, xfer) \ - START_EXCEPTION(n, label); \ - NORMAL_EXCEPTION_PROLOG; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - xfer(n, hdlr) - -#define CRITICAL_EXCEPTION(n, label, hdlr) \ - START_EXCEPTION(n, label); \ - CRITICAL_EXCEPTION_PROLOG; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ - NOCOPY, crit_transfer_to_handler, \ - ret_from_crit_exc) - -#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \ - li r10,trap; \ - stw r10,TRAP(r11); \ - lis r10,msr@h; \ - ori r10,r10,msr@l; \ - copyee(r10, r9); \ - bl tfer; \ - .long hdlr; \ - .long ret - -#define COPY_EE(d, s) rlwimi d,s,0,16,16 -#define NOCOPY(d, s) - -#define EXC_XFER_STD(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \ - ret_from_except_full) - -#define EXC_XFER_LITE(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \ - ret_from_except) - -#define EXC_XFER_EE(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \ - ret_from_except_full) - -#define EXC_XFER_EE_LITE(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \ - ret_from_except) - - -/* - * 0x0100 - Critical Interrupt Exception - */ - CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception) - -/* - * 0x0200 - Machine Check Exception - */ - CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception) - -/* - * 0x0300 - Data Storage Exception - * This happens for just a few reasons. U0 set (but we don't do that), - * or zone protection fault (user violation, write to protected page). - * If this is just an update of modified status, we do that quickly - * and exit. Otherwise, we call heavywight functions to do the work. - */ - START_EXCEPTION(0x0300, DataStorage) - mtspr SPRN_SPRG0, r10 /* Save some working registers */ - mtspr SPRN_SPRG1, r11 -#ifdef CONFIG_403GCX - stw r12, 0(r0) - stw r9, 4(r0) - mfcr r11 - mfspr r12, SPRN_PID - stw r11, 8(r0) - stw r12, 12(r0) -#else - mtspr SPRN_SPRG4, r12 - mtspr SPRN_SPRG5, r9 - mfcr r11 - mfspr r12, SPRN_PID - mtspr SPRN_SPRG7, r11 - mtspr SPRN_SPRG6, r12 -#endif - - /* First, check if it was a zone fault (which means a user - * tried to access a kernel or read-protected page - always - * a SEGV). All other faults here must be stores, so no - * need to check ESR_DST as well. */ - mfspr r10, SPRN_ESR - andis. r10, r10, ESR_DIZ@h - bne 2f - - mfspr r10, SPRN_DEAR /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, TASK_SIZE@h - cmplw r10, r11 - blt+ 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - li r9, 0 - mtspr SPRN_PID, r9 /* TLB will have 0 TID */ - b 4f - - /* Get the PGD for the current thread. - */ -3: - mfspr r11,SPRN_SPRG3 - lwz r11,PGDIR(r11) -4: - tophys(r11, r11) - rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ - lwz r11, 0(r11) /* Get L1 entry */ - rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ - beq 2f /* Bail if no table */ - - rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ - lwz r11, 0(r12) /* Get Linux PTE */ - - andi. r9, r11, _PAGE_RW /* Is it writeable? */ - beq 2f /* Bail if not */ - - /* Update 'changed'. - */ - ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE - stw r11, 0(r12) /* Update Linux page table */ - - /* Most of the Linux PTE is ready to load into the TLB LO. 
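Two conventions in the EXC_XFER_* macros defined earlier are easy to miss: the n+1 variants go through the lighter transfer_to_handler path rather than transfer_to_handler_full, recording the odd value in the frame's trap word, and COPY_EE carries the interrupted context's MSR_EE into the handler's MSR, since rlwimi d,s,0,16,16 copies exactly bit 16, i.e. 0x8000. A sketch of the MSR part follows; handler_msr is an illustrative name.

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_EE 0x8000u   /* external interrupt enable, bit 16 in IBM numbering */

    static uint32_t handler_msr(uint32_t msr_kernel, uint32_t saved_srr1, bool copy_ee)
    {
        uint32_t msr = msr_kernel;
        if (copy_ee)   /* COPY_EE: re-enable interrupts only if they were enabled */
            msr = (msr & ~MSR_EE) | (saved_srr1 & MSR_EE);
        return msr;
    }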
- * We set ZSEL, where only the LS-bit determines user access. - * We set execute, because we don't have the granularity to - * properly set this at the page level (Linux problem). - * If shared is set, we cause a zero PID->TID load. - * Many of these bits are software only. Bits we don't set - * here we (properly should) assume have the appropriate value. - */ - li r12, 0x0ce2 - andc r11, r11, r12 /* Make sure 20, 21 are zero */ - - /* find the TLB index that caused the fault. It has to be here. - */ - tlbsx r9, 0, r10 - - tlbwe r11, r9, TLB_DATA /* Load TLB LO */ - - /* Done...restore registers and get out of here. - */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else - mfspr r12, SPRN_SPRG6 - mfspr r11, SPRN_SPRG7 - mtspr SPRN_PID, r12 - mtcr r11 - mfspr r9, SPRN_SPRG5 - mfspr r12, SPRN_SPRG4 -#endif - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - PPC405_ERR77_SYNC - rfi /* Should sync shadow TLBs */ - b . /* prevent prefetch past rfi */ - -2: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else - mfspr r12, SPRN_SPRG6 - mfspr r11, SPRN_SPRG7 - mtspr SPRN_PID, r12 - mtcr r11 - mfspr r9, SPRN_SPRG5 - mfspr r12, SPRN_SPRG4 -#endif - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - b DataAccess - -/* - * 0x0400 - Instruction Storage Exception - * This is caused by a fetch from non-execute or guarded pages. - */ - START_EXCEPTION(0x0400, InstructionAccess) - NORMAL_EXCEPTION_PROLOG - mr r4,r12 /* Pass SRR0 as arg2 */ - li r5,0 /* Pass zero as arg3 */ - EXC_XFER_EE_LITE(0x400, handle_page_fault) - -/* 0x0500 - External Interrupt Exception */ - EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) - -/* 0x0600 - Alignment Exception */ - START_EXCEPTION(0x0600, Alignment) - NORMAL_EXCEPTION_PROLOG - mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */ - stw r4,_DEAR(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_EE(0x600, alignment_exception) - -/* 0x0700 - Program Exception */ - START_EXCEPTION(0x0700, ProgramCheck) - NORMAL_EXCEPTION_PROLOG - mfspr r4,SPRN_ESR /* Grab the ESR and save it */ - stw r4,_ESR(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_STD(0x700, program_check_exception) - - EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE) - -/* 0x0C00 - System Call Exception */ - START_EXCEPTION(0x0C00, SystemCall) - NORMAL_EXCEPTION_PROLOG - EXC_XFER_EE_LITE(0xc00, DoSyscall) - - EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE) - -/* 0x1000 - Programmable Interval Timer (PIT) Exception */ - START_EXCEPTION(0x1000, Decrementer) - NORMAL_EXCEPTION_PROLOG - lis r0,TSR_PIS@h - mtspr SPRN_TSR,r0 /* Clear the PIT exception */ - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_LITE(0x1000, timer_interrupt) - -#if 0 -/* NOTE: - * FIT and WDT handlers are not implemented yet. 
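The PTE-to-TLB_DATA conversion in the fast path above really is just the single andc: per the comment, the remaining Linux PTE bits already sit where the 4xx TLB LO word expects them, so only the software-only bits (mask 0x0ce2) have to be cleared. The same mask shows up again in finish_tlb_load further down. pte_to_tlb_data is an illustrative name.

    #include <stdint.h>

    static inline uint32_t pte_to_tlb_data(uint32_t linux_pte)
    {
        return linux_pte & ~0x0ce2u;   /* andc r11,r11,r12 with r12 = 0x0ce2 */
    }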
- */ - -/* 0x1010 - Fixed Interval Timer (FIT) Exception -*/ - STND_EXCEPTION(0x1010, FITException, unknown_exception) - -/* 0x1020 - Watchdog Timer (WDT) Exception -*/ -#ifdef CONFIG_BOOKE_WDT - CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException) -#else - CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception) -#endif -#endif - -/* 0x1100 - Data TLB Miss Exception - * As the name implies, translation is not in the MMU, so search the - * page tables and fix it. The only purpose of this function is to - * load TLB entries from the page table if they exist. - */ - START_EXCEPTION(0x1100, DTLBMiss) - mtspr SPRN_SPRG0, r10 /* Save some working registers */ - mtspr SPRN_SPRG1, r11 -#ifdef CONFIG_403GCX - stw r12, 0(r0) - stw r9, 4(r0) - mfcr r11 - mfspr r12, SPRN_PID - stw r11, 8(r0) - stw r12, 12(r0) -#else - mtspr SPRN_SPRG4, r12 - mtspr SPRN_SPRG5, r9 - mfcr r11 - mfspr r12, SPRN_PID - mtspr SPRN_SPRG7, r11 - mtspr SPRN_SPRG6, r12 -#endif - mfspr r10, SPRN_DEAR /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, TASK_SIZE@h - cmplw r10, r11 - blt+ 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - li r9, 0 - mtspr SPRN_PID, r9 /* TLB will have 0 TID */ - b 4f - - /* Get the PGD for the current thread. - */ -3: - mfspr r11,SPRN_SPRG3 - lwz r11,PGDIR(r11) -4: - tophys(r11, r11) - rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ - lwz r12, 0(r11) /* Get L1 entry */ - andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ - beq 2f /* Bail if no table */ - - rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ - lwz r11, 0(r12) /* Get Linux PTE */ - andi. r9, r11, _PAGE_PRESENT - beq 5f - - ori r11, r11, _PAGE_ACCESSED - stw r11, 0(r12) - - /* Create TLB tag. This is the faulting address plus a static - * set of bits. These are size, valid, E, U0. - */ - li r12, 0x00c0 - rlwimi r10, r12, 0, 20, 31 - - b finish_tlb_load - -2: /* Check for possible large-page pmd entry */ - rlwinm. r9, r12, 2, 22, 24 - beq 5f - - /* Create TLB tag. This is the faulting address, plus a static - * set of bits (valid, E, U0) plus the size from the PMD. - */ - ori r9, r9, 0x40 - rlwimi r10, r9, 0, 20, 31 - mr r11, r12 - - b finish_tlb_load - -5: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else - mfspr r12, SPRN_SPRG6 - mfspr r11, SPRN_SPRG7 - mtspr SPRN_PID, r12 - mtcr r11 - mfspr r9, SPRN_SPRG5 - mfspr r12, SPRN_SPRG4 -#endif - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - b DataAccess - -/* 0x1200 - Instruction TLB Miss Exception - * Nearly the same as above, except we get our information from different - * registers and bailout to a different point. - */ - START_EXCEPTION(0x1200, ITLBMiss) - mtspr SPRN_SPRG0, r10 /* Save some working registers */ - mtspr SPRN_SPRG1, r11 -#ifdef CONFIG_403GCX - stw r12, 0(r0) - stw r9, 4(r0) - mfcr r11 - mfspr r12, SPRN_PID - stw r11, 8(r0) - stw r12, 12(r0) -#else - mtspr SPRN_SPRG4, r12 - mtspr SPRN_SPRG5, r9 - mfcr r11 - mfspr r12, SPRN_PID - mtspr SPRN_SPRG7, r11 - mtspr SPRN_SPRG6, r12 -#endif - mfspr r10, SPRN_SRR0 /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. 
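A sketch of the TLB tag the miss handlers above assemble before branching to finish_tlb_load: the faulting address keeps its page-number bits and the low 12 bits are replaced by control bits, 0x00c0 for an ordinary 4 KiB page, or the size field taken from the PMD plus the static 0x40 for a large-page pmd entry. make_tlb_tag is an illustrative name.

    #include <stdint.h>

    static inline uint32_t make_tlb_tag(uint32_t ea, uint32_t ctrl)
    {
        return (ea & 0xfffff000u) | (ctrl & 0x00000fffu);  /* rlwimi r10,rctrl,0,20,31 */
    }

    /* ordinary page:   make_tlb_tag(ea, 0x00c0)
     * large pmd entry: make_tlb_tag(ea, size_bits_from_pmd | 0x40) */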
- */ - lis r11, TASK_SIZE@h - cmplw r10, r11 - blt+ 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - li r9, 0 - mtspr SPRN_PID, r9 /* TLB will have 0 TID */ - b 4f - - /* Get the PGD for the current thread. - */ -3: - mfspr r11,SPRN_SPRG3 - lwz r11,PGDIR(r11) -4: - tophys(r11, r11) - rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ - lwz r12, 0(r11) /* Get L1 entry */ - andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ - beq 2f /* Bail if no table */ - - rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ - lwz r11, 0(r12) /* Get Linux PTE */ - andi. r9, r11, _PAGE_PRESENT - beq 5f - - ori r11, r11, _PAGE_ACCESSED - stw r11, 0(r12) - - /* Create TLB tag. This is the faulting address plus a static - * set of bits. These are size, valid, E, U0. - */ - li r12, 0x00c0 - rlwimi r10, r12, 0, 20, 31 - - b finish_tlb_load - -2: /* Check for possible large-page pmd entry */ - rlwinm. r9, r12, 2, 22, 24 - beq 5f - - /* Create TLB tag. This is the faulting address, plus a static - * set of bits (valid, E, U0) plus the size from the PMD. - */ - ori r9, r9, 0x40 - rlwimi r10, r9, 0, 20, 31 - mr r11, r12 - - b finish_tlb_load - -5: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else - mfspr r12, SPRN_SPRG6 - mfspr r11, SPRN_SPRG7 - mtspr SPRN_PID, r12 - mtcr r11 - mfspr r9, SPRN_SPRG5 - mfspr r12, SPRN_SPRG4 -#endif - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - b InstructionAccess - - EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE) -#ifdef CONFIG_IBM405_ERR51 - /* 405GP errata 51 */ - START_EXCEPTION(0x1700, Trap_17) - b DTLBMiss -#else - EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE) -#endif - EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE) - -/* Check for a single step debug exception while in an exception - * handler before state has been saved. This is to catch the case - * where an instruction that we are trying to single step causes - * an exception (eg ITLB/DTLB miss) and thus the first instruction of - * the exception handler generates a single step debug exception. - * - * If we get a debug trap on the first instruction of an exception handler, - * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is - * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR). - * The exception handler was handling a non-critical interrupt, so it will - * save (and later restore) the MSR via SPRN_SRR1, which will still have - * the MSR_DE bit set. 
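The comment above describes a filter; written as C it is a three-way test, which the 0x2000 handler that follows implements with DBSR, SRR2 and SRR3. MSR_PR and MSR_IR are the standard 0x4000 and 0x0020 bits, 0x2100 is the end of the vector area checked by the code, and debug_trap_is_spurious is an illustrative name for this sketch.

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_PR 0x4000u
    #define MSR_IR 0x0020u

    static bool debug_trap_is_spurious(bool dbsr_ic, uint32_t srr3, uint32_t srr2)
    {
        if (!dbsr_ic)
            return false;                  /* not a single-step/branch-taken event   */
        if ((srr3 & (MSR_IR | MSR_PR)) == 0)
            return true;                   /* kernel with MMU off: an exception prolog */
        if (srr2 <= 0x2100)
            return true;                   /* PC still inside the exception vectors  */
        return false;                      /* genuine debug event, handle normally   */
    }

When this returns true, the handler clears DE in the saved SRR3 value and the IC bit in DBSR, then returns with rfci.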
- */ - /* 0x2000 - Debug Exception */ - START_EXCEPTION(0x2000, DebugTrap) - CRITICAL_EXCEPTION_PROLOG - - /* - * If this is a single step or branch-taken exception in an - * exception entry sequence, it was probably meant to apply to - * the code where the exception occurred (since exception entry - * doesn't turn off DE automatically). We simulate the effect - * of turning off DE on entry to an exception handler by turning - * off DE in the SRR3 value and clearing the debug status. - */ - mfspr r10,SPRN_DBSR /* check single-step/branch taken */ - andis. r10,r10,DBSR_IC@h - beq+ 2f - - andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */ - beq 1f /* branch and fix it up */ - - mfspr r10,SPRN_SRR2 /* Faulting instruction address */ - cmplwi r10,0x2100 - bgt+ 2f /* address above exception vectors */ - - /* here it looks like we got an inappropriate debug exception. */ -1: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */ - lis r10,DBSR_IC@h /* clear the IC event */ - mtspr SPRN_DBSR,r10 - /* restore state and get out */ - lwz r10,_CCR(r11) - lwz r0,GPR0(r11) - lwz r1,GPR1(r11) - mtcrf 0x80,r10 - mtspr SPRN_SRR2,r12 - mtspr SPRN_SRR3,r9 - lwz r9,GPR9(r11) - lwz r12,GPR12(r11) - lwz r10,crit_r10@l(0) - lwz r11,crit_r11@l(0) - PPC405_ERR77_SYNC - rfci - b . - - /* continue normal handling for a critical exception... */ -2: mfspr r4,SPRN_DBSR - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_TEMPLATE(DebugException, 0x2002, \ - (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ - NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) - -/* - * The other Data TLB exceptions bail out to this point - * if they can't resolve the lightweight TLB fault. - */ -DataAccess: - NORMAL_EXCEPTION_PROLOG - mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ - stw r5,_ESR(r11) - mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ - EXC_XFER_EE_LITE(0x300, handle_page_fault) - -/* Other PowerPC processors, namely those derived from the 6xx-series - * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved. - * However, for the 4xx-series processors these are neither defined nor - * reserved. - */ - - /* Damn, I came up one instruction too many to fit into the - * exception space :-). Both the instruction and data TLB - * miss get to this point to load the TLB. - * r10 - TLB_TAG value - * r11 - Linux PTE - * r12, r9 - avilable to use - * PID - loaded with proper value when we get here - * Upon exit, we reload everything and RFI. - * Actually, it will fit now, but oh well.....a common place - * to load the TLB. - */ -tlb_4xx_index: - .long 0 -finish_tlb_load: - /* load the next available TLB index. - */ - lwz r9, tlb_4xx_index@l(0) - addi r9, r9, 1 - andi. r9, r9, (PPC4XX_TLB_SIZE-1) - stw r9, tlb_4xx_index@l(0) - -6: - /* - * Clear out the software-only bits in the PTE to generate the - * TLB_DATA value. These are the bottom 2 bits of the RPM, the - * top 3 bits of the zone field, and M. - */ - li r12, 0x0ce2 - andc r11, r11, r12 - - tlbwe r11, r9, TLB_DATA /* Load TLB LO */ - tlbwe r10, r9, TLB_TAG /* Load TLB HI */ - - /* Done...restore registers and get out of here. - */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else - mfspr r12, SPRN_SPRG6 - mfspr r11, SPRN_SPRG7 - mtspr SPRN_PID, r12 - mtcr r11 - mfspr r9, SPRN_SPRG5 - mfspr r12, SPRN_SPRG4 -#endif - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - PPC405_ERR77_SYNC - rfi /* Should sync shadow TLBs */ - b . 
/* prevent prefetch past rfi */ - -/* extern void giveup_fpu(struct task_struct *prev) - * - * The PowerPC 4xx family of processors do not have an FPU, so this just - * returns. - */ -_GLOBAL(giveup_fpu) - blr - -/* This is where the main kernel code starts. - */ -start_here: - - /* ptr to current */ - lis r2,init_task@h - ori r2,r2,init_task@l - - /* ptr to phys current thread */ - tophys(r4,r2) - addi r4,r4,THREAD /* init task's THREAD */ - mtspr SPRN_SPRG3,r4 - - /* stack */ - lis r1,init_thread_union@ha - addi r1,r1,init_thread_union@l - li r0,0 - stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) - - bl early_init /* We have to do this with MMU on */ - -/* - * Decide what sort of machine this is and initialize the MMU. - */ - mr r3,r31 - mr r4,r30 - mr r5,r29 - mr r6,r28 - mr r7,r27 - bl machine_init - bl MMU_init - -/* Go back to running unmapped so we can load up new values - * and change to using our exception vectors. - * On the 4xx, all we have to do is invalidate the TLB to clear - * the old 16M byte TLB mappings. - */ - lis r4,2f@h - ori r4,r4,2f@l - tophys(r4,r4) - lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h - ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l - mtspr SPRN_SRR0,r4 - mtspr SPRN_SRR1,r3 - rfi - b . /* prevent prefetch past rfi */ - -/* Load up the kernel context */ -2: - sync /* Flush to memory before changing TLB */ - tlbia - isync /* Flush shadow TLBs */ - - /* set up the PTE pointers for the Abatron bdiGDB. - */ - lis r6, swapper_pg_dir@h - ori r6, r6, swapper_pg_dir@l - lis r5, abatron_pteptrs@h - ori r5, r5, abatron_pteptrs@l - stw r5, 0xf0(r0) /* Must match your Abatron config file */ - tophys(r5,r5) - stw r6, 0(r5) - -/* Now turn on the MMU for real! */ - lis r4,MSR_KERNEL@h - ori r4,r4,MSR_KERNEL@l - lis r3,start_kernel@h - ori r3,r3,start_kernel@l - mtspr SPRN_SRR0,r3 - mtspr SPRN_SRR1,r4 - rfi /* enable MMU and jump to start_kernel */ - b . /* prevent prefetch past rfi */ - -/* Set up the initial MMU state so we can do the first level of - * kernel initialization. This maps the first 16 MBytes of memory 1:1 - * virtual to physical and more importantly sets the cache mode. - */ -initial_mmu: - tlbia /* Invalidate all TLB entries */ - isync - - /* We should still be executing code at physical address 0x0000xxxx - * at this point. However, start_here is at virtual address - * 0xC000xxxx. So, set up a TLB mapping to cover this once - * translation is enabled. - */ - - lis r3,KERNELBASE@h /* Load the kernel virtual address */ - ori r3,r3,KERNELBASE@l - tophys(r4,r3) /* Load the kernel physical address */ - - iccci r0,r3 /* Invalidate the i-cache before use */ - - /* Load the kernel PID. - */ - li r0,0 - mtspr SPRN_PID,r0 - sync - - /* Configure and load two entries into TLB slots 62 and 63. - * In case we are pinning TLBs, these are reserved in by the - * other TLB functions. If not reserving, then it doesn't - * matter where they are loaded. - */ - clrrwi r4,r4,10 /* Mask off the real page number */ - ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ - - clrrwi r3,r3,10 /* Mask off the effective page number */ - ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M)) - - li r0,63 /* TLB slot 63 */ - - tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ - tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ - -#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE) - - /* Load a TLB entry for the UART, so that ppc4xx_progress() can use - * the UARTs nice and early. We use a 4k real==virtual mapping. 
*/ - - lis r3,SERIAL_DEBUG_IO_BASE@h - ori r3,r3,SERIAL_DEBUG_IO_BASE@l - mr r4,r3 - clrrwi r4,r4,12 - ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G) - - clrrwi r3,r3,12 - ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) - - li r0,0 /* TLB slot 0 */ - tlbwe r4,r0,TLB_DATA - tlbwe r3,r0,TLB_TAG -#endif /* CONFIG_SERIAL_DEBUG_TEXT && SERIAL_DEBUG_IO_BASE */ - - isync - - /* Establish the exception vector base - */ - lis r4,KERNELBASE@h /* EVPR only uses the high 16-bits */ - tophys(r0,r4) /* Use the physical address */ - mtspr SPRN_EVPR,r0 - - blr - -_GLOBAL(abort) - mfspr r13,SPRN_DBCR0 - oris r13,r13,DBCR0_RST_SYSTEM@h - mtspr SPRN_DBCR0,r13 - -_GLOBAL(set_context) - -#ifdef CONFIG_BDI_SWITCH - /* Context switch the PTE pointer for the Abatron BDI2000. - * The PGDIR is the second parameter. - */ - lis r5, KERNELBASE@h - lwz r5, 0xf0(r5) - stw r4, 0x4(r5) -#endif - sync - mtspr SPRN_PID,r3 - isync /* Need an isync to flush shadow */ - /* TLBs after changing PID */ - blr - -/* We put a few things here that have to be page-aligned. This stuff - * goes at the beginning of the data segment, which is page-aligned. - */ - .data - .align 12 - .globl sdata -sdata: - .globl empty_zero_page -empty_zero_page: - .space 4096 - .globl swapper_pg_dir -swapper_pg_dir: - .space 4096 - - -/* Stack for handling critical exceptions from kernel mode */ - .section .bss - .align 12 -exception_stack_bottom: - .space 4096 -critical_stack_top: - .globl exception_stack_top -exception_stack_top: - -/* This space gets a copy of optional info passed to us by the bootstrap - * which is used to pass parameters into the kernel like root=/dev/sda1, etc. - */ - .globl cmd_line -cmd_line: - .space 512 - -/* Room for two PTE pointers, usually the kernel and current user pointers - * to their respective root page table. - */ -abatron_pteptrs: - .space 8 diff --git a/arch/ppc/kernel/head_8xx.S b/arch/ppc/kernel/head_8xx.S deleted file mode 100644 index de097874222..00000000000 --- a/arch/ppc/kernel/head_8xx.S +++ /dev/null @@ -1,860 +0,0 @@ -/* - * arch/ppc/kernel/except_8xx.S - * - * PowerPC version - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP - * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> - * Low-level exception handlers and MMU support - * rewritten by Paul Mackerras. - * Copyright (C) 1996 Paul Mackerras. - * MPC8xx modifications by Dan Malek - * Copyright (C) 1997 Dan Malek (dmalek@jlc.net). - * - * This file contains low-level support and setup for PowerPC 8xx - * embedded processors, including trap and interrupt dispatch. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include <linux/config.h> -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/mmu.h> -#include <asm/cache.h> -#include <asm/pgtable.h> -#include <asm/cputable.h> -#include <asm/thread_info.h> -#include <asm/ppc_asm.h> -#include <asm/asm-offsets.h> - -/* Macro to make the code more readable. */ -#ifdef CONFIG_8xx_CPU6 -#define DO_8xx_CPU6(val, reg) \ - li reg, val; \ - stw reg, 12(r0); \ - lwz reg, 12(r0); -#else -#define DO_8xx_CPU6(val, reg) -#endif - .text - .globl _stext -_stext: - .text - .globl _start -_start: - -/* MPC8xx - * This port was done on an MBX board with an 860. 
Right now I only - * support an ELF compressed (zImage) boot from EPPC-Bug because the - * code there loads up some registers before calling us: - * r3: ptr to board info data - * r4: initrd_start or if no initrd then 0 - * r5: initrd_end - unused if r4 is 0 - * r6: Start of command line string - * r7: End of command line string - * - * I decided to use conditional compilation instead of checking PVR and - * adding more processor specific branches around code I don't need. - * Since this is an embedded processor, I also appreciate any memory - * savings I can get. - * - * The MPC8xx does not have any BATs, but it supports large page sizes. - * We first initialize the MMU to support 8M byte pages, then load one - * entry into each of the instruction and data TLBs to map the first - * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to - * the "internal" processor registers before MMU_init is called. - * - * The TLB code currently contains a major hack. Since I use the condition - * code register, I have to save and restore it. I am out of registers, so - * I just store it in memory location 0 (the TLB handlers are not reentrant). - * To avoid making any decisions, I need to use the "segment" valid bit - * in the first level table, but that would require many changes to the - * Linux page directory/table functions that I don't want to do right now. - * - * I used to use SPRG2 for a temporary register in the TLB handler, but it - * has since been put to other uses. I now use a hack to save a register - * and the CCR at memory location 0.....Someday I'll fix this..... - * -- Dan - */ - .globl __start -__start: - mr r31,r3 /* save parameters */ - mr r30,r4 - mr r29,r5 - mr r28,r6 - mr r27,r7 - - /* We have to turn on the MMU right away so we get cache modes - * set correctly. - */ - bl initial_mmu - -/* We now have the lower 8 Meg mapped into TLB entries, and the caches - * ready to work. - */ - -turn_on_mmu: - mfmsr r0 - ori r0,r0,MSR_DR|MSR_IR - mtspr SPRN_SRR1,r0 - lis r0,start_here@h - ori r0,r0,start_here@l - mtspr SPRN_SRR0,r0 - SYNC - rfi /* enables MMU */ - -/* - * Exception entry code. This code runs with address translation - * turned off, i.e. using physical addresses. - * We assume sprg3 has the physical address of the current - * task's thread_struct. - */ -#define EXCEPTION_PROLOG \ - mtspr SPRN_SPRG0,r10; \ - mtspr SPRN_SPRG1,r11; \ - mfcr r10; \ - EXCEPTION_PROLOG_1; \ - EXCEPTION_PROLOG_2 - -#define EXCEPTION_PROLOG_1 \ - mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \ - andi. r11,r11,MSR_PR; \ - tophys(r11,r1); /* use tophys(r1) if kernel */ \ - beq 1f; \ - mfspr r11,SPRN_SPRG3; \ - lwz r11,THREAD_INFO-THREAD(r11); \ - addi r11,r11,THREAD_SIZE; \ - tophys(r11,r11); \ -1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */ - - -#define EXCEPTION_PROLOG_2 \ - CLR_TOP32(r11); \ - stw r10,_CCR(r11); /* save registers */ \ - stw r12,GPR12(r11); \ - stw r9,GPR9(r11); \ - mfspr r10,SPRN_SPRG0; \ - stw r10,GPR10(r11); \ - mfspr r12,SPRN_SPRG1; \ - stw r12,GPR11(r11); \ - mflr r10; \ - stw r10,_LINK(r11); \ - mfspr r12,SPRN_SRR0; \ - mfspr r9,SPRN_SRR1; \ - stw r1,GPR1(r11); \ - stw r1,0(r11); \ - tovirt(r1,r11); /* set new kernel sp */ \ - li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ - MTMSRD(r10); /* (except for mach check in rtas) */ \ - stw r0,GPR0(r11); \ - SAVE_4GPRS(3, r11); \ - SAVE_2GPRS(7, r11) - -/* - * Note: code which follows this uses cr0.eq (set if from kernel), - * r11, r12 (SRR0), and r9 (SRR1). 
- * - * Note2: once we have set r1 we are in a position to take exceptions - * again, and we could thus set MSR:RI at that point. - */ - -/* - * Exception vectors. - */ -#define EXCEPTION(n, label, hdlr, xfer) \ - . = n; \ -label: \ - EXCEPTION_PROLOG; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - xfer(n, hdlr) - -#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \ - li r10,trap; \ - stw r10,TRAP(r11); \ - li r10,MSR_KERNEL; \ - copyee(r10, r9); \ - bl tfer; \ -i##n: \ - .long hdlr; \ - .long ret - -#define COPY_EE(d, s) rlwimi d,s,0,16,16 -#define NOCOPY(d, s) - -#define EXC_XFER_STD(n, hdlr) \ - EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \ - ret_from_except_full) - -#define EXC_XFER_LITE(n, hdlr) \ - EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \ - ret_from_except) - -#define EXC_XFER_EE(n, hdlr) \ - EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \ - ret_from_except_full) - -#define EXC_XFER_EE_LITE(n, hdlr) \ - EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \ - ret_from_except) - -/* System reset */ - EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD) - -/* Machine check */ - . = 0x200 -MachineCheck: - EXCEPTION_PROLOG - mfspr r4,SPRN_DAR - stw r4,_DAR(r11) - mfspr r5,SPRN_DSISR - stw r5,_DSISR(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_STD(0x200, machine_check_exception) - -/* Data access exception. - * This is "never generated" by the MPC8xx. We jump to it for other - * translation errors. - */ - . = 0x300 -DataAccess: - EXCEPTION_PROLOG - mfspr r10,SPRN_DSISR - stw r10,_DSISR(r11) - mr r5,r10 - mfspr r4,SPRN_DAR - EXC_XFER_EE_LITE(0x300, handle_page_fault) - -/* Instruction access exception. - * This is "never generated" by the MPC8xx. We jump to it for other - * translation errors. - */ - . = 0x400 -InstructionAccess: - EXCEPTION_PROLOG - mr r4,r12 - mr r5,r9 - EXC_XFER_EE_LITE(0x400, handle_page_fault) - -/* External interrupt */ - EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) - -/* Alignment exception */ - . = 0x600 -Alignment: - EXCEPTION_PROLOG - mfspr r4,SPRN_DAR - stw r4,_DAR(r11) - mfspr r5,SPRN_DSISR - stw r5,_DSISR(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_EE(0x600, alignment_exception) - -/* Program check exception */ - EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD) - -/* No FPU on MPC8xx. This exception is not supposed to happen. -*/ - EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD) - -/* Decrementer */ - EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) - - EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE) - EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE) - -/* System call */ - . = 0xc00 -SystemCall: - EXCEPTION_PROLOG - EXC_XFER_EE_LITE(0xc00, DoSyscall) - -/* Single step - not used on 601 */ - EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD) - EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE) - EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE) - -/* On the MPC8xx, this is a software emulation interrupt. It occurs - * for all unimplemented and illegal instructions. - */ - EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD) - - . = 0x1100 -/* - * For the MPC8xx, this is a software tablewalk to load the instruction - * TLB. It is modelled after the example in the Motorola manual. The task - * switch loads the M_TWB register with the pointer to the first level table. 
- * If we discover there is no second level table (value is zero) or if there - * is an invalid pte, we load that into the TLB, which causes another fault - * into the TLB Error interrupt where we can handle such problems. - * We have to use the MD_xxx registers for the tablewalk because the - * equivalent MI_xxx registers only perform the attribute functions. - */ -InstructionTLBMiss: -#ifdef CONFIG_8xx_CPU6 - stw r3, 8(r0) -#endif - DO_8xx_CPU6(0x3f80, r3) - mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ - mfcr r10 - stw r10, 0(r0) - stw r11, 4(r0) - mfspr r10, SPRN_SRR0 /* Get effective address of fault */ - DO_8xx_CPU6(0x3780, r3) - mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */ - mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - andi. r11, r10, 0x0800 /* Address >= 0x80000000 */ - beq 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - rlwimi r10, r11, 0, 2, 19 -3: - lwz r11, 0(r10) /* Get the level 1 entry */ - rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ - beq 2f /* If zero, don't try to find a pte */ - - /* We have a pte table, so load the MI_TWC with the attributes - * for this "segment." - */ - ori r11,r11,1 /* Set valid bit */ - DO_8xx_CPU6(0x2b80, r3) - mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ - DO_8xx_CPU6(0x3b80, r3) - mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ - mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ - lwz r10, 0(r11) /* Get the pte */ - - ori r10, r10, _PAGE_ACCESSED - stw r10, 0(r11) - - /* The Linux PTE won't go exactly into the MMU TLB. - * Software indicator bits 21, 22 and 28 must be clear. - * Software indicator bits 24, 25, 26, and 27 must be - * set. All other Linux PTE bits control the behavior - * of the MMU. - */ -2: li r11, 0x00f0 - rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ - DO_8xx_CPU6(0x2d80, r3) - mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ - - mfspr r10, SPRN_M_TW /* Restore registers */ - lwz r11, 0(r0) - mtcr r11 - lwz r11, 4(r0) -#ifdef CONFIG_8xx_CPU6 - lwz r3, 8(r0) -#endif - rfi - - . = 0x1200 -DataStoreTLBMiss: -#ifdef CONFIG_8xx_CPU6 - stw r3, 8(r0) -#endif - DO_8xx_CPU6(0x3f80, r3) - mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ - mfcr r10 - stw r10, 0(r0) - stw r11, 4(r0) - mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - andi. r11, r10, 0x0800 - beq 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - rlwimi r10, r11, 0, 2, 19 -3: - lwz r11, 0(r10) /* Get the level 1 entry */ - rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ - beq 2f /* If zero, don't try to find a pte */ - - /* We have a pte table, so load fetch the pte from the table. - */ - ori r11, r11, 1 /* Set valid bit in physical L2 page */ - DO_8xx_CPU6(0x3b80, r3) - mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ - mfspr r10, SPRN_MD_TWC /* ....and get the pte address */ - lwz r10, 0(r10) /* Get the pte */ - - /* Insert the Guarded flag into the TWC from the Linux PTE. - * It is bit 27 of both the Linux PTE and the TWC (at least - * I got that right :-). It will be better when we can put - * this into the Linux pgd/pmd and load it in the operation - * above. 
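The final fixup both 8xx miss handlers apply before writing MI_RPN/MD_RPN is the rlwimi with 0x00f0 described in the comments above: software indicator bits 24-27 (IBM numbering, i.e. 0x80/0x40/0x20/0x10) must be set and bit 28 (0x08) cleared, while everything else passes straight through. pte_to_8xx_rpn is an illustrative name for this sketch.

    #include <stdint.h>

    static inline uint32_t pte_to_8xx_rpn(uint32_t pte)
    {
        return (pte & ~0x000000f8u) | 0x000000f0u;   /* rlwimi r10,0x00f0,0,24,28 */
    }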
- */ - rlwimi r11, r10, 0, 27, 27 - DO_8xx_CPU6(0x3b80, r3) - mtspr SPRN_MD_TWC, r11 - - mfspr r11, SPRN_MD_TWC /* get the pte address again */ - ori r10, r10, _PAGE_ACCESSED - stw r10, 0(r11) - - /* The Linux PTE won't go exactly into the MMU TLB. - * Software indicator bits 21, 22 and 28 must be clear. - * Software indicator bits 24, 25, 26, and 27 must be - * set. All other Linux PTE bits control the behavior - * of the MMU. - */ -2: li r11, 0x00f0 - rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ - DO_8xx_CPU6(0x3d80, r3) - mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ - - mfspr r10, SPRN_M_TW /* Restore registers */ - lwz r11, 0(r0) - mtcr r11 - lwz r11, 4(r0) -#ifdef CONFIG_8xx_CPU6 - lwz r3, 8(r0) -#endif - rfi - -/* This is an instruction TLB error on the MPC8xx. This could be due - * to many reasons, such as executing guarded memory or illegal instruction - * addresses. There is nothing to do but handle a big time error fault. - */ - . = 0x1300 -InstructionTLBError: - b InstructionAccess - -/* This is the data TLB error on the MPC8xx. This could be due to - * many reasons, including a dirty update to a pte. We can catch that - * one here, but anything else is an error. First, we track down the - * Linux pte. If it is valid, write access is allowed, but the - * page dirty bit is not set, we will set it and reload the TLB. For - * any other case, we bail out to a higher level function that can - * handle it. - */ - . = 0x1400 -DataTLBError: -#ifdef CONFIG_8xx_CPU6 - stw r3, 8(r0) -#endif - DO_8xx_CPU6(0x3f80, r3) - mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ - mfcr r10 - stw r10, 0(r0) - stw r11, 4(r0) - - /* First, make sure this was a store operation. - */ - mfspr r10, SPRN_DSISR - andis. r11, r10, 0x0200 /* If set, indicates store op */ - beq 2f - - /* The EA of a data TLB miss is automatically stored in the MD_EPN - * register. The EA of a data TLB error is automatically stored in - * the DAR, but not the MD_EPN register. We must copy the 20 most - * significant bits of the EA from the DAR to MD_EPN before we - * start walking the page tables. We also need to copy the CASID - * value from the M_CASID register. - * Addendum: The EA of a data TLB error is _supposed_ to be stored - * in DAR, but it seems that this doesn't happen in some cases, such - * as when the error is due to a dcbi instruction to a page with a - * TLB that doesn't have the changed bit set. In such cases, there - * does not appear to be any way to recover the EA of the error - * since it is neither in DAR nor MD_EPN. As a workaround, the - * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs - * are initialized in mapin_ram(). This will avoid the problem, - * assuming we only use the dcbi instruction on kernel addresses. - */ - mfspr r10, SPRN_DAR - rlwinm r11, r10, 0, 0, 19 - ori r11, r11, MD_EVALID - mfspr r10, SPRN_M_CASID - rlwimi r11, r10, 0, 28, 31 - DO_8xx_CPU6(0x3780, r3) - mtspr SPRN_MD_EPN, r11 - - mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - andi. r11, r10, 0x0800 - beq 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - rlwimi r10, r11, 0, 2, 19 -3: - lwz r11, 0(r10) /* Get the level 1 entry */ - rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ - beq 2f /* If zero, bail */ - - /* We have a pte table, so fetch the pte from the table. 
- */ - ori r11, r11, 1 /* Set valid bit in physical L2 page */ - DO_8xx_CPU6(0x3b80, r3) - mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ - mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ - lwz r10, 0(r11) /* Get the pte */ - - andi. r11, r10, _PAGE_RW /* Is it writeable? */ - beq 2f /* Bail out if not */ - - /* Update 'changed', among others. - */ - ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE - mfspr r11, SPRN_MD_TWC /* Get pte address again */ - stw r10, 0(r11) /* and update pte in table */ - - /* The Linux PTE won't go exactly into the MMU TLB. - * Software indicator bits 21, 22 and 28 must be clear. - * Software indicator bits 24, 25, 26, and 27 must be - * set. All other Linux PTE bits control the behavior - * of the MMU. - */ - li r11, 0x00f0 - rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ - DO_8xx_CPU6(0x3d80, r3) - mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ - - mfspr r10, SPRN_M_TW /* Restore registers */ - lwz r11, 0(r0) - mtcr r11 - lwz r11, 4(r0) -#ifdef CONFIG_8xx_CPU6 - lwz r3, 8(r0) -#endif - rfi -2: - mfspr r10, SPRN_M_TW /* Restore registers */ - lwz r11, 0(r0) - mtcr r11 - lwz r11, 4(r0) -#ifdef CONFIG_8xx_CPU6 - lwz r3, 8(r0) -#endif - b DataAccess - - EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE) - -/* On the MPC8xx, these next four traps are used for development - * support of breakpoints and such. Someday I will get around to - * using them. - */ - EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE) - - . = 0x2000 - - .globl giveup_fpu -giveup_fpu: - blr - -/* - * This is where the main kernel code starts. - */ -start_here: - /* ptr to current */ - lis r2,init_task@h - ori r2,r2,init_task@l - - /* ptr to phys current thread */ - tophys(r4,r2) - addi r4,r4,THREAD /* init task's THREAD */ - mtspr SPRN_SPRG3,r4 - li r3,0 - mtspr SPRN_SPRG2,r3 /* 0 => r1 has kernel sp */ - - /* stack */ - lis r1,init_thread_union@ha - addi r1,r1,init_thread_union@l - li r0,0 - stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) - - bl early_init /* We have to do this with MMU on */ - -/* - * Decide what sort of machine this is and initialize the MMU. - */ - mr r3,r31 - mr r4,r30 - mr r5,r29 - mr r6,r28 - mr r7,r27 - bl machine_init - bl MMU_init - -/* - * Go back to running unmapped so we can load up new values - * and change to using our exception vectors. - * On the 8xx, all we have to do is invalidate the TLB to clear - * the old 8M byte TLB mappings and load the page table base register. - */ - /* The right way to do this would be to track it down through - * init's THREAD like the context switch code does, but this is - * easier......until someone changes init's static structures. 
- */ - lis r6, swapper_pg_dir@h - ori r6, r6, swapper_pg_dir@l - tophys(r6,r6) -#ifdef CONFIG_8xx_CPU6 - lis r4, cpu6_errata_word@h - ori r4, r4, cpu6_errata_word@l - li r3, 0x3980 - stw r3, 12(r4) - lwz r3, 12(r4) -#endif - mtspr SPRN_M_TWB, r6 - lis r4,2f@h - ori r4,r4,2f@l - tophys(r4,r4) - li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) - mtspr SPRN_SRR0,r4 - mtspr SPRN_SRR1,r3 - rfi -/* Load up the kernel context */ -2: - SYNC /* Force all PTE updates to finish */ - tlbia /* Clear all TLB entries */ - sync /* wait for tlbia/tlbie to finish */ - TLBSYNC /* ... on all CPUs */ - - /* set up the PTE pointers for the Abatron bdiGDB. - */ - tovirt(r6,r6) - lis r5, abatron_pteptrs@h - ori r5, r5, abatron_pteptrs@l - stw r5, 0xf0(r0) /* Must match your Abatron config file */ - tophys(r5,r5) - stw r6, 0(r5) - -/* Now turn on the MMU for real! */ - li r4,MSR_KERNEL - lis r3,start_kernel@h - ori r3,r3,start_kernel@l - mtspr SPRN_SRR0,r3 - mtspr SPRN_SRR1,r4 - rfi /* enable MMU and jump to start_kernel */ - -/* Set up the initial MMU state so we can do the first level of - * kernel initialization. This maps the first 8 MBytes of memory 1:1 - * virtual to physical. Also, set the cache mode since that is defined - * by TLB entries and perform any additional mapping (like of the IMMR). - * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel, - * 24 Mbytes of data, and the 8M IMMR space. Anything not covered by - * these mappings is mapped by page tables. - */ -initial_mmu: - tlbia /* Invalidate all TLB entries */ -#ifdef CONFIG_PIN_TLB - lis r8, MI_RSV4I@h - ori r8, r8, 0x1c00 -#else - li r8, 0 -#endif - mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */ - -#ifdef CONFIG_PIN_TLB - lis r10, (MD_RSV4I | MD_RESETVAL)@h - ori r10, r10, 0x1c00 - mr r8, r10 -#else - lis r10, MD_RESETVAL@h -#endif -#ifndef CONFIG_8xx_COPYBACK - oris r10, r10, MD_WTDEF@h -#endif - mtspr SPRN_MD_CTR, r10 /* Set data TLB control */ - - /* Now map the lower 8 Meg into the TLBs. For this quick hack, - * we can load the instruction and data TLB registers with the - * same values. - */ - lis r8, KERNELBASE@h /* Create vaddr for TLB */ - ori r8, r8, MI_EVALID /* Mark it valid */ - mtspr SPRN_MI_EPN, r8 - mtspr SPRN_MD_EPN, r8 - li r8, MI_PS8MEG /* Set 8M byte page */ - ori r8, r8, MI_SVALID /* Make it valid */ - mtspr SPRN_MI_TWC, r8 - mtspr SPRN_MD_TWC, r8 - li r8, MI_BOOTINIT /* Create RPN for address 0 */ - mtspr SPRN_MI_RPN, r8 /* Store TLB entry */ - mtspr SPRN_MD_RPN, r8 - lis r8, MI_Kp@h /* Set the protection mode */ - mtspr SPRN_MI_AP, r8 - mtspr SPRN_MD_AP, r8 - - /* Map another 8 MByte at the IMMR to get the processor - * internal registers (among other things). - */ -#ifdef CONFIG_PIN_TLB - addi r10, r10, 0x0100 - mtspr SPRN_MD_CTR, r10 -#endif - mfspr r9, 638 /* Get current IMMR */ - andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */ - - mr r8, r9 /* Create vaddr for TLB */ - ori r8, r8, MD_EVALID /* Mark it valid */ - mtspr SPRN_MD_EPN, r8 - li r8, MD_PS8MEG /* Set 8M byte page */ - ori r8, r8, MD_SVALID /* Make it valid */ - mtspr SPRN_MD_TWC, r8 - mr r8, r9 /* Create paddr for TLB */ - ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */ - mtspr SPRN_MD_RPN, r8 - -#ifdef CONFIG_PIN_TLB - /* Map two more 8M kernel data pages. 
- */ - addi r10, r10, 0x0100 - mtspr SPRN_MD_CTR, r10 - - lis r8, KERNELBASE@h /* Create vaddr for TLB */ - addis r8, r8, 0x0080 /* Add 8M */ - ori r8, r8, MI_EVALID /* Mark it valid */ - mtspr SPRN_MD_EPN, r8 - li r9, MI_PS8MEG /* Set 8M byte page */ - ori r9, r9, MI_SVALID /* Make it valid */ - mtspr SPRN_MD_TWC, r9 - li r11, MI_BOOTINIT /* Create RPN for address 0 */ - addis r11, r11, 0x0080 /* Add 8M */ - mtspr SPRN_MD_RPN, r8 - - addis r8, r8, 0x0080 /* Add 8M */ - mtspr SPRN_MD_EPN, r8 - mtspr SPRN_MD_TWC, r9 - addis r11, r11, 0x0080 /* Add 8M */ - mtspr SPRN_MD_RPN, r8 -#endif - - /* Since the cache is enabled according to the information we - * just loaded into the TLB, invalidate and enable the caches here. - * We should probably check/set other modes....later. - */ - lis r8, IDC_INVALL@h - mtspr SPRN_IC_CST, r8 - mtspr SPRN_DC_CST, r8 - lis r8, IDC_ENABLE@h - mtspr SPRN_IC_CST, r8 -#ifdef CONFIG_8xx_COPYBACK - mtspr SPRN_DC_CST, r8 -#else - /* For a debug option, I left this here to easily enable - * the write through cache mode - */ - lis r8, DC_SFWT@h - mtspr SPRN_DC_CST, r8 - lis r8, IDC_ENABLE@h - mtspr SPRN_DC_CST, r8 -#endif - blr - - -/* - * Set up to use a given MMU context. - * r3 is context number, r4 is PGD pointer. - * - * We place the physical address of the new task page directory loaded - * into the MMU base register, and set the ASID compare register with - * the new "context." - */ -_GLOBAL(set_context) - -#ifdef CONFIG_BDI_SWITCH - /* Context switch the PTE pointer for the Abatron BDI2000. - * The PGDIR is passed as second argument. - */ - lis r5, KERNELBASE@h - lwz r5, 0xf0(r5) - stw r4, 0x4(r5) -#endif - -#ifdef CONFIG_8xx_CPU6 - lis r6, cpu6_errata_word@h - ori r6, r6, cpu6_errata_word@l - tophys (r4, r4) - li r7, 0x3980 - stw r7, 12(r6) - lwz r7, 12(r6) - mtspr SPRN_M_TWB, r4 /* Update MMU base address */ - li r7, 0x3380 - stw r7, 12(r6) - lwz r7, 12(r6) - mtspr SPRN_M_CASID, r3 /* Update context */ -#else - mtspr SPRN_M_CASID,r3 /* Update context */ - tophys (r4, r4) - mtspr SPRN_M_TWB, r4 /* and pgd */ -#endif - SYNC - blr - -#ifdef CONFIG_8xx_CPU6 -/* It's here because it is unique to the 8xx. - * It is important we get called with interrupts disabled. I used to - * do that, but it appears that all code that calls this already had - * interrupt disabled. - */ - .globl set_dec_cpu6 -set_dec_cpu6: - lis r7, cpu6_errata_word@h - ori r7, r7, cpu6_errata_word@l - li r4, 0x2c00 - stw r4, 8(r7) - lwz r4, 8(r7) - mtspr 22, r3 /* Update Decrementer */ - SYNC - blr -#endif - -/* - * We put a few things here that have to be page-aligned. - * This stuff goes at the beginning of the data segment, - * which is page-aligned. - */ - .data - .globl sdata -sdata: - .globl empty_zero_page -empty_zero_page: - .space 4096 - - .globl swapper_pg_dir -swapper_pg_dir: - .space 4096 - -/* - * This space gets a copy of optional info passed to us by the bootstrap - * Used to pass parameters into the kernel like root=/dev/sda1, etc. - */ - .globl cmd_line -cmd_line: - .space 512 - -/* Room for two PTE table poiners, usually the kernel and current user - * pointer to their respective root page table (pgdir). 
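
As a sketch only, the 8-byte abatron_pteptrs block reserved below can be pictured as the C structure here: word 0 is filled with swapper_pg_dir in start_here above, and word 4 is rewritten by set_context() on every context switch. The struct and field names are illustrative, not taken from the file.

        struct abatron_pteptrs_layout {         /* hypothetical name */
                unsigned long kernel_pgdir;     /* offset 0: swapper_pg_dir, set at boot   */
                unsigned long current_pgdir;    /* offset 4: stored by set_context()       */
        };
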
- */ -abatron_pteptrs: - .space 8 - -#ifdef CONFIG_8xx_CPU6 - .globl cpu6_errata_word -cpu6_errata_word: - .space 16 -#endif - diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h deleted file mode 100644 index aeb349b47af..00000000000 --- a/arch/ppc/kernel/head_booke.h +++ /dev/null @@ -1,363 +0,0 @@ -#ifndef __HEAD_BOOKE_H__ -#define __HEAD_BOOKE_H__ - -/* - * Macros used for common Book-e exception handling - */ - -#define SET_IVOR(vector_number, vector_label) \ - li r26,vector_label@l; \ - mtspr SPRN_IVOR##vector_number,r26; \ - sync - -#define NORMAL_EXCEPTION_PROLOG \ - mtspr SPRN_SPRG0,r10; /* save two registers to work with */\ - mtspr SPRN_SPRG1,r11; \ - mtspr SPRN_SPRG4W,r1; \ - mfcr r10; /* save CR in r10 for now */\ - mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ - andi. r11,r11,MSR_PR; \ - beq 1f; \ - mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\ - lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\ - addi r1,r1,THREAD_SIZE; \ -1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\ - mr r11,r1; \ - stw r10,_CCR(r11); /* save various registers */\ - stw r12,GPR12(r11); \ - stw r9,GPR9(r11); \ - mfspr r10,SPRN_SPRG0; \ - stw r10,GPR10(r11); \ - mfspr r12,SPRN_SPRG1; \ - stw r12,GPR11(r11); \ - mflr r10; \ - stw r10,_LINK(r11); \ - mfspr r10,SPRN_SPRG4R; \ - mfspr r12,SPRN_SRR0; \ - stw r10,GPR1(r11); \ - mfspr r9,SPRN_SRR1; \ - stw r10,0(r11); \ - rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ - stw r0,GPR0(r11); \ - SAVE_4GPRS(3, r11); \ - SAVE_2GPRS(7, r11) - -/* To handle the additional exception priority levels on 40x and Book-E - * processors we allocate a 4k stack per additional priority level. The various - * head_xxx.S files allocate space (exception_stack_top) for each priority's - * stack times the number of CPUs - * - * On 40x critical is the only additional level - * On 44x/e500 we have critical and machine check - * On e200 we have critical and debug (machine check occurs via critical) - * - * Additionally we reserve a SPRG for each priority level so we can free up a - * GPR to use as the base for indirect access to the exception stacks. This - * is necessary since the MMU is always on, for Book-E parts, and the stacks - * are offset from KERNELBASE. - * - */ -#define BOOKE_EXCEPTION_STACK_SIZE (8192) - -/* CRIT_SPRG only used in critical exception handling */ -#define CRIT_SPRG SPRN_SPRG2 -/* MCHECK_SPRG only used in machine check exception handling */ -#define MCHECK_SPRG SPRN_SPRG6W - -#define MCHECK_STACK_TOP (exception_stack_top - 4096) -#define CRIT_STACK_TOP (exception_stack_top) - -/* only on e200 for now */ -#define DEBUG_STACK_TOP (exception_stack_top - 4096) -#define DEBUG_SPRG SPRN_SPRG6W - -#ifdef CONFIG_SMP -#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ - mfspr r8,SPRN_PIR; \ - mulli r8,r8,BOOKE_EXCEPTION_STACK_SIZE; \ - neg r8,r8; \ - addis r8,r8,level##_STACK_TOP@ha; \ - addi r8,r8,level##_STACK_TOP@l -#else -#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ - lis r8,level##_STACK_TOP@h; \ - ori r8,r8,level##_STACK_TOP@l -#endif - -/* - * Exception prolog for critical/machine check exceptions. This is a - * little different from the normal exception prolog above since a - * critical/machine check exception can potentially occur at any point - * during normal exception processing. Thus we cannot use the same SPRG - * registers as the normal prolog above. Instead we use a portion of the - * critical/machine check exception stack at low physical addresses. 
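
A minimal C sketch of the address computed by BOOKE_LOAD_EXC_LEVEL_STACK above, assuming the 8 KB-per-CPU layout this header describes; the macro and function names below are illustrative stand-ins, not the header's own:

        #define BOOKE_EXC_STACK_SIZE 8192       /* mirrors BOOKE_EXCEPTION_STACK_SIZE */

        /* CPU 'pir' owns the 8 KB block ending at stack_top - pir * 8 KB */
        static inline unsigned long exc_level_stack_top(unsigned long stack_top,
                                                        unsigned int pir)
        {
                return stack_top - pir * BOOKE_EXC_STACK_SIZE;
        }

On the non-SMP build the same macro simply loads the level's stack top, i.e. the pir == 0 case of this sketch.
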
- */ -#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, exc_level_srr0, exc_level_srr1) \ - mtspr exc_level##_SPRG,r8; \ - BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \ - stw r10,GPR10-INT_FRAME_SIZE(r8); \ - stw r11,GPR11-INT_FRAME_SIZE(r8); \ - mfcr r10; /* save CR in r10 for now */\ - mfspr r11,exc_level_srr1; /* check whether user or kernel */\ - andi. r11,r11,MSR_PR; \ - mr r11,r8; \ - mfspr r8,exc_level##_SPRG; \ - beq 1f; \ - /* COMING FROM USER MODE */ \ - mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\ - lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ - addi r11,r11,THREAD_SIZE; \ -1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ - stw r10,_CCR(r11); /* save various registers */\ - stw r12,GPR12(r11); \ - stw r9,GPR9(r11); \ - mflr r10; \ - stw r10,_LINK(r11); \ - mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ - stw r12,_DEAR(r11); /* since they may have had stuff */\ - mfspr r9,SPRN_ESR; /* in them at the point where the */\ - stw r9,_ESR(r11); /* exception was taken */\ - mfspr r12,exc_level_srr0; \ - stw r1,GPR1(r11); \ - mfspr r9,exc_level_srr1; \ - stw r1,0(r11); \ - mr r1,r11; \ - rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ - stw r0,GPR0(r11); \ - SAVE_4GPRS(3, r11); \ - SAVE_2GPRS(7, r11) - -#define CRITICAL_EXCEPTION_PROLOG \ - EXC_LEVEL_EXCEPTION_PROLOG(CRIT, SPRN_CSRR0, SPRN_CSRR1) -#define DEBUG_EXCEPTION_PROLOG \ - EXC_LEVEL_EXCEPTION_PROLOG(DEBUG, SPRN_DSRR0, SPRN_DSRR1) -#define MCHECK_EXCEPTION_PROLOG \ - EXC_LEVEL_EXCEPTION_PROLOG(MCHECK, SPRN_MCSRR0, SPRN_MCSRR1) - -/* - * Exception vectors. - */ -#define START_EXCEPTION(label) \ - .align 5; \ -label: - -#define FINISH_EXCEPTION(func) \ - bl transfer_to_handler_full; \ - .long func; \ - .long ret_from_except_full - -#define EXCEPTION(n, label, hdlr, xfer) \ - START_EXCEPTION(label); \ - NORMAL_EXCEPTION_PROLOG; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - xfer(n, hdlr) - -#define CRITICAL_EXCEPTION(n, label, hdlr) \ - START_EXCEPTION(label); \ - CRITICAL_EXCEPTION_PROLOG; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ - NOCOPY, crit_transfer_to_handler, \ - ret_from_crit_exc) - -#define MCHECK_EXCEPTION(n, label, hdlr) \ - START_EXCEPTION(label); \ - MCHECK_EXCEPTION_PROLOG; \ - mfspr r5,SPRN_ESR; \ - stw r5,_ESR(r11); \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ - NOCOPY, mcheck_transfer_to_handler, \ - ret_from_mcheck_exc) - -#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \ - li r10,trap; \ - stw r10,TRAP(r11); \ - lis r10,msr@h; \ - ori r10,r10,msr@l; \ - copyee(r10, r9); \ - bl tfer; \ - .long hdlr; \ - .long ret - -#define COPY_EE(d, s) rlwimi d,s,0,16,16 -#define NOCOPY(d, s) - -#define EXC_XFER_STD(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \ - ret_from_except_full) - -#define EXC_XFER_LITE(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \ - ret_from_except) - -#define EXC_XFER_EE(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \ - ret_from_except_full) - -#define EXC_XFER_EE_LITE(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \ - ret_from_except) - -/* Check for a single step debug exception while in an exception - * handler before state has been saved. 
This is to catch the case - * where an instruction that we are trying to single step causes - * an exception (eg ITLB/DTLB miss) and thus the first instruction of - * the exception handler generates a single step debug exception. - * - * If we get a debug trap on the first instruction of an exception handler, - * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is - * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR). - * The exception handler was handling a non-critical interrupt, so it will - * save (and later restore) the MSR via SPRN_CSRR1, which will still have - * the MSR_DE bit set. - */ -#ifdef CONFIG_E200 -#define DEBUG_EXCEPTION \ - START_EXCEPTION(Debug); \ - DEBUG_EXCEPTION_PROLOG; \ - \ - /* \ - * If there is a single step or branch-taken exception in an \ - * exception entry sequence, it was probably meant to apply to \ - * the code where the exception occurred (since exception entry \ - * doesn't turn off DE automatically). We simulate the effect \ - * of turning off DE on entry to an exception handler by turning \ - * off DE in the CSRR1 value and clearing the debug status. \ - */ \ - mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \ - andis. r10,r10,DBSR_IC@h; \ - beq+ 2f; \ - \ - lis r10,KERNELBASE@h; /* check if exception in vectors */ \ - ori r10,r10,KERNELBASE@l; \ - cmplw r12,r10; \ - blt+ 2f; /* addr below exception vectors */ \ - \ - lis r10,Debug@h; \ - ori r10,r10,Debug@l; \ - cmplw r12,r10; \ - bgt+ 2f; /* addr above exception vectors */ \ - \ - /* here it looks like we got an inappropriate debug exception. */ \ -1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CDRR1 value */ \ - lis r10,DBSR_IC@h; /* clear the IC event */ \ - mtspr SPRN_DBSR,r10; \ - /* restore state and get out */ \ - lwz r10,_CCR(r11); \ - lwz r0,GPR0(r11); \ - lwz r1,GPR1(r11); \ - mtcrf 0x80,r10; \ - mtspr SPRN_DSRR0,r12; \ - mtspr SPRN_DSRR1,r9; \ - lwz r9,GPR9(r11); \ - lwz r12,GPR12(r11); \ - mtspr DEBUG_SPRG,r8; \ - BOOKE_LOAD_EXC_LEVEL_STACK(DEBUG); /* r8 points to the debug stack */ \ - lwz r10,GPR10-INT_FRAME_SIZE(r8); \ - lwz r11,GPR11-INT_FRAME_SIZE(r8); \ - mfspr r8,DEBUG_SPRG; \ - \ - RFDI; \ - b .; \ - \ - /* continue normal handling for a critical exception... */ \ -2: mfspr r4,SPRN_DBSR; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, debug_transfer_to_handler, ret_from_debug_exc) -#else -#define DEBUG_EXCEPTION \ - START_EXCEPTION(Debug); \ - CRITICAL_EXCEPTION_PROLOG; \ - \ - /* \ - * If there is a single step or branch-taken exception in an \ - * exception entry sequence, it was probably meant to apply to \ - * the code where the exception occurred (since exception entry \ - * doesn't turn off DE automatically). We simulate the effect \ - * of turning off DE on entry to an exception handler by turning \ - * off DE in the CSRR1 value and clearing the debug status. \ - */ \ - mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \ - andis. r10,r10,DBSR_IC@h; \ - beq+ 2f; \ - \ - lis r10,KERNELBASE@h; /* check if exception in vectors */ \ - ori r10,r10,KERNELBASE@l; \ - cmplw r12,r10; \ - blt+ 2f; /* addr below exception vectors */ \ - \ - lis r10,Debug@h; \ - ori r10,r10,Debug@l; \ - cmplw r12,r10; \ - bgt+ 2f; /* addr above exception vectors */ \ - \ - /* here it looks like we got an inappropriate debug exception. 
*/ \ -1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CSRR1 value */ \ - lis r10,DBSR_IC@h; /* clear the IC event */ \ - mtspr SPRN_DBSR,r10; \ - /* restore state and get out */ \ - lwz r10,_CCR(r11); \ - lwz r0,GPR0(r11); \ - lwz r1,GPR1(r11); \ - mtcrf 0x80,r10; \ - mtspr SPRN_CSRR0,r12; \ - mtspr SPRN_CSRR1,r9; \ - lwz r9,GPR9(r11); \ - lwz r12,GPR12(r11); \ - mtspr CRIT_SPRG,r8; \ - BOOKE_LOAD_EXC_LEVEL_STACK(CRIT); /* r8 points to the debug stack */ \ - lwz r10,GPR10-INT_FRAME_SIZE(r8); \ - lwz r11,GPR11-INT_FRAME_SIZE(r8); \ - mfspr r8,CRIT_SPRG; \ - \ - rfci; \ - b .; \ - \ - /* continue normal handling for a critical exception... */ \ -2: mfspr r4,SPRN_DBSR; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) -#endif - -#define INSTRUCTION_STORAGE_EXCEPTION \ - START_EXCEPTION(InstructionStorage) \ - NORMAL_EXCEPTION_PROLOG; \ - mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ - stw r5,_ESR(r11); \ - mr r4,r12; /* Pass SRR0 as arg2 */ \ - li r5,0; /* Pass zero as arg3 */ \ - EXC_XFER_EE_LITE(0x0400, handle_page_fault) - -#define ALIGNMENT_EXCEPTION \ - START_EXCEPTION(Alignment) \ - NORMAL_EXCEPTION_PROLOG; \ - mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \ - stw r4,_DEAR(r11); \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_EE(0x0600, alignment_exception) - -#define PROGRAM_EXCEPTION \ - START_EXCEPTION(Program) \ - NORMAL_EXCEPTION_PROLOG; \ - mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \ - stw r4,_ESR(r11); \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_STD(0x0700, program_check_exception) - -#define DECREMENTER_EXCEPTION \ - START_EXCEPTION(Decrementer) \ - NORMAL_EXCEPTION_PROLOG; \ - lis r0,TSR_DIS@h; /* Setup the DEC interrupt mask */ \ - mtspr SPRN_TSR,r0; /* Clear the DEC interrupt */ \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_LITE(0x0900, timer_interrupt) - -#define FP_UNAVAILABLE_EXCEPTION \ - START_EXCEPTION(FloatingPointUnavailable) \ - NORMAL_EXCEPTION_PROLOG; \ - bne load_up_fpu; /* if from user, just load it up */ \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_EE_LITE(0x800, KernelFP) - -#endif /* __HEAD_BOOKE_H__ */ diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S deleted file mode 100644 index 5063c603fad..00000000000 --- a/arch/ppc/kernel/head_fsl_booke.S +++ /dev/null @@ -1,1063 +0,0 @@ -/* - * arch/ppc/kernel/head_fsl_booke.S - * - * Kernel execution entry point code. - * - * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> - * Initial PowerPC version. - * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> - * Rewritten for PReP - * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> - * Low-level exception handers, MMU support, and rewrite. - * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> - * PowerPC 8xx modifications. - * Copyright (c) 1998-1999 TiVo, Inc. - * PowerPC 403GCX modifications. - * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> - * PowerPC 403GCX/405GP modifications. - * Copyright 2000 MontaVista Software Inc. - * PPC405 modifications - * PowerPC 403GCX/405GP modifications. - * Author: MontaVista Software, Inc. - * frank_rowand@mvista.com or source@mvista.com - * debbie_chu@mvista.com - * Copyright 2002-2004 MontaVista Software, Inc. 
- * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org> - * Copyright 2004 Freescale Semiconductor, Inc - * PowerPC e500 modifications, Kumar Gala <kumar.gala@freescale.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <linux/config.h> -#include <linux/threads.h> -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/mmu.h> -#include <asm/pgtable.h> -#include <asm/cputable.h> -#include <asm/thread_info.h> -#include <asm/ppc_asm.h> -#include <asm/asm-offsets.h> -#include "head_booke.h" - -/* As with the other PowerPC ports, it is expected that when code - * execution begins here, the following registers contain valid, yet - * optional, information: - * - * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) - * r4 - Starting address of the init RAM disk - * r5 - Ending address of the init RAM disk - * r6 - Start of kernel command line string (e.g. "mem=128") - * r7 - End of kernel command line string - * - */ - .text -_GLOBAL(_stext) -_GLOBAL(_start) - /* - * Reserve a word at a fixed location to store the address - * of abatron_pteptrs - */ - nop -/* - * Save parameters we are passed - */ - mr r31,r3 - mr r30,r4 - mr r29,r5 - mr r28,r6 - mr r27,r7 - li r24,0 /* CPU number */ - -/* We try to not make any assumptions about how the boot loader - * setup or used the TLBs. We invalidate all mappings from the - * boot loader and load a single entry in TLB1[0] to map the - * first 16M of kernel memory. Any boot info passed from the - * bootloader needs to live in this first 16M. - * - * Requirement on bootloader: - * - The page we're executing in needs to reside in TLB1 and - * have IPROT=1. If not an invalidate broadcast could - * evict the entry we're currently executing in. - * - * r3 = Index of TLB1 were executing in - * r4 = Current MSR[IS] - * r5 = Index of TLB1 temp mapping - * - * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0] - * if needed - */ - -/* 1. Find the index of the entry we're executing in */ - bl invstr /* Find our address */ -invstr: mflr r6 /* Make it accessible */ - mfmsr r7 - rlwinm r4,r7,27,31,31 /* extract MSR[IS] */ - mfspr r7, SPRN_PID0 - slwi r7,r7,16 - or r7,r7,r4 - mtspr SPRN_MAS6,r7 - tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */ -#ifndef CONFIG_E200 - mfspr r7,SPRN_MAS1 - andis. r7,r7,MAS1_VALID@h - bne match_TLB - mfspr r7,SPRN_PID1 - slwi r7,r7,16 - or r7,r7,r4 - mtspr SPRN_MAS6,r7 - tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */ - mfspr r7,SPRN_MAS1 - andis. r7,r7,MAS1_VALID@h - bne match_TLB - mfspr r7, SPRN_PID2 - slwi r7,r7,16 - or r7,r7,r4 - mtspr SPRN_MAS6,r7 - tlbsx 0,r6 /* Fall through, we had to match */ -#endif -match_TLB: - mfspr r7,SPRN_MAS0 - rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */ - - mfspr r7,SPRN_MAS1 /* Insure IPROT set */ - oris r7,r7,MAS1_IPROT@h - mtspr SPRN_MAS1,r7 - tlbwe - -/* 2. Invalidate all entries except the entry we're executing in */ - mfspr r9,SPRN_TLB1CFG - andi. 
r9,r9,0xfff - li r6,0 /* Set Entry counter to 0 */ -1: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ - rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */ - mtspr SPRN_MAS0,r7 - tlbre - mfspr r7,SPRN_MAS1 - rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */ - cmpw r3,r6 - beq skpinv /* Dont update the current execution TLB */ - mtspr SPRN_MAS1,r7 - tlbwe - isync -skpinv: addi r6,r6,1 /* Increment */ - cmpw r6,r9 /* Are we done? */ - bne 1b /* If not, repeat */ - - /* Invalidate TLB0 */ - li r6,0x04 - tlbivax 0,r6 -#ifdef CONFIG_SMP - tlbsync -#endif - /* Invalidate TLB1 */ - li r6,0x0c - tlbivax 0,r6 -#ifdef CONFIG_SMP - tlbsync -#endif - msync - -/* 3. Setup a temp mapping and jump to it */ - andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */ - addi r5, r5, 0x1 - lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ - rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ - mtspr SPRN_MAS0,r7 - tlbre - - /* Just modify the entry ID and EPN for the temp mapping */ - lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ - rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ - mtspr SPRN_MAS0,r7 - xori r6,r4,1 /* Setup TMP mapping in the other Address space */ - slwi r6,r6,12 - oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h - ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l - mtspr SPRN_MAS1,r6 - mfspr r6,SPRN_MAS2 - li r7,0 /* temp EPN = 0 */ - rlwimi r7,r6,0,20,31 - mtspr SPRN_MAS2,r7 - tlbwe - - xori r6,r4,1 - slwi r6,r6,5 /* setup new context with other address space */ - bl 1f /* Find our address */ -1: mflr r9 - rlwimi r7,r9,0,20,31 - addi r7,r7,24 - mtspr SPRN_SRR0,r7 - mtspr SPRN_SRR1,r6 - rfi - -/* 4. Clear out PIDs & Search info */ - li r6,0 - mtspr SPRN_PID0,r6 -#ifndef CONFIG_E200 - mtspr SPRN_PID1,r6 - mtspr SPRN_PID2,r6 -#endif - mtspr SPRN_MAS6,r6 - -/* 5. Invalidate mapping we started in */ - lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ - rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ - mtspr SPRN_MAS0,r7 - tlbre - li r6,0 - mtspr SPRN_MAS1,r6 - tlbwe - /* Invalidate TLB1 */ - li r9,0x0c - tlbivax 0,r9 -#ifdef CONFIG_SMP - tlbsync -#endif - msync - -/* 6. Setup KERNELBASE mapping in TLB1[0] */ - lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ - mtspr SPRN_MAS0,r6 - lis r6,(MAS1_VALID|MAS1_IPROT)@h - ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l - mtspr SPRN_MAS1,r6 - li r7,0 - lis r6,KERNELBASE@h - ori r6,r6,KERNELBASE@l - rlwimi r6,r7,0,20,31 - mtspr SPRN_MAS2,r6 - li r7,(MAS3_SX|MAS3_SW|MAS3_SR) - mtspr SPRN_MAS3,r7 - tlbwe - -/* 7. Jump to KERNELBASE mapping */ - lis r7,MSR_KERNEL@h - ori r7,r7,MSR_KERNEL@l - bl 1f /* Find our address */ -1: mflr r9 - rlwimi r6,r9,0,20,31 - addi r6,r6,24 - mtspr SPRN_SRR0,r6 - mtspr SPRN_SRR1,r7 - rfi /* start execution out of TLB1[0] entry */ - -/* 8. 
Clear out the temp mapping */ - lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ - rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ - mtspr SPRN_MAS0,r7 - tlbre - mtspr SPRN_MAS1,r8 - tlbwe - /* Invalidate TLB1 */ - li r9,0x0c - tlbivax 0,r9 -#ifdef CONFIG_SMP - tlbsync -#endif - msync - - /* Establish the interrupt vector offsets */ - SET_IVOR(0, CriticalInput); - SET_IVOR(1, MachineCheck); - SET_IVOR(2, DataStorage); - SET_IVOR(3, InstructionStorage); - SET_IVOR(4, ExternalInput); - SET_IVOR(5, Alignment); - SET_IVOR(6, Program); - SET_IVOR(7, FloatingPointUnavailable); - SET_IVOR(8, SystemCall); - SET_IVOR(9, AuxillaryProcessorUnavailable); - SET_IVOR(10, Decrementer); - SET_IVOR(11, FixedIntervalTimer); - SET_IVOR(12, WatchdogTimer); - SET_IVOR(13, DataTLBError); - SET_IVOR(14, InstructionTLBError); - SET_IVOR(15, Debug); - SET_IVOR(32, SPEUnavailable); - SET_IVOR(33, SPEFloatingPointData); - SET_IVOR(34, SPEFloatingPointRound); -#ifndef CONFIG_E200 - SET_IVOR(35, PerformanceMonitor); -#endif - - /* Establish the interrupt vector base */ - lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ - mtspr SPRN_IVPR,r4 - - /* Setup the defaults for TLB entries */ - li r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l -#ifdef CONFIG_E200 - oris r2,r2,MAS4_TLBSELD(1)@h -#endif - mtspr SPRN_MAS4, r2 - -#if 0 - /* Enable DOZE */ - mfspr r2,SPRN_HID0 - oris r2,r2,HID0_DOZE@h - mtspr SPRN_HID0, r2 -#endif -#ifdef CONFIG_E200 - /* enable dedicated debug exception handling resources (Debug APU) */ - mfspr r2,SPRN_HID0 - ori r2,r2,HID0_DAPUEN@l - mtspr SPRN_HID0,r2 -#endif - -#if !defined(CONFIG_BDI_SWITCH) - /* - * The Abatron BDI JTAG debugger does not tolerate others - * mucking with the debug registers. - */ - lis r2,DBCR0_IDM@h - mtspr SPRN_DBCR0,r2 - /* clear any residual debug events */ - li r2,-1 - mtspr SPRN_DBSR,r2 -#endif - - /* - * This is where the main kernel code starts. - */ - - /* ptr to current */ - lis r2,init_task@h - ori r2,r2,init_task@l - - /* ptr to current thread */ - addi r4,r2,THREAD /* init task's THREAD */ - mtspr SPRN_SPRG3,r4 - - /* stack */ - lis r1,init_thread_union@h - ori r1,r1,init_thread_union@l - li r0,0 - stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) - - bl early_init - - mfspr r3,SPRN_TLB1CFG - andi. r3,r3,0xfff - lis r4,num_tlbcam_entries@ha - stw r3,num_tlbcam_entries@l(r4) -/* - * Decide what sort of machine this is and initialize the MMU. - */ - mr r3,r31 - mr r4,r30 - mr r5,r29 - mr r6,r28 - mr r7,r27 - bl machine_init - bl MMU_init - - /* Setup PTE pointers for the Abatron bdiGDB */ - lis r6, swapper_pg_dir@h - ori r6, r6, swapper_pg_dir@l - lis r5, abatron_pteptrs@h - ori r5, r5, abatron_pteptrs@l - lis r4, KERNELBASE@h - ori r4, r4, KERNELBASE@l - stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ - stw r6, 0(r5) - - /* Let's move on */ - lis r4,start_kernel@h - ori r4,r4,start_kernel@l - lis r3,MSR_KERNEL@h - ori r3,r3,MSR_KERNEL@l - mtspr SPRN_SRR0,r4 - mtspr SPRN_SRR1,r3 - rfi /* change context and jump to start_kernel */ - -/* Macros to hide the PTE size differences - * - * FIND_PTE -- walks the page tables given EA & pgdir pointer - * r10 -- EA of fault - * r11 -- PGDIR pointer - * r12 -- free - * label 2: is the bailout case - * - * if we find the pte (fall through): - * r11 is low pte word - * r12 is pointer to the pte - */ -#ifdef CONFIG_PTE_64BIT -#define PTE_FLAGS_OFFSET 4 -#define FIND_PTE \ - rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ - lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ - rlwinm. 
r12, r11, 0, 0, 20; /* Extract pt base address */ \ - beq 2f; /* Bail if no table */ \ - rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ - lwz r11, 4(r12); /* Get pte entry */ -#else -#define PTE_FLAGS_OFFSET 0 -#define FIND_PTE \ - rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \ - lwz r11, 0(r11); /* Get L1 entry */ \ - rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \ - beq 2f; /* Bail if no table */ \ - rlwimi r12, r10, 22, 20, 29; /* Compute PTE address */ \ - lwz r11, 0(r12); /* Get Linux PTE */ -#endif - -/* - * Interrupt vector entry code - * - * The Book E MMUs are always on so we don't need to handle - * interrupts in real mode as with previous PPC processors. In - * this case we handle interrupts in the kernel virtual address - * space. - * - * Interrupt vectors are dynamically placed relative to the - * interrupt prefix as determined by the address of interrupt_base. - * The interrupt vectors offsets are programmed using the labels - * for each interrupt vector entry. - * - * Interrupt vectors must be aligned on a 16 byte boundary. - * We align on a 32 byte cache line boundary for good measure. - */ - -interrupt_base: - /* Critical Input Interrupt */ - CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception) - - /* Machine Check Interrupt */ -#ifdef CONFIG_E200 - /* no RFMCI, MCSRRs on E200 */ - CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception) -#else - MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception) -#endif - - /* Data Storage Interrupt */ - START_EXCEPTION(DataStorage) - mtspr SPRN_SPRG0, r10 /* Save some working registers */ - mtspr SPRN_SPRG1, r11 - mtspr SPRN_SPRG4W, r12 - mtspr SPRN_SPRG5W, r13 - mfcr r11 - mtspr SPRN_SPRG7W, r11 - - /* - * Check if it was a store fault, if not then bail - * because a user tried to access a kernel or - * read-protected page. Otherwise, get the - * offending address and handle it. - */ - mfspr r10, SPRN_ESR - andis. r10, r10, ESR_ST@h - beq 2f - - mfspr r10, SPRN_DEAR /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, TASK_SIZE@h - ori r11, r11, TASK_SIZE@l - cmplw 0, r10, r11 - bge 2f - - /* Get the PGD for the current thread */ -3: - mfspr r11,SPRN_SPRG3 - lwz r11,PGDIR(r11) -4: - FIND_PTE - - /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */ - andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE - cmpwi 0, r13, _PAGE_RW|_PAGE_USER - bne 2f /* Bail if not */ - - /* Update 'changed'. */ - ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE - stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */ - - /* MAS2 not updated as the entry does exist in the tlb, this - fault taken to detect state transition (eg: COW -> DIRTY) - */ - andi. r11, r11, _PAGE_HWEXEC - rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */ - ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */ - - /* update search PID in MAS6, AS = 0 */ - mfspr r12, SPRN_PID0 - slwi r12, r12, 16 - mtspr SPRN_MAS6, r12 - - /* find the TLB index that caused the fault. It has to be here. */ - tlbsx 0, r10 - - /* only update the perm bits, assume the RPN is fine */ - mfspr r12, SPRN_MAS3 - rlwimi r12, r11, 0, 20, 31 - mtspr SPRN_MAS3,r12 - tlbwe - - /* Done...restore registers and get out of here. */ - mfspr r11, SPRN_SPRG7R - mtcr r11 - mfspr r13, SPRN_SPRG5R - mfspr r12, SPRN_SPRG4R - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - rfi /* Force context change */ - -2: - /* - * The bailout. 
Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ - mfspr r11, SPRN_SPRG7R - mtcr r11 - mfspr r13, SPRN_SPRG5R - mfspr r12, SPRN_SPRG4R - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - b data_access - - /* Instruction Storage Interrupt */ - INSTRUCTION_STORAGE_EXCEPTION - - /* External Input Interrupt */ - EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) - - /* Alignment Interrupt */ - ALIGNMENT_EXCEPTION - - /* Program Interrupt */ - PROGRAM_EXCEPTION - - /* Floating Point Unavailable Interrupt */ -#ifdef CONFIG_PPC_FPU - FP_UNAVAILABLE_EXCEPTION -#else -#ifdef CONFIG_E200 - /* E200 treats 'normal' floating point instructions as FP Unavail exception */ - EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE) -#else - EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE) -#endif -#endif - - /* System Call Interrupt */ - START_EXCEPTION(SystemCall) - NORMAL_EXCEPTION_PROLOG - EXC_XFER_EE_LITE(0x0c00, DoSyscall) - - /* Auxillary Processor Unavailable Interrupt */ - EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) - - /* Decrementer Interrupt */ - DECREMENTER_EXCEPTION - - /* Fixed Internal Timer Interrupt */ - /* TODO: Add FIT support */ - EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE) - - /* Watchdog Timer Interrupt */ -#ifdef CONFIG_BOOKE_WDT - CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException) -#else - CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception) -#endif - - /* Data TLB Error Interrupt */ - START_EXCEPTION(DataTLBError) - mtspr SPRN_SPRG0, r10 /* Save some working registers */ - mtspr SPRN_SPRG1, r11 - mtspr SPRN_SPRG4W, r12 - mtspr SPRN_SPRG5W, r13 - mfcr r11 - mtspr SPRN_SPRG7W, r11 - mfspr r10, SPRN_DEAR /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, TASK_SIZE@h - ori r11, r11, TASK_SIZE@l - cmplw 5, r10, r11 - blt 5, 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - - mfspr r12,SPRN_MAS1 /* Set TID to 0 */ - rlwinm r12,r12,0,16,1 - mtspr SPRN_MAS1,r12 - - b 4f - - /* Get the PGD for the current thread */ -3: - mfspr r11,SPRN_SPRG3 - lwz r11,PGDIR(r11) - -4: - FIND_PTE - andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ - beq 2f /* Bail if not present */ - -#ifdef CONFIG_PTE_64BIT - lwz r13, 0(r12) -#endif - ori r11, r11, _PAGE_ACCESSED - stw r11, PTE_FLAGS_OFFSET(r12) - - /* Jump to common tlb load */ - b finish_tlb_load -2: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ - mfspr r11, SPRN_SPRG7R - mtcr r11 - mfspr r13, SPRN_SPRG5R - mfspr r12, SPRN_SPRG4R - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - b data_access - - /* Instruction TLB Error Interrupt */ - /* - * Nearly the same as above, except we get our - * information from different registers and bailout - * to a different point. - */ - START_EXCEPTION(InstructionTLBError) - mtspr SPRN_SPRG0, r10 /* Save some working registers */ - mtspr SPRN_SPRG1, r11 - mtspr SPRN_SPRG4W, r12 - mtspr SPRN_SPRG5W, r13 - mfcr r11 - mtspr SPRN_SPRG7W, r11 - mfspr r10, SPRN_SRR0 /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. 
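
Sketched in C, the TLB miss/error handlers in this file first pick a page-table root and then do the two-level walk that FIND_PTE encodes. The helper below is an illustrative reconstruction of the 32-bit (non-CONFIG_PTE_64BIT) case only; current_pgdir() and the TASK_SIZE value are hypothetical stand-ins, not the file's own code:

        #define TASK_SIZE 0x80000000UL                  /* illustrative user/kernel boundary */
        extern unsigned long swapper_pg_dir[];          /* kernel page directory */
        extern unsigned long *current_pgdir(void);      /* hypothetical: PGDIR loaded from SPRG3 */

        static unsigned long *find_pte_sketch(unsigned long ea)
        {
                unsigned long *pgdir, l1;

                pgdir = (ea >= TASK_SIZE) ? swapper_pg_dir : current_pgdir();
                l1 = pgdir[ea >> 22];                   /* level-1 (pgd/pmd) entry */
                if (!(l1 & 0xfffff000UL))
                        return 0;                       /* no second-level table: bail */
                return (unsigned long *)(l1 & 0xfffff000UL) + ((ea >> 12) & 0x3ff);
        }

The callers then test _PAGE_PRESENT in the returned PTE and fall back to the heavyweight handlers if it is clear.
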
- */ - lis r11, TASK_SIZE@h - ori r11, r11, TASK_SIZE@l - cmplw 5, r10, r11 - blt 5, 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - - mfspr r12,SPRN_MAS1 /* Set TID to 0 */ - rlwinm r12,r12,0,16,1 - mtspr SPRN_MAS1,r12 - - b 4f - - /* Get the PGD for the current thread */ -3: - mfspr r11,SPRN_SPRG3 - lwz r11,PGDIR(r11) - -4: - FIND_PTE - andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ - beq 2f /* Bail if not present */ - -#ifdef CONFIG_PTE_64BIT - lwz r13, 0(r12) -#endif - ori r11, r11, _PAGE_ACCESSED - stw r11, PTE_FLAGS_OFFSET(r12) - - /* Jump to common TLB load point */ - b finish_tlb_load - -2: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ - mfspr r11, SPRN_SPRG7R - mtcr r11 - mfspr r13, SPRN_SPRG5R - mfspr r12, SPRN_SPRG4R - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - b InstructionStorage - -#ifdef CONFIG_SPE - /* SPE Unavailable */ - START_EXCEPTION(SPEUnavailable) - NORMAL_EXCEPTION_PROLOG - bne load_up_spe - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_EE_LITE(0x2010, KernelSPE) -#else - EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE) -#endif /* CONFIG_SPE */ - - /* SPE Floating Point Data */ -#ifdef CONFIG_SPE - EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); -#else - EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE) -#endif /* CONFIG_SPE */ - - /* SPE Floating Point Round */ - EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE) - - /* Performance Monitor */ - EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD) - - - /* Debug Interrupt */ - DEBUG_EXCEPTION - -/* - * Local functions - */ - - /* - * Data TLB exceptions will bail out to this point - * if they can't resolve the lightweight TLB fault. - */ -data_access: - NORMAL_EXCEPTION_PROLOG - mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ - stw r5,_ESR(r11) - mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ - andis. r10,r5,(ESR_ILK|ESR_DLK)@h - bne 1f - EXC_XFER_EE_LITE(0x0300, handle_page_fault) -1: - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_EE_LITE(0x0300, CacheLockingException) - -/* - - * Both the instruction and data TLB miss get to this - * point to load the TLB. - * r10 - EA of fault - * r11 - TLB (info from Linux PTE) - * r12, r13 - available to use - * CR5 - results of addr < TASK_SIZE - * MAS0, MAS1 - loaded with proper value when we get here - * MAS2, MAS3 - will need additional info from Linux PTE - * Upon exit, we reload everything and RFI. - */ -finish_tlb_load: - /* - * We set execute, because we don't have the granularity to - * properly set this at the page level (Linux problem). - * Many of these bits are software only. Bits we don't set - * here we (properly should) assume have the appropriate value. - */ - - mfspr r12, SPRN_MAS2 -#ifdef CONFIG_PTE_64BIT - rlwimi r12, r11, 26, 24, 31 /* extract ...WIMGE from pte */ -#else - rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ -#endif - mtspr SPRN_MAS2, r12 - - bge 5, 1f - - /* is user addr */ - andi. r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC) - andi. 
r10, r11, _PAGE_USER /* Test for _PAGE_USER */ - srwi r10, r12, 1 - or r12, r12, r10 /* Copy user perms into supervisor */ - iseleq r12, 0, r12 - b 2f - - /* is kernel addr */ -1: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */ - ori r12, r12, (MAS3_SX | MAS3_SR) - -#ifdef CONFIG_PTE_64BIT -2: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */ - rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */ - mtspr SPRN_MAS3, r12 -BEGIN_FTR_SECTION - srwi r10, r13, 8 /* grab RPN[8:31] */ - mtspr SPRN_MAS7, r10 -END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS) -#else -2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ - mtspr SPRN_MAS3, r11 -#endif -#ifdef CONFIG_E200 - /* Round robin TLB1 entries assignment */ - mfspr r12, SPRN_MAS0 - - /* Extract TLB1CFG(NENTRY) */ - mfspr r11, SPRN_TLB1CFG - andi. r11, r11, 0xfff - - /* Extract MAS0(NV) */ - andi. r13, r12, 0xfff - addi r13, r13, 1 - cmpw 0, r13, r11 - addi r12, r12, 1 - - /* check if we need to wrap */ - blt 7f - - /* wrap back to first free tlbcam entry */ - lis r13, tlbcam_index@ha - lwz r13, tlbcam_index@l(r13) - rlwimi r12, r13, 0, 20, 31 -7: - mtspr SPRN_MAS0,r12 -#endif /* CONFIG_E200 */ - - tlbwe - - /* Done...restore registers and get out of here. */ - mfspr r11, SPRN_SPRG7R - mtcr r11 - mfspr r13, SPRN_SPRG5R - mfspr r12, SPRN_SPRG4R - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - rfi /* Force context change */ - -#ifdef CONFIG_SPE -/* Note that the SPE support is closely modeled after the AltiVec - * support. Changes to one are likely to be applicable to the - * other! */ -load_up_spe: -/* - * Disable SPE for the task which had SPE previously, - * and save its SPE registers in its thread_struct. - * Enables SPE for use in the kernel on return. - * On SMP we know the SPE units are free, since we give it up every - * switch. -- Kumar - */ - mfmsr r5 - oris r5,r5,MSR_SPE@h - mtmsr r5 /* enable use of SPE now */ - isync -/* - * For SMP, we don't do lazy SPE switching because it just gets too - * horrendously complex, especially when a task switches from one CPU - * to another. Instead we call giveup_spe in switch_to. - */ -#ifndef CONFIG_SMP - lis r3,last_task_used_spe@ha - lwz r4,last_task_used_spe@l(r3) - cmpi 0,r4,0 - beq 1f - addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ - SAVE_32EVRS(0,r10,r4) - evxor evr10, evr10, evr10 /* clear out evr10 */ - evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ - li r5,THREAD_ACC - evstddx evr10, r4, r5 /* save off accumulator */ - lwz r5,PT_REGS(r4) - lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) - lis r10,MSR_SPE@h - andc r4,r4,r10 /* disable SPE for previous task */ - stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) -1: -#endif /* CONFIG_SMP */ - /* enable use of SPE after return */ - oris r9,r9,MSR_SPE@h - mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ - li r4,1 - li r10,THREAD_ACC - stw r4,THREAD_USED_SPE(r5) - evlddx evr4,r10,r5 - evmra evr4,evr4 - REST_32EVRS(0,r10,r5) -#ifndef CONFIG_SMP - subi r4,r5,THREAD - stw r4,last_task_used_spe@l(r3) -#endif /* CONFIG_SMP */ - /* restore registers and return */ -2: REST_4GPRS(3, r11) - lwz r10,_CCR(r11) - REST_GPR(1, r11) - mtcr r10 - lwz r10,_LINK(r11) - mtlr r10 - REST_GPR(10, r11) - mtspr SPRN_SRR1,r9 - mtspr SPRN_SRR0,r12 - REST_GPR(9, r11) - REST_GPR(12, r11) - lwz r11,GPR11(r11) - SYNC - rfi - -/* - * SPE unavailable trap from kernel - print a message, but let - * the task use SPE in the kernel until it returns to user mode. 
- */ -KernelSPE: - lwz r3,_MSR(r1) - oris r3,r3,MSR_SPE@h - stw r3,_MSR(r1) /* enable use of SPE after return */ - lis r3,87f@h - ori r3,r3,87f@l - mr r4,r2 /* current */ - lwz r5,_NIP(r1) - bl printk - b ret_from_except -87: .string "SPE used in kernel (task=%p, pc=%x) \n" - .align 4,0 - -#endif /* CONFIG_SPE */ - -/* - * Global functions - */ - -/* - * extern void loadcam_entry(unsigned int index) - * - * Load TLBCAM[index] entry in to the L2 CAM MMU - */ -_GLOBAL(loadcam_entry) - lis r4,TLBCAM@ha - addi r4,r4,TLBCAM@l - mulli r5,r3,20 - add r3,r5,r4 - lwz r4,0(r3) - mtspr SPRN_MAS0,r4 - lwz r4,4(r3) - mtspr SPRN_MAS1,r4 - lwz r4,8(r3) - mtspr SPRN_MAS2,r4 - lwz r4,12(r3) - mtspr SPRN_MAS3,r4 - tlbwe - isync - blr - -/* - * extern void giveup_altivec(struct task_struct *prev) - * - * The e500 core does not have an AltiVec unit. - */ -_GLOBAL(giveup_altivec) - blr - -#ifdef CONFIG_SPE -/* - * extern void giveup_spe(struct task_struct *prev) - * - */ -_GLOBAL(giveup_spe) - mfmsr r5 - oris r5,r5,MSR_SPE@h - SYNC - mtmsr r5 /* enable use of SPE now */ - isync - cmpi 0,r3,0 - beqlr- /* if no previous owner, done */ - addi r3,r3,THREAD /* want THREAD of task */ - lwz r5,PT_REGS(r3) - cmpi 0,r5,0 - SAVE_32EVRS(0, r4, r3) - evxor evr6, evr6, evr6 /* clear out evr6 */ - evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ - li r4,THREAD_ACC - evstddx evr6, r4, r3 /* save off accumulator */ - mfspr r6,SPRN_SPEFSCR - stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */ - beq 1f - lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) - lis r3,MSR_SPE@h - andc r4,r4,r3 /* disable SPE for previous task */ - stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) -1: -#ifndef CONFIG_SMP - li r5,0 - lis r4,last_task_used_spe@ha - stw r5,last_task_used_spe@l(r4) -#endif /* CONFIG_SMP */ - blr -#endif /* CONFIG_SPE */ - -/* - * extern void giveup_fpu(struct task_struct *prev) - * - * Not all FSL Book-E cores have an FPU - */ -#ifndef CONFIG_PPC_FPU -_GLOBAL(giveup_fpu) - blr -#endif - -/* - * extern void abort(void) - * - * At present, this routine just applies a system reset. - */ -_GLOBAL(abort) - li r13,0 - mtspr SPRN_DBCR0,r13 /* disable all debug events */ - mfmsr r13 - ori r13,r13,MSR_DE@l /* Enable Debug Events */ - mtmsr r13 - mfspr r13,SPRN_DBCR0 - lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h - mtspr SPRN_DBCR0,r13 - -_GLOBAL(set_context) - -#ifdef CONFIG_BDI_SWITCH - /* Context switch the PTE pointer for the Abatron BDI2000. - * The PGDIR is the second parameter. - */ - lis r5, abatron_pteptrs@h - ori r5, r5, abatron_pteptrs@l - stw r4, 0x4(r5) -#endif - mtspr SPRN_PID,r3 - isync /* Force context change */ - blr - -/* - * We put a few things here that have to be page-aligned. This stuff - * goes at the beginning of the data segment, which is page-aligned. - */ - .data - .align 12 - .globl sdata -sdata: - .globl empty_zero_page -empty_zero_page: - .space 4096 - .globl swapper_pg_dir -swapper_pg_dir: - .space 4096 - -/* Reserved 4k for the critical exception stack & 4k for the machine - * check stack per CPU for kernel mode exceptions */ - .section .bss - .align 12 -exception_stack_bottom: - .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS - .globl exception_stack_top -exception_stack_top: - -/* - * This space gets a copy of optional info passed to us by the bootstrap - * which is used to pass parameters into the kernel like root=/dev/sda1, etc. - */ - .globl cmd_line -cmd_line: - .space 512 - -/* - * Room for two PTE pointers, usually the kernel and current user pointers - * to their respective root page table. 
- */ -abatron_pteptrs: - .space 8 diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c deleted file mode 100644 index 11e5b44713f..00000000000 --- a/arch/ppc/kernel/idle.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Idle daemon for PowerPC. Idle daemon will handle any action - * that needs to be taken when the system becomes idle. - * - * Written by Cort Dougan (cort@cs.nmt.edu). Subsequently hacked - * on by Tom Rini, Armin Kuster, Paul Mackerras and others. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -#include <linux/config.h> -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/smp.h> -#include <linux/smp_lock.h> -#include <linux/stddef.h> -#include <linux/unistd.h> -#include <linux/ptrace.h> -#include <linux/slab.h> -#include <linux/sysctl.h> -#include <linux/cpu.h> - -#include <asm/pgtable.h> -#include <asm/uaccess.h> -#include <asm/system.h> -#include <asm/io.h> -#include <asm/mmu.h> -#include <asm/cache.h> -#include <asm/cputable.h> -#include <asm/machdep.h> -#include <asm/smp.h> - -void default_idle(void) -{ - void (*powersave)(void); - int cpu = smp_processor_id(); - - powersave = ppc_md.power_save; - - if (!need_resched()) { - if (powersave != NULL) - powersave(); -#ifdef CONFIG_SMP - else { - set_thread_flag(TIF_POLLING_NRFLAG); - while (!need_resched() && !cpu_is_offline(cpu)) - barrier(); - clear_thread_flag(TIF_POLLING_NRFLAG); - } -#endif - } - if (need_resched()) - schedule(); - if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) - cpu_die(); -} - -/* - * The body of the idle task. - */ -void cpu_idle(void) -{ - for (;;) - if (ppc_md.idle != NULL) - ppc_md.idle(); - else - default_idle(); -} - -#if defined(CONFIG_SYSCTL) && defined(CONFIG_6xx) -/* - * Register the sysctl to set/clear powersave_nap. - */ -extern int powersave_nap; - -static ctl_table powersave_nap_ctl_table[]={ - { - .ctl_name = KERN_PPC_POWERSAVE_NAP, - .procname = "powersave-nap", - .data = &powersave_nap, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { 0, }, -}; -static ctl_table powersave_nap_sysctl_root[] = { - { 1, "kernel", NULL, 0, 0755, powersave_nap_ctl_table, }, - { 0,}, -}; - -static int __init -register_powersave_nap_sysctl(void) -{ - register_sysctl_table(powersave_nap_sysctl_root, 0); - - return 0; -} - -__initcall(register_powersave_nap_sysctl); -#endif diff --git a/arch/ppc/kernel/idle_6xx.S b/arch/ppc/kernel/idle_6xx.S deleted file mode 100644 index 1a2194cf682..00000000000 --- a/arch/ppc/kernel/idle_6xx.S +++ /dev/null @@ -1,233 +0,0 @@ -/* - * This file contains the power_save function for 6xx & 7xxx CPUs - * rewritten in assembler - * - * Warning ! This code assumes that if your machine has a 750fx - * it will have PLL 1 set to low speed mode (used during NAP/DOZE). - * if this is not the case some additional changes will have to - * be done to check a runtime var (a bit like powersave-nap) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include <linux/config.h> -#include <linux/threads.h> -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/cputable.h> -#include <asm/thread_info.h> -#include <asm/ppc_asm.h> -#include <asm/asm-offsets.h> - -#undef DEBUG - - .text - -/* - * Init idle, called at early CPU setup time from head.S for each CPU - * Make sure no rest of NAP mode remains in HID0, save default - * values for some CPU specific registers. Called with r24 - * containing CPU number and r3 reloc offset - */ -_GLOBAL(init_idle_6xx) -BEGIN_FTR_SECTION - mfspr r4,SPRN_HID0 - rlwinm r4,r4,0,10,8 /* Clear NAP */ - mtspr SPRN_HID0, r4 - b 1f -END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) - blr -1: - slwi r5,r24,2 - add r5,r5,r3 -BEGIN_FTR_SECTION - mfspr r4,SPRN_MSSCR0 - addis r6,r5, nap_save_msscr0@ha - stw r4,nap_save_msscr0@l(r6) -END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) -BEGIN_FTR_SECTION - mfspr r4,SPRN_HID1 - addis r6,r5,nap_save_hid1@ha - stw r4,nap_save_hid1@l(r6) -END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) - blr - -/* - * Here is the power_save_6xx function. This could eventually be - * split into several functions & changing the function pointer - * depending on the various features. - */ -_GLOBAL(ppc6xx_idle) - /* Check if we can nap or doze, put HID0 mask in r3 - */ - lis r3, 0 -BEGIN_FTR_SECTION - lis r3,HID0_DOZE@h -END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) -BEGIN_FTR_SECTION - /* We must dynamically check for the NAP feature as it - * can be cleared by CPU init after the fixups are done - */ - lis r4,cur_cpu_spec@ha - lwz r4,cur_cpu_spec@l(r4) - lwz r4,CPU_SPEC_FEATURES(r4) - andi. r0,r4,CPU_FTR_CAN_NAP - beq 1f - /* Now check if user or arch enabled NAP mode */ - lis r4,powersave_nap@ha - lwz r4,powersave_nap@l(r4) - cmpwi 0,r4,0 - beq 1f - lis r3,HID0_NAP@h -1: -END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) - cmpwi 0,r3,0 - beqlr - - /* Clear MSR:EE */ - mfmsr r7 - rlwinm r0,r7,0,17,15 - mtmsr r0 - - /* Check current_thread_info()->flags */ - rlwinm r4,r1,0,0,18 - lwz r4,TI_FLAGS(r4) - andi. r0,r4,_TIF_NEED_RESCHED - beq 1f - mtmsr r7 /* out of line this ? */ - blr -1: - /* Some pre-nap cleanups needed on some CPUs */ - andis. r0,r3,HID0_NAP@h - beq 2f -BEGIN_FTR_SECTION - /* Disable L2 prefetch on some 745x and try to ensure - * L2 prefetch engines are idle. As explained by errata - * text, we can't be sure they are, we just hope very hard - * that well be enough (sic !). At least I noticed Apple - * doesn't even bother doing the dcbf's here... 
- */ - mfspr r4,SPRN_MSSCR0 - rlwinm r4,r4,0,0,29 - sync - mtspr SPRN_MSSCR0,r4 - sync - isync - lis r4,KERNELBASE@h - dcbf 0,r4 - dcbf 0,r4 - dcbf 0,r4 - dcbf 0,r4 -END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) -#ifdef DEBUG - lis r6,nap_enter_count@ha - lwz r4,nap_enter_count@l(r6) - addi r4,r4,1 - stw r4,nap_enter_count@l(r6) -#endif -2: -BEGIN_FTR_SECTION - /* Go to low speed mode on some 750FX */ - lis r4,powersave_lowspeed@ha - lwz r4,powersave_lowspeed@l(r4) - cmpwi 0,r4,0 - beq 1f - mfspr r4,SPRN_HID1 - oris r4,r4,0x0001 - mtspr SPRN_HID1,r4 -1: -END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) - - /* Go to NAP or DOZE now */ - mfspr r4,SPRN_HID0 - lis r5,(HID0_NAP|HID0_SLEEP)@h -BEGIN_FTR_SECTION - oris r5,r5,HID0_DOZE@h -END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) - andc r4,r4,r5 - or r4,r4,r3 -BEGIN_FTR_SECTION - oris r4,r4,HID0_DPM@h /* that should be done once for all */ -END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM) - mtspr SPRN_HID0,r4 -BEGIN_FTR_SECTION - DSSALL - sync -END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - ori r7,r7,MSR_EE /* Could be ommited (already set) */ - oris r7,r7,MSR_POW@h - sync - isync - mtmsr r7 - isync - sync - blr - -/* - * Return from NAP/DOZE mode, restore some CPU specific registers, - * we are called with DR/IR still off and r2 containing physical - * address of current. - */ -_GLOBAL(power_save_6xx_restore) - mfspr r11,SPRN_HID0 - rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */ - cror 4*cr1+eq,4*cr0+eq,4*cr0+eq -BEGIN_FTR_SECTION - rlwinm r11,r11,0,9,7 /* Clear DOZE */ -END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) - mtspr SPRN_HID0, r11 - -#ifdef DEBUG - beq cr1,1f - lis r11,(nap_return_count-KERNELBASE)@ha - lwz r9,nap_return_count@l(r11) - addi r9,r9,1 - stw r9,nap_return_count@l(r11) -1: -#endif - - rlwinm r9,r1,0,0,18 - tophys(r9,r9) - lwz r11,TI_CPU(r9) - slwi r11,r11,2 - /* Todo make sure all these are in the same page - * and load r22 (@ha part + CPU offset) only once - */ -BEGIN_FTR_SECTION - beq cr1,1f - addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha - lwz r9,nap_save_msscr0@l(r9) - mtspr SPRN_MSSCR0, r9 - sync - isync -1: -END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) -BEGIN_FTR_SECTION - addis r9,r11,(nap_save_hid1-KERNELBASE)@ha - lwz r9,nap_save_hid1@l(r9) - mtspr SPRN_HID1, r9 -END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) - b transfer_to_handler_cont - - .data - -_GLOBAL(nap_save_msscr0) - .space 4*NR_CPUS - -_GLOBAL(nap_save_hid1) - .space 4*NR_CPUS - -_GLOBAL(powersave_nap) - .long 0 -_GLOBAL(powersave_lowspeed) - .long 0 - -#ifdef DEBUG -_GLOBAL(nap_enter_count) - .space 4 -_GLOBAL(nap_return_count) - .space 4 -#endif diff --git a/arch/ppc/kernel/idle_power4.S b/arch/ppc/kernel/idle_power4.S deleted file mode 100644 index cc0d535365c..00000000000 --- a/arch/ppc/kernel/idle_power4.S +++ /dev/null @@ -1,91 +0,0 @@ -/* - * This file contains the power_save function for 6xx & 7xxx CPUs - * rewritten in assembler - * - * Warning ! This code assumes that if your machine has a 750fx - * it will have PLL 1 set to low speed mode (used during NAP/DOZE). - * if this is not the case some additional changes will have to - * be done to check a runtime var (a bit like powersave-nap) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include <linux/config.h> -#include <linux/threads.h> -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/cputable.h> -#include <asm/thread_info.h> -#include <asm/ppc_asm.h> -#include <asm/asm-offsets.h> - -#undef DEBUG - - .text - -/* - * Init idle, called at early CPU setup time from head.S for each CPU - * So nothing for now. Called with r24 containing CPU number and r3 - * reloc offset - */ - .globl init_idle_power4 -init_idle_power4: - blr - -/* - * Here is the power_save_6xx function. This could eventually be - * split into several functions & changing the function pointer - * depending on the various features. - */ - .globl power4_idle -power4_idle: -BEGIN_FTR_SECTION - blr -END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) - /* We must dynamically check for the NAP feature as it - * can be cleared by CPU init after the fixups are done - */ - lis r4,cur_cpu_spec@ha - lwz r4,cur_cpu_spec@l(r4) - lwz r4,CPU_SPEC_FEATURES(r4) - andi. r0,r4,CPU_FTR_CAN_NAP - beqlr - /* Now check if user or arch enabled NAP mode */ - lis r4,powersave_nap@ha - lwz r4,powersave_nap@l(r4) - cmpwi 0,r4,0 - beqlr - - /* Clear MSR:EE */ - mfmsr r7 - rlwinm r0,r7,0,17,15 - mtmsr r0 - - /* Check current_thread_info()->flags */ - rlwinm r4,r1,0,0,18 - lwz r4,TI_FLAGS(r4) - andi. r0,r4,_TIF_NEED_RESCHED - beq 1f - mtmsr r7 /* out of line this ? */ - blr -1: - /* Go to NAP now */ -BEGIN_FTR_SECTION - DSSALL - sync -END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - ori r7,r7,MSR_EE /* Could be ommited (already set) */ - oris r7,r7,MSR_POW@h - sync - isync - mtmsr r7 - isync - sync - blr - - .globl powersave_nap -powersave_nap: - .long 0 diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c deleted file mode 100644 index fbb2b9f8922..00000000000 --- a/arch/ppc/kernel/irq.c +++ /dev/null @@ -1,165 +0,0 @@ -/* - * arch/ppc/kernel/irq.c - * - * Derived from arch/i386/kernel/irq.c - * Copyright (C) 1992 Linus Torvalds - * Adapted from arch/i386 by Gary Thomas - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * Updated and modified by Cort Dougan <cort@fsmlabs.com> - * Copyright (C) 1996-2001 Cort Dougan - * Adapted for Power Macintosh by Paul Mackerras - * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) - * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). - * - * This file contains the code used by various IRQ handling routines: - * asking for different IRQ's should be done through these routines - * instead of just grabbing them. Thus setups with different IRQ numbers - * shouldn't result in any weird surprises, and installing new handlers - * should be easier. - * - * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the - * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit - * mask register (of which only 16 are defined), hence the weird shifting - * and complement of the cached_irq_mask. I want to be able to stuff - * this right into the SIU SMASK register. - * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx - * to reduce code space and undefined function references. 
- */ - -#include <linux/errno.h> -#include <linux/module.h> -#include <linux/threads.h> -#include <linux/kernel_stat.h> -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/ptrace.h> -#include <linux/ioport.h> -#include <linux/interrupt.h> -#include <linux/timex.h> -#include <linux/config.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/irq.h> -#include <linux/proc_fs.h> -#include <linux/random.h> -#include <linux/seq_file.h> -#include <linux/cpumask.h> -#include <linux/profile.h> -#include <linux/bitops.h> - -#include <asm/uaccess.h> -#include <asm/system.h> -#include <asm/io.h> -#include <asm/pgtable.h> -#include <asm/irq.h> -#include <asm/cache.h> -#include <asm/prom.h> -#include <asm/ptrace.h> -#include <asm/machdep.h> - -#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) - -extern atomic_t ipi_recv; -extern atomic_t ipi_sent; - -#define MAXCOUNT 10000000 - -int ppc_spurious_interrupts = 0; -struct irqaction *ppc_irq_action[NR_IRQS]; -unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; -unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; -atomic_t ppc_n_lost_interrupts; - -#ifdef CONFIG_TAU_INT -extern int tau_initialized; -extern int tau_interrupts(int); -#endif - -int show_interrupts(struct seq_file *p, void *v) -{ - int i = *(loff_t *) v, j; - struct irqaction * action; - unsigned long flags; - - if (i == 0) { - seq_puts(p, " "); - for (j=0; j<NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "CPU%d ", j); - seq_putc(p, '\n'); - } - - if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); - action = irq_desc[i].action; - if ( !action || !action->handler ) - goto skip; - seq_printf(p, "%3d: ", i); -#ifdef CONFIG_SMP - for (j = 0; j < NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "%10u ", - kstat_cpu(j).irqs[i]); -#else - seq_printf(p, "%10u ", kstat_irqs(i)); -#endif /* CONFIG_SMP */ - if (irq_desc[i].handler) - seq_printf(p, " %s ", irq_desc[i].handler->typename); - else - seq_puts(p, " None "); - seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge "); - seq_printf(p, " %s", action->name); - for (action = action->next; action; action = action->next) - seq_printf(p, ", %s", action->name); - seq_putc(p, '\n'); -skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); - } else if (i == NR_IRQS) { -#ifdef CONFIG_TAU_INT - if (tau_initialized){ - seq_puts(p, "TAU: "); - for (j = 0; j < NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "%10u ", tau_interrupts(j)); - seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); - } -#endif -#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE) - /* should this be per processor send/receive? */ - seq_printf(p, "IPI (recv/sent): %10u/%u\n", - atomic_read(&ipi_recv), atomic_read(&ipi_sent)); -#endif - seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts); - } - return 0; -} - -void do_IRQ(struct pt_regs *regs) -{ - int irq, first = 1; - irq_enter(); - - /* - * Every platform is required to implement ppc_md.get_irq. - * This function will either return an irq number or -1 to - * indicate there are no more pending. But the first time - * through the loop this means there wasn't and IRQ pending. - * The value -2 is for buggy hardware and means that this IRQ - * has already been handled. -- Tom - */ - while ((irq = ppc_md.get_irq(regs)) >= 0) { - __do_IRQ(irq, regs); - first = 0; - } - if (irq != -2 && first) - /* That's not SMP safe ... but who cares ? 
*/ - ppc_spurious_interrupts++; - irq_exit(); -} - -void __init init_IRQ(void) -{ - ppc_md.init_IRQ(); -} diff --git a/arch/ppc/kernel/l2cr.S b/arch/ppc/kernel/l2cr.S deleted file mode 100644 index d7f4e982b53..00000000000 --- a/arch/ppc/kernel/l2cr.S +++ /dev/null @@ -1,471 +0,0 @@ -/* - L2CR functions - Copyright © 1997-1998 by PowerLogix R & D, Inc. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -*/ -/* - Thur, Dec. 12, 1998. - - First public release, contributed by PowerLogix. - *********** - Sat, Aug. 7, 1999. - - Terry: Made sure code disabled interrupts before running. (Previously - it was assumed interrupts were already disabled). - - Terry: Updated for tentative G4 support. 4MB of memory is now flushed - instead of 2MB. (Prob. only 3 is necessary). - - Terry: Updated for workaround to HID0[DPM] processor bug - during global invalidates. - *********** - Thu, July 13, 2000. - - Terry: Added isync to correct for an errata. - - 22 August 2001. - - DanM: Finally added the 7450 patch I've had for the past - several months. The L2CR is similar, but I'm going - to assume the user of this functions knows what they - are doing. - - Author: Terry Greeniaus (tgree@phys.ualberta.ca) - Please e-mail updates to this file to me, thanks! -*/ -#include <linux/config.h> -#include <asm/processor.h> -#include <asm/cputable.h> -#include <asm/ppc_asm.h> -#include <asm/cache.h> -#include <asm/page.h> - -/* Usage: - - When setting the L2CR register, you must do a few special - things. If you are enabling the cache, you must perform a - global invalidate. If you are disabling the cache, you must - flush the cache contents first. This routine takes care of - doing these things. When first enabling the cache, make sure - you pass in the L2CR you want, as well as passing in the - global invalidate bit set. A global invalidate will only be - performed if the L2I bit is set in applyThis. When enabling - the cache, you should also set the L2E bit in applyThis. If - you want to modify the L2CR contents after the cache has been - enabled, the recommended procedure is to first call - __setL2CR(0) to disable the cache and then call it again with - the new values for L2CR. Examples: - - _setL2CR(0) - disables the cache - _setL2CR(0xB3A04000) - enables my G3 upgrade card: - - L2E set to turn on the cache - - L2SIZ set to 1MB - - L2CLK set to 1:1 - - L2RAM set to pipelined synchronous late-write - - L2I set to perform a global invalidation - - L2OH set to 0.5 nS - - L2DF set because this upgrade card - requires it - - A similar call should work for your card. You need to know - the correct setting for your card and then place them in the - fields I have outlined above. Other fields support optional - features, such as L2DO which caches only data, or L2TS which - causes cache pushes from the L1 cache to go to the L2 cache - instead of to main memory. 
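A minimal C rendering (not part of the deleted code) of the reconfiguration procedure described above, using the example G3 value from the comment; the prototypes are assumptions for the sketch, since the real entry points are the assembly routines defined below:

extern unsigned long _get_L2CR(void);
extern int _set_L2CR(unsigned long val);        /* returns -1 if the CPU has no L2CR */

static void reconfigure_l2(void)
{
        unsigned long old = _get_L2CR();        /* current setting, for reference */

        _set_L2CR(0);                   /* disable first; flushes the L2 contents   */
        _set_L2CR(0xB3A04000);          /* example value above: L2E and L2I set, so */
                                        /* the cache is invalidated, then enabled   */
        (void)old;
}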
- -IMPORTANT: - Starting with the 7450, the bits in this register have moved - or behave differently. The Enable, Parity Enable, Size, - and L2 Invalidate are the only bits that have not moved. - The size is read-only for these processors with internal L2 - cache, and the invalidate is a control as well as status. - -- Dan - -*/ -/* - * Summary: this procedure ignores the L2I bit in the value passed in, - * flushes the cache if it was already enabled, always invalidates the - * cache, then enables the cache if the L2E bit is set in the value - * passed in. - * -- paulus. - */ -_GLOBAL(_set_L2CR) - /* Make sure this is a 750 or 7400 chip */ -BEGIN_FTR_SECTION - li r3,-1 - blr -END_FTR_SECTION_IFCLR(CPU_FTR_L2CR) - - mflr r9 - - /* Stop DST streams */ -BEGIN_FTR_SECTION - DSSALL - sync -END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - - /* Turn off interrupts and data relocation. */ - mfmsr r7 /* Save MSR in r7 */ - rlwinm r4,r7,0,17,15 - rlwinm r4,r4,0,28,26 /* Turn off DR bit */ - sync - mtmsr r4 - isync - - /* Before we perform the global invalidation, we must disable dynamic - * power management via HID0[DPM] to work around a processor bug where - * DPM can possibly interfere with the state machine in the processor - * that invalidates the L2 cache tags. - */ - mfspr r8,SPRN_HID0 /* Save HID0 in r8 */ - rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */ - sync - mtspr SPRN_HID0,r4 /* Disable DPM */ - sync - - /* Get the current enable bit of the L2CR into r4 */ - mfspr r4,SPRN_L2CR - - /* Tweak some bits */ - rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */ - rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */ - rlwinm r3,r3,0,1,31 /* Turn off the enable bit */ - - /* Check to see if we need to flush */ - rlwinm. r4,r4,0,0,0 - beq 2f - - /* Flush the cache. First, read the first 4MB of memory (physical) to - * put new data in the cache. (Actually we only need - * the size of the L2 cache plus the size of the L1 cache, but 4MB will - * cover everything just to be safe). - */ - - /**** Might be a good idea to set L2DO here - to prevent instructions - from getting into the cache. But since we invalidate - the next time we enable the cache it doesn't really matter. - Don't do this unless you accomodate all processor variations. - The bit moved on the 7450..... - ****/ - -BEGIN_FTR_SECTION - /* Disable L2 prefetch on some 745x and try to ensure - * L2 prefetch engines are idle. As explained by errata - * text, we can't be sure they are, we just hope very hard - * that well be enough (sic !). At least I noticed Apple - * doesn't even bother doing the dcbf's here... 
- */ - mfspr r4,SPRN_MSSCR0 - rlwinm r4,r4,0,0,29 - sync - mtspr SPRN_MSSCR0,r4 - sync - isync - lis r4,KERNELBASE@h - dcbf 0,r4 - dcbf 0,r4 - dcbf 0,r4 - dcbf 0,r4 -END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) - - /* TODO: use HW flush assist when available */ - - lis r4,0x0002 - mtctr r4 - li r4,0 -1: - lwzx r0,r0,r4 - addi r4,r4,32 /* Go to start of next cache line */ - bdnz 1b - isync - - /* Now, flush the first 4MB of memory */ - lis r4,0x0002 - mtctr r4 - li r4,0 - sync -1: - dcbf 0,r4 - addi r4,r4,32 /* Go to start of next cache line */ - bdnz 1b - -2: - /* Set up the L2CR configuration bits (and switch L2 off) */ - /* CPU errata: Make sure the mtspr below is already in the - * L1 icache - */ - b 20f - .balign L1_CACHE_BYTES -22: - sync - mtspr SPRN_L2CR,r3 - sync - b 23f -20: - b 21f -21: sync - isync - b 22b - -23: - /* Perform a global invalidation */ - oris r3,r3,0x0020 - sync - mtspr SPRN_L2CR,r3 - sync - isync /* For errata */ - -BEGIN_FTR_SECTION - /* On the 7450, we wait for the L2I bit to clear...... - */ -10: mfspr r3,SPRN_L2CR - andis. r4,r3,0x0020 - bne 10b - b 11f -END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) - - /* Wait for the invalidation to complete */ -3: mfspr r3,SPRN_L2CR - rlwinm. r4,r3,0,31,31 - bne 3b - -11: rlwinm r3,r3,0,11,9 /* Turn off the L2I bit */ - sync - mtspr SPRN_L2CR,r3 - sync - - /* See if we need to enable the cache */ - cmplwi r5,0 - beq 4f - - /* Enable the cache */ - oris r3,r3,0x8000 - mtspr SPRN_L2CR,r3 - sync - - /* Enable L2 HW prefetch on 744x/745x */ -BEGIN_FTR_SECTION - mfspr r3,SPRN_MSSCR0 - ori r3,r3,3 - sync - mtspr SPRN_MSSCR0,r3 - sync - isync -END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) -4: - - /* Restore HID0[DPM] to whatever it was before */ - sync - mtspr 1008,r8 - sync - - /* Restore MSR (restores EE and DR bits to original state) */ - SYNC - mtmsr r7 - isync - - mtlr r9 - blr - -_GLOBAL(_get_L2CR) - /* Return the L2CR contents */ - li r3,0 -BEGIN_FTR_SECTION - mfspr r3,SPRN_L2CR -END_FTR_SECTION_IFSET(CPU_FTR_L2CR) - blr - - -/* - * Here is a similar routine for dealing with the L3 cache - * on the 745x family of chips - */ - -_GLOBAL(_set_L3CR) - /* Make sure this is a 745x chip */ -BEGIN_FTR_SECTION - li r3,-1 - blr -END_FTR_SECTION_IFCLR(CPU_FTR_L3CR) - - /* Turn off interrupts and data relocation. */ - mfmsr r7 /* Save MSR in r7 */ - rlwinm r4,r7,0,17,15 - rlwinm r4,r4,0,28,26 /* Turn off DR bit */ - sync - mtmsr r4 - isync - - /* Stop DST streams */ - DSSALL - sync - - /* Get the current enable bit of the L3CR into r4 */ - mfspr r4,SPRN_L3CR - - /* Tweak some bits */ - rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */ - rlwinm r3,r3,0,22,20 /* Turn off the invalidate bit */ - rlwinm r3,r3,0,2,31 /* Turn off the enable & PE bits */ - rlwinm r3,r3,0,5,3 /* Turn off the clken bit */ - /* Check to see if we need to flush */ - rlwinm. r4,r4,0,0,0 - beq 2f - - /* Flush the cache. - */ - - /* TODO: use HW flush assist */ - - lis r4,0x0008 - mtctr r4 - li r4,0 -1: - lwzx r0,r0,r4 - dcbf 0,r4 - addi r4,r4,32 /* Go to start of next cache line */ - bdnz 1b - -2: - /* Set up the L3CR configuration bits (and switch L3 off) */ - sync - mtspr SPRN_L3CR,r3 - sync - - oris r3,r3,L3CR_L3RES@h /* Set reserved bit 5 */ - mtspr SPRN_L3CR,r3 - sync - oris r3,r3,L3CR_L3CLKEN@h /* Set clken */ - mtspr SPRN_L3CR,r3 - sync - - /* Wait for stabilize */ - li r0,256 - mtctr r0 -1: bdnz 1b - - /* Perform a global invalidation */ - ori r3,r3,0x0400 - sync - mtspr SPRN_L3CR,r3 - sync - isync - - /* We wait for the L3I bit to clear...... 
*/ -10: mfspr r3,SPRN_L3CR - andi. r4,r3,0x0400 - bne 10b - - /* Clear CLKEN */ - rlwinm r3,r3,0,5,3 /* Turn off the clken bit */ - mtspr SPRN_L3CR,r3 - sync - - /* Wait for stabilize */ - li r0,256 - mtctr r0 -1: bdnz 1b - - /* See if we need to enable the cache */ - cmplwi r5,0 - beq 4f - - /* Enable the cache */ - oris r3,r3,(L3CR_L3E | L3CR_L3CLKEN)@h - mtspr SPRN_L3CR,r3 - sync - - /* Wait for stabilize */ - li r0,256 - mtctr r0 -1: bdnz 1b - - /* Restore MSR (restores EE and DR bits to original state) */ -4: SYNC - mtmsr r7 - isync - blr - -_GLOBAL(_get_L3CR) - /* Return the L3CR contents */ - li r3,0 -BEGIN_FTR_SECTION - mfspr r3,SPRN_L3CR -END_FTR_SECTION_IFSET(CPU_FTR_L3CR) - blr - -/* --- End of PowerLogix code --- - */ - - -/* flush_disable_L1() - Flush and disable L1 cache - * - * clobbers r0, r3, ctr, cr0 - * Must be called with interrupts disabled and MMU enabled. - */ -_GLOBAL(__flush_disable_L1) - /* Stop pending alitvec streams and memory accesses */ -BEGIN_FTR_SECTION - DSSALL -END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - sync - - /* Load counter to 0x4000 cache lines (512k) and - * load cache with datas - */ - li r3,0x4000 /* 512kB / 32B */ - mtctr r3 - lis r3,KERNELBASE@h -1: - lwz r0,0(r3) - addi r3,r3,0x0020 /* Go to start of next cache line */ - bdnz 1b - isync - sync - - /* Now flush those cache lines */ - li r3,0x4000 /* 512kB / 32B */ - mtctr r3 - lis r3,KERNELBASE@h -1: - dcbf 0,r3 - addi r3,r3,0x0020 /* Go to start of next cache line */ - bdnz 1b - sync - - /* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */ - mfspr r3,SPRN_HID0 - rlwinm r3,r3,0,18,15 - mtspr SPRN_HID0,r3 - sync - isync - blr - -/* inval_enable_L1 - Invalidate and enable L1 cache - * - * Assumes L1 is already disabled and MSR:EE is off - * - * clobbers r3 - */ -_GLOBAL(__inval_enable_L1) - /* Enable and then Flash inval the instruction & data cache */ - mfspr r3,SPRN_HID0 - ori r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI - sync - isync - mtspr SPRN_HID0,r3 - xori r3,r3, HID0_ICFI|HID0_DCI - mtspr SPRN_HID0,r3 - sync - - blr - - diff --git a/arch/ppc/kernel/machine_kexec.c b/arch/ppc/kernel/machine_kexec.c deleted file mode 100644 index a882b0dbe8d..00000000000 --- a/arch/ppc/kernel/machine_kexec.c +++ /dev/null @@ -1,124 +0,0 @@ -/* - * machine_kexec.c - handle transition of Linux booting another kernel - * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> - * - * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz - * - * This source code is licensed under the GNU General Public License, - * Version 2. See the file COPYING for more details. - */ - -#include <linux/mm.h> -#include <linux/kexec.h> -#include <linux/delay.h> -#include <linux/reboot.h> -#include <asm/pgtable.h> -#include <asm/pgalloc.h> -#include <asm/mmu_context.h> -#include <asm/io.h> -#include <asm/hw_irq.h> -#include <asm/cacheflush.h> -#include <asm/machdep.h> - -typedef NORET_TYPE void (*relocate_new_kernel_t)( - unsigned long indirection_page, - unsigned long reboot_code_buffer, - unsigned long start_address) ATTRIB_NORET; - -const extern unsigned char relocate_new_kernel[]; -const extern unsigned int relocate_new_kernel_size; - -/* - * Provide a dummy crash_notes definition while crash dump arrives to ppc. - * This prevents breakage of crash_notes attribute in kernel/ksysfs.c. 
- */ -note_buf_t crash_notes[NR_CPUS]; - -void machine_shutdown(void) -{ - if (ppc_md.machine_shutdown) - ppc_md.machine_shutdown(); -} - -void machine_crash_shutdown(struct pt_regs *regs) -{ - if (ppc_md.machine_crash_shutdown) - ppc_md.machine_crash_shutdown(); -} - -/* - * Do what every setup is needed on image and the - * reboot code buffer to allow us to avoid allocations - * later. - */ -int machine_kexec_prepare(struct kimage *image) -{ - if (ppc_md.machine_kexec_prepare) - return ppc_md.machine_kexec_prepare(image); - /* - * Fail if platform doesn't provide its own machine_kexec_prepare - * implementation. - */ - return -ENOSYS; -} - -void machine_kexec_cleanup(struct kimage *image) -{ - if (ppc_md.machine_kexec_cleanup) - ppc_md.machine_kexec_cleanup(image); -} - -/* - * Do not allocate memory (or fail in any way) in machine_kexec(). - * We are past the point of no return, committed to rebooting now. - */ -NORET_TYPE void machine_kexec(struct kimage *image) -{ - if (ppc_md.machine_kexec) - ppc_md.machine_kexec(image); - else { - /* - * Fall back to normal restart if platform doesn't provide - * its own kexec function, and user insist to kexec... - */ - machine_restart(NULL); - } - for(;;); -} - -/* - * This is a generic machine_kexec function suitable at least for - * non-OpenFirmware embedded platforms. - * It merely copies the image relocation code to the control page and - * jumps to it. - * A platform specific function may just call this one. - */ -void machine_kexec_simple(struct kimage *image) -{ - unsigned long page_list; - unsigned long reboot_code_buffer, reboot_code_buffer_phys; - relocate_new_kernel_t rnk; - - /* Interrupts aren't acceptable while we reboot */ - local_irq_disable(); - - page_list = image->head; - - /* we need both effective and real address here */ - reboot_code_buffer = - (unsigned long)page_address(image->control_code_page); - reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer); - - /* copy our kernel relocation code to the control code page */ - memcpy((void *)reboot_code_buffer, relocate_new_kernel, - relocate_new_kernel_size); - - flush_icache_range(reboot_code_buffer, - reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE); - printk(KERN_INFO "Bye!\n"); - - /* now call it */ - rnk = (relocate_new_kernel_t) reboot_code_buffer; - (*rnk)(page_list, reboot_code_buffer_phys, image->start); -} - diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S deleted file mode 100644 index 3056ede2424..00000000000 --- a/arch/ppc/kernel/misc.S +++ /dev/null @@ -1,1300 +0,0 @@ -/* - * This file contains miscellaneous low-level functions. - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * - * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) - * and Paul Mackerras. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- * - */ - -#include <linux/config.h> -#include <linux/sys.h> -#include <asm/unistd.h> -#include <asm/errno.h> -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/cache.h> -#include <asm/cputable.h> -#include <asm/mmu.h> -#include <asm/ppc_asm.h> -#include <asm/thread_info.h> -#include <asm/asm-offsets.h> - - .text - - .align 5 -_GLOBAL(__delay) - cmpwi 0,r3,0 - mtctr r3 - beqlr -1: bdnz 1b - blr - -/* - * Returns (address we're running at) - (address we were linked at) - * for use before the text and data are mapped to KERNELBASE. - */ -_GLOBAL(reloc_offset) - mflr r0 - bl 1f -1: mflr r3 - lis r4,1b@ha - addi r4,r4,1b@l - subf r3,r4,r3 - mtlr r0 - blr - -/* - * add_reloc_offset(x) returns x + reloc_offset(). - */ -_GLOBAL(add_reloc_offset) - mflr r0 - bl 1f -1: mflr r5 - lis r4,1b@ha - addi r4,r4,1b@l - subf r5,r4,r5 - add r3,r3,r5 - mtlr r0 - blr - -/* - * sub_reloc_offset(x) returns x - reloc_offset(). - */ -_GLOBAL(sub_reloc_offset) - mflr r0 - bl 1f -1: mflr r5 - lis r4,1b@ha - addi r4,r4,1b@l - subf r5,r4,r5 - subf r3,r5,r3 - mtlr r0 - blr - -/* - * reloc_got2 runs through the .got2 section adding an offset - * to each entry. - */ -_GLOBAL(reloc_got2) - mflr r11 - lis r7,__got2_start@ha - addi r7,r7,__got2_start@l - lis r8,__got2_end@ha - addi r8,r8,__got2_end@l - subf r8,r7,r8 - srwi. r8,r8,2 - beqlr - mtctr r8 - bl 1f -1: mflr r0 - lis r4,1b@ha - addi r4,r4,1b@l - subf r0,r4,r0 - add r7,r0,r7 -2: lwz r0,0(r7) - add r0,r0,r3 - stw r0,0(r7) - addi r7,r7,4 - bdnz 2b - mtlr r11 - blr - -/* - * identify_cpu, - * called with r3 = data offset and r4 = CPU number - * doesn't change r3 - */ -_GLOBAL(identify_cpu) - addis r8,r3,cpu_specs@ha - addi r8,r8,cpu_specs@l - mfpvr r7 -1: - lwz r5,CPU_SPEC_PVR_MASK(r8) - and r5,r5,r7 - lwz r6,CPU_SPEC_PVR_VALUE(r8) - cmplw 0,r6,r5 - beq 1f - addi r8,r8,CPU_SPEC_ENTRY_SIZE - b 1b -1: - addis r6,r3,cur_cpu_spec@ha - addi r6,r6,cur_cpu_spec@l - sub r8,r8,r3 - stw r8,0(r6) - blr - -/* - * do_cpu_ftr_fixups - goes through the list of CPU feature fixups - * and writes nop's over sections of code that don't apply for this cpu. - * r3 = data offset (not changed) - */ -_GLOBAL(do_cpu_ftr_fixups) - /* Get CPU 0 features */ - addis r6,r3,cur_cpu_spec@ha - addi r6,r6,cur_cpu_spec@l - lwz r4,0(r6) - add r4,r4,r3 - lwz r4,CPU_SPEC_FEATURES(r4) - - /* Get the fixup table */ - addis r6,r3,__start___ftr_fixup@ha - addi r6,r6,__start___ftr_fixup@l - addis r7,r3,__stop___ftr_fixup@ha - addi r7,r7,__stop___ftr_fixup@l - - /* Do the fixup */ -1: cmplw 0,r6,r7 - bgelr - addi r6,r6,16 - lwz r8,-16(r6) /* mask */ - and r8,r8,r4 - lwz r9,-12(r6) /* value */ - cmplw 0,r8,r9 - beq 1b - lwz r8,-8(r6) /* section begin */ - lwz r9,-4(r6) /* section end */ - subf. r9,r8,r9 - beq 1b - /* write nops over the section of code */ - /* todo: if large section, add a branch at the start of it */ - srwi r9,r9,2 - mtctr r9 - add r8,r8,r3 - lis r0,0x60000000@h /* nop */ -3: stw r0,0(r8) - andi. 
r10,r4,CPU_FTR_SPLIT_ID_CACHE@l - beq 2f - dcbst 0,r8 /* suboptimal, but simpler */ - sync - icbi 0,r8 -2: addi r8,r8,4 - bdnz 3b - sync /* additional sync needed on g4 */ - isync - b 1b - -/* - * call_setup_cpu - call the setup_cpu function for this cpu - * r3 = data offset, r24 = cpu number - * - * Setup function is called with: - * r3 = data offset - * r4 = ptr to CPU spec (relocated) - */ -_GLOBAL(call_setup_cpu) - addis r4,r3,cur_cpu_spec@ha - addi r4,r4,cur_cpu_spec@l - lwz r4,0(r4) - add r4,r4,r3 - lwz r5,CPU_SPEC_SETUP(r4) - cmpi 0,r5,0 - add r5,r5,r3 - beqlr - mtctr r5 - bctr - -#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx) - -/* This gets called by via-pmu.c to switch the PLL selection - * on 750fx CPU. This function should really be moved to some - * other place (as most of the cpufreq code in via-pmu - */ -_GLOBAL(low_choose_750fx_pll) - /* Clear MSR:EE */ - mfmsr r7 - rlwinm r0,r7,0,17,15 - mtmsr r0 - - /* If switching to PLL1, disable HID0:BTIC */ - cmplwi cr0,r3,0 - beq 1f - mfspr r5,SPRN_HID0 - rlwinm r5,r5,0,27,25 - sync - mtspr SPRN_HID0,r5 - isync - sync - -1: - /* Calc new HID1 value */ - mfspr r4,SPRN_HID1 /* Build a HID1:PS bit from parameter */ - rlwinm r5,r3,16,15,15 /* Clear out HID1:PS from value read */ - rlwinm r4,r4,0,16,14 /* Could have I used rlwimi here ? */ - or r4,r4,r5 - mtspr SPRN_HID1,r4 - - /* Store new HID1 image */ - rlwinm r6,r1,0,0,18 - lwz r6,TI_CPU(r6) - slwi r6,r6,2 - addis r6,r6,nap_save_hid1@ha - stw r4,nap_save_hid1@l(r6) - - /* If switching to PLL0, enable HID0:BTIC */ - cmplwi cr0,r3,0 - bne 1f - mfspr r5,SPRN_HID0 - ori r5,r5,HID0_BTIC - sync - mtspr SPRN_HID0,r5 - isync - sync - -1: - /* Return */ - mtmsr r7 - blr - -_GLOBAL(low_choose_7447a_dfs) - /* Clear MSR:EE */ - mfmsr r7 - rlwinm r0,r7,0,17,15 - mtmsr r0 - - /* Calc new HID1 value */ - mfspr r4,SPRN_HID1 - insrwi r4,r3,1,9 /* insert parameter into bit 9 */ - sync - mtspr SPRN_HID1,r4 - sync - isync - - /* Return */ - mtmsr r7 - blr - -#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */ - -/* - * complement mask on the msr then "or" some values on. - * _nmask_and_or_msr(nmask, value_to_or) - */ -_GLOBAL(_nmask_and_or_msr) - mfmsr r0 /* Get current msr */ - andc r0,r0,r3 /* And off the bits set in r3 (first parm) */ - or r0,r0,r4 /* Or on the bits in r4 (second parm) */ - SYNC /* Some chip revs have problems here... 
*/ - mtmsr r0 /* Update machine state */ - isync - blr /* Done */ - - -/* - * Flush MMU TLB - */ -_GLOBAL(_tlbia) -#if defined(CONFIG_40x) - sync /* Flush to memory before changing mapping */ - tlbia - isync /* Flush shadow TLB */ -#elif defined(CONFIG_44x) - li r3,0 - sync - - /* Load high watermark */ - lis r4,tlb_44x_hwater@ha - lwz r5,tlb_44x_hwater@l(r4) - -1: tlbwe r3,r3,PPC44x_TLB_PAGEID - addi r3,r3,1 - cmpw 0,r3,r5 - ble 1b - - isync -#elif defined(CONFIG_FSL_BOOKE) - /* Invalidate all entries in TLB0 */ - li r3, 0x04 - tlbivax 0,3 - /* Invalidate all entries in TLB1 */ - li r3, 0x0c - tlbivax 0,3 - /* Invalidate all entries in TLB2 */ - li r3, 0x14 - tlbivax 0,3 - /* Invalidate all entries in TLB3 */ - li r3, 0x1c - tlbivax 0,3 - msync -#ifdef CONFIG_SMP - tlbsync -#endif /* CONFIG_SMP */ -#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */ -#if defined(CONFIG_SMP) - rlwinm r8,r1,0,0,18 - lwz r8,TI_CPU(r8) - oris r8,r8,10 - mfmsr r10 - SYNC - rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ - rlwinm r0,r0,0,28,26 /* clear DR */ - mtmsr r0 - SYNC_601 - isync - lis r9,mmu_hash_lock@h - ori r9,r9,mmu_hash_lock@l - tophys(r9,r9) -10: lwarx r7,0,r9 - cmpwi 0,r7,0 - bne- 10b - stwcx. r8,0,r9 - bne- 10b - sync - tlbia - sync - TLBSYNC - li r0,0 - stw r0,0(r9) /* clear mmu_hash_lock */ - mtmsr r10 - SYNC_601 - isync -#else /* CONFIG_SMP */ - sync - tlbia - sync -#endif /* CONFIG_SMP */ -#endif /* ! defined(CONFIG_40x) */ - blr - -/* - * Flush MMU TLB for a particular address - */ -_GLOBAL(_tlbie) -#if defined(CONFIG_40x) - tlbsx. r3, 0, r3 - bne 10f - sync - /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear. - * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate - * the TLB entry. */ - tlbwe r3, r3, TLB_TAG - isync -10: -#elif defined(CONFIG_44x) - mfspr r4,SPRN_MMUCR - mfspr r5,SPRN_PID /* Get PID */ - rlwimi r4,r5,0,24,31 /* Set TID */ - mtspr SPRN_MMUCR,r4 - - tlbsx. r3, 0, r3 - bne 10f - sync - /* There are only 64 TLB entries, so r3 < 64, - * which means bit 22, is clear. Since 22 is - * the V bit in the TLB_PAGEID, loading this - * value will invalidate the TLB entry. - */ - tlbwe r3, r3, PPC44x_TLB_PAGEID - isync -10: -#elif defined(CONFIG_FSL_BOOKE) - rlwinm r4, r3, 0, 0, 19 - ori r5, r4, 0x08 /* TLBSEL = 1 */ - ori r6, r4, 0x10 /* TLBSEL = 2 */ - ori r7, r4, 0x18 /* TLBSEL = 3 */ - tlbivax 0, r4 - tlbivax 0, r5 - tlbivax 0, r6 - tlbivax 0, r7 - msync -#if defined(CONFIG_SMP) - tlbsync -#endif /* CONFIG_SMP */ -#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */ -#if defined(CONFIG_SMP) - rlwinm r8,r1,0,0,18 - lwz r8,TI_CPU(r8) - oris r8,r8,11 - mfmsr r10 - SYNC - rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ - rlwinm r0,r0,0,28,26 /* clear DR */ - mtmsr r0 - SYNC_601 - isync - lis r9,mmu_hash_lock@h - ori r9,r9,mmu_hash_lock@l - tophys(r9,r9) -10: lwarx r7,0,r9 - cmpwi 0,r7,0 - bne- 10b - stwcx. r8,0,r9 - bne- 10b - eieio - tlbie r3 - sync - TLBSYNC - li r0,0 - stw r0,0(r9) /* clear mmu_hash_lock */ - mtmsr r10 - SYNC_601 - isync -#else /* CONFIG_SMP */ - tlbie r3 - sync -#endif /* CONFIG_SMP */ -#endif /* ! CONFIG_40x */ - blr - -/* - * Flush instruction cache. - * This is a no-op on the 601. 
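The range-flush helpers further below (flush_icache_range, clean_dcache_range, flush_dcache_range, invalidate_dcache_range) all compute their loop count the same way with andc/subf/add/srwi; a plain-C sketch of that arithmetic (not from the deleted code, assuming the usual 32-byte L1 line for these cores):

#include <stdio.h>

#define L1_CACHE_BYTES  32UL
#define L1_CACHE_SHIFT  5

static unsigned long cache_lines_spanned(unsigned long start, unsigned long stop)
{
        unsigned long aligned = start & ~(L1_CACHE_BYTES - 1);  /* andc: round start down */
        return (stop - aligned + L1_CACHE_BYTES - 1) >> L1_CACHE_SHIFT; /* round up to lines */
}

int main(void)
{
        printf("%lu\n", cache_lines_spanned(0x1000, 0x1001));   /* 1: inside one line */
        printf("%lu\n", cache_lines_spanned(0x101c, 0x1024));   /* 2: straddles a boundary */
        return 0;
}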
- */ -_GLOBAL(flush_instruction_cache) -#if defined(CONFIG_8xx) - isync - lis r5, IDC_INVALL@h - mtspr SPRN_IC_CST, r5 -#elif defined(CONFIG_4xx) -#ifdef CONFIG_403GCX - li r3, 512 - mtctr r3 - lis r4, KERNELBASE@h -1: iccci 0, r4 - addi r4, r4, 16 - bdnz 1b -#else - lis r3, KERNELBASE@h - iccci 0,r3 -#endif -#elif CONFIG_FSL_BOOKE -BEGIN_FTR_SECTION - mfspr r3,SPRN_L1CSR0 - ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC - /* msync; isync recommended here */ - mtspr SPRN_L1CSR0,r3 - isync - blr -END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) - mfspr r3,SPRN_L1CSR1 - ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR - mtspr SPRN_L1CSR1,r3 -#else - mfspr r3,SPRN_PVR - rlwinm r3,r3,16,16,31 - cmpwi 0,r3,1 - beqlr /* for 601, do nothing */ - /* 603/604 processor - use invalidate-all bit in HID0 */ - mfspr r3,SPRN_HID0 - ori r3,r3,HID0_ICFI - mtspr SPRN_HID0,r3 -#endif /* CONFIG_8xx/4xx */ - isync - blr - -/* - * Write any modified data cache blocks out to memory - * and invalidate the corresponding instruction cache blocks. - * This is a no-op on the 601. - * - * flush_icache_range(unsigned long start, unsigned long stop) - */ -_GLOBAL(flush_icache_range) -BEGIN_FTR_SECTION - blr /* for 601, do nothing */ -END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) - li r5,L1_CACHE_BYTES-1 - andc r3,r3,r5 - subf r4,r3,r4 - add r4,r4,r5 - srwi. r4,r4,L1_CACHE_SHIFT - beqlr - mtctr r4 - mr r6,r3 -1: dcbst 0,r3 - addi r3,r3,L1_CACHE_BYTES - bdnz 1b - sync /* wait for dcbst's to get to ram */ - mtctr r4 -2: icbi 0,r6 - addi r6,r6,L1_CACHE_BYTES - bdnz 2b - sync /* additional sync needed on g4 */ - isync - blr -/* - * Write any modified data cache blocks out to memory. - * Does not invalidate the corresponding cache lines (especially for - * any corresponding instruction cache). - * - * clean_dcache_range(unsigned long start, unsigned long stop) - */ -_GLOBAL(clean_dcache_range) - li r5,L1_CACHE_BYTES-1 - andc r3,r3,r5 - subf r4,r3,r4 - add r4,r4,r5 - srwi. r4,r4,L1_CACHE_SHIFT - beqlr - mtctr r4 - -1: dcbst 0,r3 - addi r3,r3,L1_CACHE_BYTES - bdnz 1b - sync /* wait for dcbst's to get to ram */ - blr - -/* - * Write any modified data cache blocks out to memory and invalidate them. - * Does not invalidate the corresponding instruction cache blocks. - * - * flush_dcache_range(unsigned long start, unsigned long stop) - */ -_GLOBAL(flush_dcache_range) - li r5,L1_CACHE_BYTES-1 - andc r3,r3,r5 - subf r4,r3,r4 - add r4,r4,r5 - srwi. r4,r4,L1_CACHE_SHIFT - beqlr - mtctr r4 - -1: dcbf 0,r3 - addi r3,r3,L1_CACHE_BYTES - bdnz 1b - sync /* wait for dcbst's to get to ram */ - blr - -/* - * Like above, but invalidate the D-cache. This is used by the 8xx - * to invalidate the cache so the PPC core doesn't get stale data - * from the CPM (no cache snooping here :-). - * - * invalidate_dcache_range(unsigned long start, unsigned long stop) - */ -_GLOBAL(invalidate_dcache_range) - li r5,L1_CACHE_BYTES-1 - andc r3,r3,r5 - subf r4,r3,r4 - add r4,r4,r5 - srwi. r4,r4,L1_CACHE_SHIFT - beqlr - mtctr r4 - -1: dcbi 0,r3 - addi r3,r3,L1_CACHE_BYTES - bdnz 1b - sync /* wait for dcbi's to get to ram */ - blr - -#ifdef CONFIG_NOT_COHERENT_CACHE -/* - * 40x cores have 8K or 16K dcache and 32 byte line size. - * 44x has a 32K dcache and 32 byte line size. - * 8xx has 1, 2, 4, 8K variants. - * For now, cover the worst case of the 44x. - * Must be called with external interrupts disabled. 
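flush_dcache_all below has no flush-by-address primitive to lean on, so it displaces the whole data cache by loading one word from every 32-byte line across 2 * CACHE_NWAYS * CACHE_NLINES = 2048 lines, i.e. 64 KB, twice the 32 KB worst-case (44x) dcache it covers. A C sketch of the same idiom (not from the deleted code; the kernel version reads from KERNELBASE, here the base address is a parameter):

static void flush_dcache_by_displacement(const volatile unsigned long *base)
{
        unsigned long i, sink = 0;

        /* 2 * CACHE_NWAYS * CACHE_NLINES lines, one load per 32-byte line */
        for (i = 0; i < 2 * 64 * 16; i++)
                sink += base[i * (32 / sizeof(unsigned long))];
        (void)sink;
}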
- */ -#define CACHE_NWAYS 64 -#define CACHE_NLINES 16 - -_GLOBAL(flush_dcache_all) - li r4, (2 * CACHE_NWAYS * CACHE_NLINES) - mtctr r4 - lis r5, KERNELBASE@h -1: lwz r3, 0(r5) /* Load one word from every line */ - addi r5, r5, L1_CACHE_BYTES - bdnz 1b - blr -#endif /* CONFIG_NOT_COHERENT_CACHE */ - -/* - * Flush a particular page from the data cache to RAM. - * Note: this is necessary because the instruction cache does *not* - * snoop from the data cache. - * This is a no-op on the 601 which has a unified cache. - * - * void __flush_dcache_icache(void *page) - */ -_GLOBAL(__flush_dcache_icache) -BEGIN_FTR_SECTION - blr /* for 601, do nothing */ -END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) - rlwinm r3,r3,0,0,19 /* Get page base address */ - li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ - mtctr r4 - mr r6,r3 -0: dcbst 0,r3 /* Write line to ram */ - addi r3,r3,L1_CACHE_BYTES - bdnz 0b - sync - mtctr r4 -1: icbi 0,r6 - addi r6,r6,L1_CACHE_BYTES - bdnz 1b - sync - isync - blr - -/* - * Flush a particular page from the data cache to RAM, identified - * by its physical address. We turn off the MMU so we can just use - * the physical address (this may be a highmem page without a kernel - * mapping). - * - * void __flush_dcache_icache_phys(unsigned long physaddr) - */ -_GLOBAL(__flush_dcache_icache_phys) -BEGIN_FTR_SECTION - blr /* for 601, do nothing */ -END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) - mfmsr r10 - rlwinm r0,r10,0,28,26 /* clear DR */ - mtmsr r0 - isync - rlwinm r3,r3,0,0,19 /* Get page base address */ - li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ - mtctr r4 - mr r6,r3 -0: dcbst 0,r3 /* Write line to ram */ - addi r3,r3,L1_CACHE_BYTES - bdnz 0b - sync - mtctr r4 -1: icbi 0,r6 - addi r6,r6,L1_CACHE_BYTES - bdnz 1b - sync - mtmsr r10 /* restore DR */ - isync - blr - -/* - * Clear pages using the dcbz instruction, which doesn't cause any - * memory traffic (except to write out any cache lines which get - * displaced). This only works on cacheable memory. - * - * void clear_pages(void *page, int order) ; - */ -_GLOBAL(clear_pages) - li r0,4096/L1_CACHE_BYTES - slw r0,r0,r4 - mtctr r0 -#ifdef CONFIG_8xx - li r4, 0 -1: stw r4, 0(r3) - stw r4, 4(r3) - stw r4, 8(r3) - stw r4, 12(r3) -#else -1: dcbz 0,r3 -#endif - addi r3,r3,L1_CACHE_BYTES - bdnz 1b - blr - -/* - * Copy a whole page. We use the dcbz instruction on the destination - * to reduce memory traffic (it eliminates the unnecessary reads of - * the destination into cache). This requires that the destination - * is cacheable. 
- */ -#define COPY_16_BYTES \ - lwz r6,4(r4); \ - lwz r7,8(r4); \ - lwz r8,12(r4); \ - lwzu r9,16(r4); \ - stw r6,4(r3); \ - stw r7,8(r3); \ - stw r8,12(r3); \ - stwu r9,16(r3) - -_GLOBAL(copy_page) - addi r3,r3,-4 - addi r4,r4,-4 - -#ifdef CONFIG_8xx - /* don't use prefetch on 8xx */ - li r0,4096/L1_CACHE_BYTES - mtctr r0 -1: COPY_16_BYTES - bdnz 1b - blr - -#else /* not 8xx, we can prefetch */ - li r5,4 - -#if MAX_COPY_PREFETCH > 1 - li r0,MAX_COPY_PREFETCH - li r11,4 - mtctr r0 -11: dcbt r11,r4 - addi r11,r11,L1_CACHE_BYTES - bdnz 11b -#else /* MAX_COPY_PREFETCH == 1 */ - dcbt r5,r4 - li r11,L1_CACHE_BYTES+4 -#endif /* MAX_COPY_PREFETCH */ - li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH - crclr 4*cr0+eq -2: - mtctr r0 -1: - dcbt r11,r4 - dcbz r5,r3 - COPY_16_BYTES -#if L1_CACHE_BYTES >= 32 - COPY_16_BYTES -#if L1_CACHE_BYTES >= 64 - COPY_16_BYTES - COPY_16_BYTES -#if L1_CACHE_BYTES >= 128 - COPY_16_BYTES - COPY_16_BYTES - COPY_16_BYTES - COPY_16_BYTES -#endif -#endif -#endif - bdnz 1b - beqlr - crnot 4*cr0+eq,4*cr0+eq - li r0,MAX_COPY_PREFETCH - li r11,4 - b 2b -#endif /* CONFIG_8xx */ - -/* - * void atomic_clear_mask(atomic_t mask, atomic_t *addr) - * void atomic_set_mask(atomic_t mask, atomic_t *addr); - */ -_GLOBAL(atomic_clear_mask) -10: lwarx r5,0,r4 - andc r5,r5,r3 - PPC405_ERR77(0,r4) - stwcx. r5,0,r4 - bne- 10b - blr -_GLOBAL(atomic_set_mask) -10: lwarx r5,0,r4 - or r5,r5,r3 - PPC405_ERR77(0,r4) - stwcx. r5,0,r4 - bne- 10b - blr - -/* - * I/O string operations - * - * insb(port, buf, len) - * outsb(port, buf, len) - * insw(port, buf, len) - * outsw(port, buf, len) - * insl(port, buf, len) - * outsl(port, buf, len) - * insw_ns(port, buf, len) - * outsw_ns(port, buf, len) - * insl_ns(port, buf, len) - * outsl_ns(port, buf, len) - * - * The *_ns versions don't do byte-swapping. - */ -_GLOBAL(_insb) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,1 - blelr- -00: lbz r5,0(r3) - eieio - stbu r5,1(r4) - bdnz 00b - blr - -_GLOBAL(_outsb) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,1 - blelr- -00: lbzu r5,1(r4) - stb r5,0(r3) - eieio - bdnz 00b - blr - -_GLOBAL(_insw) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhbrx r5,0,r3 - eieio - sthu r5,2(r4) - bdnz 00b - blr - -_GLOBAL(_outsw) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhzu r5,2(r4) - eieio - sthbrx r5,0,r3 - bdnz 00b - blr - -_GLOBAL(_insl) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwbrx r5,0,r3 - eieio - stwu r5,4(r4) - bdnz 00b - blr - -_GLOBAL(_outsl) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwzu r5,4(r4) - stwbrx r5,0,r3 - eieio - bdnz 00b - blr - -_GLOBAL(__ide_mm_insw) -_GLOBAL(_insw_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhz r5,0(r3) - eieio - sthu r5,2(r4) - bdnz 00b - blr - -_GLOBAL(__ide_mm_outsw) -_GLOBAL(_outsw_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhzu r5,2(r4) - sth r5,0(r3) - eieio - bdnz 00b - blr - -_GLOBAL(__ide_mm_insl) -_GLOBAL(_insl_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwz r5,0(r3) - eieio - stwu r5,4(r4) - bdnz 00b - blr - -_GLOBAL(__ide_mm_outsl) -_GLOBAL(_outsl_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwzu r5,4(r4) - stw r5,0(r3) - eieio - bdnz 00b - blr - -/* - * Extended precision shifts. - * - * Updated to be valid for shift counts from 0 to 63 inclusive. 
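__lshrdi3 below performs a 64-bit logical right shift on a value held in two 32-bit registers, handling counts 0-63 branch-free with srw/slw; the same decomposition written with explicit branches and checked against a native 64-bit shift (a standalone sketch, not from the deleted code):

#include <assert.h>
#include <stdint.h>

static void lshrdi3_by_halves(uint32_t *hi, uint32_t *lo, unsigned int count)
{
        if (count == 0)
                return;
        if (count < 32) {
                *lo = (*lo >> count) | (*hi << (32 - count));
                *hi >>= count;
        } else {                                /* counts 32..63 */
                *lo = *hi >> (count - 32);
                *hi = 0;
        }
}

int main(void)
{
        uint64_t x = 0x89abcdef01234567ULL;
        unsigned int c;

        for (c = 0; c < 64; c++) {
                uint32_t hi = x >> 32, lo = (uint32_t)x;

                lshrdi3_by_halves(&hi, &lo, c);
                assert((((uint64_t)hi << 32) | lo) == (x >> c));
        }
        return 0;
}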
- * -- Gabriel - * - * R3/R4 has 64 bit value - * R5 has shift count - * result in R3/R4 - * - * ashrdi3: arithmetic right shift (sign propagation) - * lshrdi3: logical right shift - * ashldi3: left shift - */ -_GLOBAL(__ashrdi3) - subfic r6,r5,32 - srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count - addi r7,r5,32 # could be xori, or addi with -32 - slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count) - rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0 - sraw r7,r3,r7 # t2 = MSW >> (count-32) - or r4,r4,r6 # LSW |= t1 - slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2 - sraw r3,r3,r5 # MSW = MSW >> count - or r4,r4,r7 # LSW |= t2 - blr - -_GLOBAL(__ashldi3) - subfic r6,r5,32 - slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count - addi r7,r5,32 # could be xori, or addi with -32 - srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count) - slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32) - or r3,r3,r6 # MSW |= t1 - slw r4,r4,r5 # LSW = LSW << count - or r3,r3,r7 # MSW |= t2 - blr - -_GLOBAL(__lshrdi3) - subfic r6,r5,32 - srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count - addi r7,r5,32 # could be xori, or addi with -32 - slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count) - srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32) - or r4,r4,r6 # LSW |= t1 - srw r3,r3,r5 # MSW = MSW >> count - or r4,r4,r7 # LSW |= t2 - blr - -_GLOBAL(abs) - srawi r4,r3,31 - xor r3,r3,r4 - sub r3,r3,r4 - blr - -_GLOBAL(_get_SP) - mr r3,r1 /* Close enough */ - blr - -/* - * Create a kernel thread - * kernel_thread(fn, arg, flags) - */ -_GLOBAL(kernel_thread) - stwu r1,-16(r1) - stw r30,8(r1) - stw r31,12(r1) - mr r30,r3 /* function */ - mr r31,r4 /* argument */ - ori r3,r5,CLONE_VM /* flags */ - oris r3,r3,CLONE_UNTRACED>>16 - li r4,0 /* new sp (unused) */ - li r0,__NR_clone - sc - cmpwi 0,r3,0 /* parent or child? */ - bne 1f /* return if parent */ - li r0,0 /* make top-level stack frame */ - stwu r0,-16(r1) - mtlr r30 /* fn addr in lr */ - mr r3,r31 /* load arg and call fn */ - PPC440EP_ERR42 - blrl - li r0,__NR_exit /* exit if function returns */ - li r3,0 - sc -1: lwz r30,8(r1) - lwz r31,12(r1) - addi r1,r1,16 - blr - -/* - * This routine is just here to keep GCC happy - sigh... - */ -_GLOBAL(__main) - blr - -#define SYSCALL(name) \ -_GLOBAL(name) \ - li r0,__NR_##name; \ - sc; \ - bnslr; \ - lis r4,errno@ha; \ - stw r3,errno@l(r4); \ - li r3,-1; \ - blr - -SYSCALL(execve) - -/* Why isn't this a) automatic, b) written in 'C'? 
*/ - .data - .align 4 -_GLOBAL(sys_call_table) - .long sys_restart_syscall /* 0 */ - .long sys_exit - .long ppc_fork - .long sys_read - .long sys_write - .long sys_open /* 5 */ - .long sys_close - .long sys_waitpid - .long sys_creat - .long sys_link - .long sys_unlink /* 10 */ - .long sys_execve - .long sys_chdir - .long sys_time - .long sys_mknod - .long sys_chmod /* 15 */ - .long sys_lchown - .long sys_ni_syscall /* old break syscall holder */ - .long sys_stat - .long sys_lseek - .long sys_getpid /* 20 */ - .long sys_mount - .long sys_oldumount - .long sys_setuid - .long sys_getuid - .long sys_stime /* 25 */ - .long sys_ptrace - .long sys_alarm - .long sys_fstat - .long sys_pause - .long sys_utime /* 30 */ - .long sys_ni_syscall /* old stty syscall holder */ - .long sys_ni_syscall /* old gtty syscall holder */ - .long sys_access - .long sys_nice - .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */ - .long sys_sync - .long sys_kill - .long sys_rename - .long sys_mkdir - .long sys_rmdir /* 40 */ - .long sys_dup - .long sys_pipe - .long sys_times - .long sys_ni_syscall /* old prof syscall holder */ - .long sys_brk /* 45 */ - .long sys_setgid - .long sys_getgid - .long sys_signal - .long sys_geteuid - .long sys_getegid /* 50 */ - .long sys_acct - .long sys_umount /* recycled never used phys() */ - .long sys_ni_syscall /* old lock syscall holder */ - .long sys_ioctl - .long sys_fcntl /* 55 */ - .long sys_ni_syscall /* old mpx syscall holder */ - .long sys_setpgid - .long sys_ni_syscall /* old ulimit syscall holder */ - .long sys_olduname - .long sys_umask /* 60 */ - .long sys_chroot - .long sys_ustat - .long sys_dup2 - .long sys_getppid - .long sys_getpgrp /* 65 */ - .long sys_setsid - .long sys_sigaction - .long sys_sgetmask - .long sys_ssetmask - .long sys_setreuid /* 70 */ - .long sys_setregid - .long ppc_sigsuspend - .long sys_sigpending - .long sys_sethostname - .long sys_setrlimit /* 75 */ - .long sys_old_getrlimit - .long sys_getrusage - .long sys_gettimeofday - .long sys_settimeofday - .long sys_getgroups /* 80 */ - .long sys_setgroups - .long ppc_select - .long sys_symlink - .long sys_lstat - .long sys_readlink /* 85 */ - .long sys_uselib - .long sys_swapon - .long sys_reboot - .long old_readdir - .long sys_mmap /* 90 */ - .long sys_munmap - .long sys_truncate - .long sys_ftruncate - .long sys_fchmod - .long sys_fchown /* 95 */ - .long sys_getpriority - .long sys_setpriority - .long sys_ni_syscall /* old profil syscall holder */ - .long sys_statfs - .long sys_fstatfs /* 100 */ - .long sys_ni_syscall - .long sys_socketcall - .long sys_syslog - .long sys_setitimer - .long sys_getitimer /* 105 */ - .long sys_newstat - .long sys_newlstat - .long sys_newfstat - .long sys_uname - .long sys_ni_syscall /* 110 */ - .long sys_vhangup - .long sys_ni_syscall /* old 'idle' syscall */ - .long sys_ni_syscall - .long sys_wait4 - .long sys_swapoff /* 115 */ - .long sys_sysinfo - .long sys_ipc - .long sys_fsync - .long sys_sigreturn - .long ppc_clone /* 120 */ - .long sys_setdomainname - .long sys_newuname - .long sys_ni_syscall - .long sys_adjtimex - .long sys_mprotect /* 125 */ - .long sys_sigprocmask - .long sys_ni_syscall /* old sys_create_module */ - .long sys_init_module - .long sys_delete_module - .long sys_ni_syscall /* old sys_get_kernel_syms */ /* 130 */ - .long sys_quotactl - .long sys_getpgid - .long sys_fchdir - .long sys_bdflush - .long sys_sysfs /* 135 */ - .long sys_personality - .long sys_ni_syscall /* for afs_syscall */ - .long sys_setfsuid - .long sys_setfsgid - .long 
sys_llseek /* 140 */ - .long sys_getdents - .long ppc_select - .long sys_flock - .long sys_msync - .long sys_readv /* 145 */ - .long sys_writev - .long sys_getsid - .long sys_fdatasync - .long sys_sysctl - .long sys_mlock /* 150 */ - .long sys_munlock - .long sys_mlockall - .long sys_munlockall - .long sys_sched_setparam - .long sys_sched_getparam /* 155 */ - .long sys_sched_setscheduler - .long sys_sched_getscheduler - .long sys_sched_yield - .long sys_sched_get_priority_max - .long sys_sched_get_priority_min /* 160 */ - .long sys_sched_rr_get_interval - .long sys_nanosleep - .long sys_mremap - .long sys_setresuid - .long sys_getresuid /* 165 */ - .long sys_ni_syscall /* old sys_query_module */ - .long sys_poll - .long sys_nfsservctl - .long sys_setresgid - .long sys_getresgid /* 170 */ - .long sys_prctl - .long sys_rt_sigreturn - .long sys_rt_sigaction - .long sys_rt_sigprocmask - .long sys_rt_sigpending /* 175 */ - .long sys_rt_sigtimedwait - .long sys_rt_sigqueueinfo - .long ppc_rt_sigsuspend - .long sys_pread64 - .long sys_pwrite64 /* 180 */ - .long sys_chown - .long sys_getcwd - .long sys_capget - .long sys_capset - .long sys_sigaltstack /* 185 */ - .long sys_sendfile - .long sys_ni_syscall /* streams1 */ - .long sys_ni_syscall /* streams2 */ - .long ppc_vfork - .long sys_getrlimit /* 190 */ - .long sys_readahead - .long sys_mmap2 - .long sys_truncate64 - .long sys_ftruncate64 - .long sys_stat64 /* 195 */ - .long sys_lstat64 - .long sys_fstat64 - .long sys_pciconfig_read - .long sys_pciconfig_write - .long sys_pciconfig_iobase /* 200 */ - .long sys_ni_syscall /* 201 - reserved - MacOnLinux - new */ - .long sys_getdents64 - .long sys_pivot_root - .long sys_fcntl64 - .long sys_madvise /* 205 */ - .long sys_mincore - .long sys_gettid - .long sys_tkill - .long sys_setxattr - .long sys_lsetxattr /* 210 */ - .long sys_fsetxattr - .long sys_getxattr - .long sys_lgetxattr - .long sys_fgetxattr - .long sys_listxattr /* 215 */ - .long sys_llistxattr - .long sys_flistxattr - .long sys_removexattr - .long sys_lremovexattr - .long sys_fremovexattr /* 220 */ - .long sys_futex - .long sys_sched_setaffinity - .long sys_sched_getaffinity - .long sys_ni_syscall - .long sys_ni_syscall /* 225 - reserved for Tux */ - .long sys_sendfile64 - .long sys_io_setup - .long sys_io_destroy - .long sys_io_getevents - .long sys_io_submit /* 230 */ - .long sys_io_cancel - .long sys_set_tid_address - .long sys_fadvise64 - .long sys_exit_group - .long sys_lookup_dcookie /* 235 */ - .long sys_epoll_create - .long sys_epoll_ctl - .long sys_epoll_wait - .long sys_remap_file_pages - .long sys_timer_create /* 240 */ - .long sys_timer_settime - .long sys_timer_gettime - .long sys_timer_getoverrun - .long sys_timer_delete - .long sys_clock_settime /* 245 */ - .long sys_clock_gettime - .long sys_clock_getres - .long sys_clock_nanosleep - .long ppc_swapcontext - .long sys_tgkill /* 250 */ - .long sys_utimes - .long sys_statfs64 - .long sys_fstatfs64 - .long ppc_fadvise64_64 - .long sys_ni_syscall /* 255 - rtas (used on ppc64) */ - .long sys_debug_setcontext - .long sys_ni_syscall /* 257 reserved for vserver */ - .long sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */ - .long sys_ni_syscall /* 259 reserved for new sys_mbind */ - .long sys_ni_syscall /* 260 reserved for new sys_get_mempolicy */ - .long sys_ni_syscall /* 261 reserved for new sys_set_mempolicy */ - .long sys_mq_open - .long sys_mq_unlink - .long sys_mq_timedsend - .long sys_mq_timedreceive /* 265 */ - .long sys_mq_notify - .long sys_mq_getsetattr - 
.long sys_kexec_load - .long sys_add_key - .long sys_request_key /* 270 */ - .long sys_keyctl - .long sys_waitid - .long sys_ioprio_set - .long sys_ioprio_get - .long sys_inotify_init /* 275 */ - .long sys_inotify_add_watch - .long sys_inotify_rm_watch diff --git a/arch/ppc/kernel/module.c b/arch/ppc/kernel/module.c deleted file mode 100644 index 92f4e5f64f0..00000000000 --- a/arch/ppc/kernel/module.c +++ /dev/null @@ -1,320 +0,0 @@ -/* Kernel module help for PPC. - Copyright (C) 2001 Rusty Russell. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -#include <linux/module.h> -#include <linux/moduleloader.h> -#include <linux/elf.h> -#include <linux/vmalloc.h> -#include <linux/fs.h> -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/cache.h> - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(fmt , ...) -#endif - -LIST_HEAD(module_bug_list); - -void *module_alloc(unsigned long size) -{ - if (size == 0) - return NULL; - return vmalloc(size); -} - -/* Free memory returned from module_alloc */ -void module_free(struct module *mod, void *module_region) -{ - vfree(module_region); - /* FIXME: If module_region == mod->init_region, trim exception - table entries. */ -} - -/* Count how many different relocations (different symbol, different - addend) */ -static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num) -{ - unsigned int i, j, ret = 0; - - /* Sure, this is order(n^2), but it's usually short, and not - time critical */ - for (i = 0; i < num; i++) { - for (j = 0; j < i; j++) { - /* If this addend appeared before, it's - already been counted */ - if (ELF32_R_SYM(rela[i].r_info) - == ELF32_R_SYM(rela[j].r_info) - && rela[i].r_addend == rela[j].r_addend) - break; - } - if (j == i) ret++; - } - return ret; -} - -/* Get the potential trampolines size required of the init and - non-init sections */ -static unsigned long get_plt_size(const Elf32_Ehdr *hdr, - const Elf32_Shdr *sechdrs, - const char *secstrings, - int is_init) -{ - unsigned long ret = 0; - unsigned i; - - /* Everything marked ALLOC (this includes the exported - symbols) */ - for (i = 1; i < hdr->e_shnum; i++) { - /* If it's called *.init*, and we're not init, we're - not interested */ - if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0) - != is_init) - continue; - - /* We don't want to look at debug sections. */ - if (strstr(secstrings + sechdrs[i].sh_name, ".debug") != 0) - continue; - - if (sechdrs[i].sh_type == SHT_RELA) { - DEBUGP("Found relocations in section %u\n", i); - DEBUGP("Ptr: %p. 
Number: %u\n", - (void *)hdr + sechdrs[i].sh_offset, - sechdrs[i].sh_size / sizeof(Elf32_Rela)); - ret += count_relocs((void *)hdr - + sechdrs[i].sh_offset, - sechdrs[i].sh_size - / sizeof(Elf32_Rela)) - * sizeof(struct ppc_plt_entry); - } - } - - return ret; -} - -int module_frob_arch_sections(Elf32_Ehdr *hdr, - Elf32_Shdr *sechdrs, - char *secstrings, - struct module *me) -{ - unsigned int i; - - /* Find .plt and .init.plt sections */ - for (i = 0; i < hdr->e_shnum; i++) { - if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0) - me->arch.init_plt_section = i; - else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0) - me->arch.core_plt_section = i; - } - if (!me->arch.core_plt_section || !me->arch.init_plt_section) { - printk("Module doesn't contain .plt or .init.plt sections.\n"); - return -ENOEXEC; - } - - /* Override their sizes */ - sechdrs[me->arch.core_plt_section].sh_size - = get_plt_size(hdr, sechdrs, secstrings, 0); - sechdrs[me->arch.init_plt_section].sh_size - = get_plt_size(hdr, sechdrs, secstrings, 1); - return 0; -} - -int apply_relocate(Elf32_Shdr *sechdrs, - const char *strtab, - unsigned int symindex, - unsigned int relsec, - struct module *module) -{ - printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", - module->name); - return -ENOEXEC; -} - -static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) -{ - if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16) - && entry->jump[1] == 0x396b0000 + (val & 0xffff)) - return 1; - return 0; -} - -/* Set up a trampoline in the PLT to bounce us to the distant function */ -static uint32_t do_plt_call(void *location, - Elf32_Addr val, - Elf32_Shdr *sechdrs, - struct module *mod) -{ - struct ppc_plt_entry *entry; - - DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); - /* Init, or core PLT? */ - if (location >= mod->module_core - && location < mod->module_core + mod->core_size) - entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; - else - entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; - - /* Find this entry, or if that fails, the next avail. entry */ - while (entry->jump[0]) { - if (entry_matches(entry, val)) return (uint32_t)entry; - entry++; - } - - /* Stolen from Paul Mackerras as well... */ - entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */ - entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/ - entry->jump[2] = 0x7d6903a6; /* mtctr r11 */ - entry->jump[3] = 0x4e800420; /* bctr */ - - DEBUGP("Initialized plt for 0x%x at %p\n", val, entry); - return (uint32_t)entry; -} - -int apply_relocate_add(Elf32_Shdr *sechdrs, - const char *strtab, - unsigned int symindex, - unsigned int relsec, - struct module *module) -{ - unsigned int i; - Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; - Elf32_Sym *sym; - uint32_t *location; - uint32_t value; - - DEBUGP("Applying ADD relocate section %u to %u\n", relsec, - sechdrs[relsec].sh_info); - for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { - /* This is where to make the change */ - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr - + rela[i].r_offset; - /* This is the symbol it is referring to. Note that all - undefined symbols have been resolved. */ - sym = (Elf32_Sym *)sechdrs[symindex].sh_addr - + ELF32_R_SYM(rela[i].r_info); - /* `Everything is relative'. 
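Both the PLT trampoline above (lis r11,sym@ha; addi r11,r11,sym@l) and the R_PPC_ADDR16_HA case below add 0x8000 before taking the high half because addi sign-extends its 16-bit immediate: a low half with bit 15 set subtracts, so the high half must be pre-rounded up. A standalone check of that identity (not from the deleted code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t vals[] = { 0xc0123abc, 0xc012babc };   /* low half below / above 0x8000 */
        unsigned int i;

        for (i = 0; i < 2; i++) {
                uint32_t val = vals[i];
                uint16_t ha = (val + 0x8000) >> 16;     /* rounded high half: sym@ha */
                uint32_t lo16 = val & 0xffff;           /* raw low half: sym@l       */
                int32_t lo = lo16 - ((lo16 & 0x8000) ? 0x10000 : 0);    /* addi sign-extends */

                assert(((uint32_t)ha << 16) + lo == val);
        }
        return 0;
}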
*/ - value = sym->st_value + rela[i].r_addend; - - switch (ELF32_R_TYPE(rela[i].r_info)) { - case R_PPC_ADDR32: - /* Simply set it */ - *(uint32_t *)location = value; - break; - - case R_PPC_ADDR16_LO: - /* Low half of the symbol */ - *(uint16_t *)location = value; - break; - - case R_PPC_ADDR16_HA: - /* Sign-adjusted lower 16 bits: PPC ELF ABI says: - (((x >> 16) + ((x & 0x8000) ? 1 : 0))) & 0xFFFF. - This is the same, only sane. - */ - *(uint16_t *)location = (value + 0x8000) >> 16; - break; - - case R_PPC_REL24: - if ((int)(value - (uint32_t)location) < -0x02000000 - || (int)(value - (uint32_t)location) >= 0x02000000) - value = do_plt_call(location, value, - sechdrs, module); - - /* Only replace bits 2 through 26 */ - DEBUGP("REL24 value = %08X. location = %08X\n", - value, (uint32_t)location); - DEBUGP("Location before: %08X.\n", - *(uint32_t *)location); - *(uint32_t *)location - = (*(uint32_t *)location & ~0x03fffffc) - | ((value - (uint32_t)location) - & 0x03fffffc); - DEBUGP("Location after: %08X.\n", - *(uint32_t *)location); - DEBUGP("ie. jump to %08X+%08X = %08X\n", - *(uint32_t *)location & 0x03fffffc, - (uint32_t)location, - (*(uint32_t *)location & 0x03fffffc) - + (uint32_t)location); - break; - - case R_PPC_REL32: - /* 32-bit relative jump. */ - *(uint32_t *)location = value - (uint32_t)location; - break; - - default: - printk("%s: unknown ADD relocation: %u\n", - module->name, - ELF32_R_TYPE(rela[i].r_info)); - return -ENOEXEC; - } - } - return 0; -} - -int module_finalize(const Elf_Ehdr *hdr, - const Elf_Shdr *sechdrs, - struct module *me) -{ - char *secstrings; - unsigned int i; - - me->arch.bug_table = NULL; - me->arch.num_bugs = 0; - - /* Find the __bug_table section, if present */ - secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; - for (i = 1; i < hdr->e_shnum; i++) { - if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table")) - continue; - me->arch.bug_table = (void *) sechdrs[i].sh_addr; - me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry); - break; - } - - /* - * Strictly speaking this should have a spinlock to protect against - * traversals, but since we only traverse on BUG()s, a spinlock - * could potentially lead to deadlock and thus be counter-productive. - */ - list_add(&me->arch.bug_list, &module_bug_list); - - return 0; -} - -void module_arch_cleanup(struct module *mod) -{ - list_del(&mod->arch.bug_list); -} - -struct bug_entry *module_find_bug(unsigned long bugaddr) -{ - struct mod_arch_specific *mod; - unsigned int i; - struct bug_entry *bug; - - list_for_each_entry(mod, &module_bug_list, bug_list) { - bug = mod->bug_table; - for (i = 0; i < mod->num_bugs; ++i, ++bug) - if (bugaddr == bug->bug_addr) - return bug; - } - return NULL; -} diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c deleted file mode 100644 index e8f4e576750..00000000000 --- a/arch/ppc/kernel/pci.c +++ /dev/null @@ -1,1889 +0,0 @@ -/* - * Common pmac/prep/chrp pci routines. -- Cort - */ - -#include <linux/config.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/capability.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/bootmem.h> - -#include <asm/processor.h> -#include <asm/io.h> -#include <asm/prom.h> -#include <asm/sections.h> -#include <asm/pci-bridge.h> -#include <asm/byteorder.h> -#include <asm/irq.h> -#include <asm/uaccess.h> -#include <asm/machdep.h> - -#undef DEBUG - -#ifdef DEBUG -#define DBG(x...) 
printk(x) -#else -#define DBG(x...) -#endif - -unsigned long isa_io_base = 0; -unsigned long isa_mem_base = 0; -unsigned long pci_dram_offset = 0; -int pcibios_assign_bus_offset = 1; - -void pcibios_make_OF_bus_map(void); - -static int pci_relocate_bridge_resource(struct pci_bus *bus, int i); -static int probe_resource(struct pci_bus *parent, struct resource *pr, - struct resource *res, struct resource **conflict); -static void update_bridge_base(struct pci_bus *bus, int i); -static void pcibios_fixup_resources(struct pci_dev* dev); -static void fixup_broken_pcnet32(struct pci_dev* dev); -static int reparent_resources(struct resource *parent, struct resource *res); -static void fixup_rev1_53c810(struct pci_dev* dev); -static void fixup_cpc710_pci64(struct pci_dev* dev); -#ifdef CONFIG_PPC_OF -static u8* pci_to_OF_bus_map; -#endif - -/* By default, we don't re-assign bus numbers. We do this only on - * some pmacs - */ -int pci_assign_all_buses; - -struct pci_controller* hose_head; -struct pci_controller** hose_tail = &hose_head; - -static int pci_bus_count; - -static void -fixup_rev1_53c810(struct pci_dev* dev) -{ - /* rev 1 ncr53c810 chips don't set the class at all which means - * they don't get their resources remapped. Fix that here. - */ - - if ((dev->class == PCI_CLASS_NOT_DEFINED)) { - printk("NCR 53c810 rev 1 detected, setting PCI class.\n"); - dev->class = PCI_CLASS_STORAGE_SCSI; - } -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); - -static void -fixup_broken_pcnet32(struct pci_dev* dev) -{ - if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) { - dev->vendor = PCI_VENDOR_ID_AMD; - pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD); - } -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32); - -static void -fixup_cpc710_pci64(struct pci_dev* dev) -{ - /* Hide the PCI64 BARs from the kernel as their content doesn't - * fit well in the resource management - */ - dev->resource[0].start = dev->resource[0].end = 0; - dev->resource[0].flags = 0; - dev->resource[1].start = dev->resource[1].end = 0; - dev->resource[1].flags = 0; -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64); - -static void -pcibios_fixup_resources(struct pci_dev *dev) -{ - struct pci_controller* hose = (struct pci_controller *)dev->sysdata; - int i; - unsigned long offset; - - if (!hose) { - printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev)); - return; - } - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { - struct resource *res = dev->resource + i; - if (!res->flags) - continue; - if (res->end == 0xffffffff) { - DBG("PCI:%s Resource %d [%08lx-%08lx] is unassigned\n", - pci_name(dev), i, res->start, res->end); - res->end -= res->start; - res->start = 0; - res->flags |= IORESOURCE_UNSET; - continue; - } - offset = 0; - if (res->flags & IORESOURCE_MEM) { - offset = hose->pci_mem_offset; - } else if (res->flags & IORESOURCE_IO) { - offset = (unsigned long) hose->io_base_virt - - isa_io_base; - } - if (offset != 0) { - res->start += offset; - res->end += offset; -#ifdef DEBUG - printk("Fixup res %d (%lx) of dev %s: %lx -> %lx\n", - i, res->flags, pci_name(dev), - res->start - offset, res->start); -#endif - } - } - - /* Call machine specific resource fixup */ - if (ppc_md.pcibios_fixup_resources) - ppc_md.pcibios_fixup_resources(dev); -} -DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); - -void pcibios_resource_to_bus(struct pci_dev *dev, struct 
pci_bus_region *region, - struct resource *res) -{ - unsigned long offset = 0; - struct pci_controller *hose = dev->sysdata; - - if (hose && res->flags & IORESOURCE_IO) - offset = (unsigned long)hose->io_base_virt - isa_io_base; - else if (hose && res->flags & IORESOURCE_MEM) - offset = hose->pci_mem_offset; - region->start = res->start - offset; - region->end = res->end - offset; -} -EXPORT_SYMBOL(pcibios_resource_to_bus); - -void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, - struct pci_bus_region *region) -{ - unsigned long offset = 0; - struct pci_controller *hose = dev->sysdata; - - if (hose && res->flags & IORESOURCE_IO) - offset = (unsigned long)hose->io_base_virt - isa_io_base; - else if (hose && res->flags & IORESOURCE_MEM) - offset = hose->pci_mem_offset; - res->start = region->start + offset; - res->end = region->end + offset; -} -EXPORT_SYMBOL(pcibios_bus_to_resource); - -/* - * We need to avoid collisions with `mirrored' VGA ports - * and other strange ISA hardware, so we always want the - * addresses to be allocated in the 0x000-0x0ff region - * modulo 0x400. - * - * Why? Because some silly external IO cards only decode - * the low 10 bits of the IO address. The 0x00-0xff region - * is reserved for motherboard devices that decode all 16 - * bits, so it's ok to allocate at, say, 0x2800-0x28ff, - * but we want to try to avoid allocating at 0x2900-0x2bff - * which might have be mirrored at 0x0100-0x03ff.. - */ -void pcibios_align_resource(void *data, struct resource *res, unsigned long size, - unsigned long align) -{ - struct pci_dev *dev = data; - - if (res->flags & IORESOURCE_IO) { - unsigned long start = res->start; - - if (size > 0x100) { - printk(KERN_ERR "PCI: I/O Region %s/%d too large" - " (%ld bytes)\n", pci_name(dev), - dev->resource - res, size); - } - - if (start & 0x300) { - start = (start + 0x3ff) & ~0x3ff; - res->start = start; - } - } -} -EXPORT_SYMBOL(pcibios_align_resource); - -/* - * Handle resources of PCI devices. If the world were perfect, we could - * just allocate all the resource regions and do nothing more. It isn't. - * On the other hand, we cannot just re-allocate all devices, as it would - * require us to know lots of host bridge internals. So we attempt to - * keep as much of the original configuration as possible, but tweak it - * when it's found to be wrong. - * - * Known BIOS problems we have to work around: - * - I/O or memory regions not configured - * - regions configured, but not enabled in the command register - * - bogus I/O addresses above 64K used - * - expansion ROMs left enabled (this may sound harmless, but given - * the fact the PCI specs explicitly allow address decoders to be - * shared between expansion ROMs and other resource regions, it's - * at least dangerous) - * - * Our solution: - * (1) Allocate resources for all buses behind PCI-to-PCI bridges. - * This gives us fixed barriers on where we can allocate. - * (2) Allocate resources for all enabled devices. If there is - * a collision, just mark the resource as unallocated. Also - * disable expansion ROMs during this step. - * (3) Try to allocate resources for disabled devices. If the - * resources were assigned correctly, everything goes well, - * if they weren't, they won't disturb allocation of other - * resources. - * (4) Assign new addresses to resources which were either - * not configured at all or misconfigured. If explicitly - * requested by the user, configure expansion ROM address - * as well. 
- */ - -static void __init -pcibios_allocate_bus_resources(struct list_head *bus_list) -{ - struct pci_bus *bus; - int i; - struct resource *res, *pr; - - /* Depth-First Search on bus tree */ - list_for_each_entry(bus, bus_list, node) { - for (i = 0; i < 4; ++i) { - if ((res = bus->resource[i]) == NULL || !res->flags - || res->start > res->end) - continue; - if (bus->parent == NULL) - pr = (res->flags & IORESOURCE_IO)? - &ioport_resource: &iomem_resource; - else { - pr = pci_find_parent_resource(bus->self, res); - if (pr == res) { - /* this happens when the generic PCI - * code (wrongly) decides that this - * bridge is transparent -- paulus - */ - continue; - } - } - - DBG("PCI: bridge rsrc %lx..%lx (%lx), parent %p\n", - res->start, res->end, res->flags, pr); - if (pr) { - if (request_resource(pr, res) == 0) - continue; - /* - * Must be a conflict with an existing entry. - * Move that entry (or entries) under the - * bridge resource and try again. - */ - if (reparent_resources(pr, res) == 0) - continue; - } - printk(KERN_ERR "PCI: Cannot allocate resource region " - "%d of PCI bridge %d\n", i, bus->number); - if (pci_relocate_bridge_resource(bus, i)) - bus->resource[i] = NULL; - } - pcibios_allocate_bus_resources(&bus->children); - } -} - -/* - * Reparent resource children of pr that conflict with res - * under res, and make res replace those children. - */ -static int __init -reparent_resources(struct resource *parent, struct resource *res) -{ - struct resource *p, **pp; - struct resource **firstpp = NULL; - - for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) { - if (p->end < res->start) - continue; - if (res->end < p->start) - break; - if (p->start < res->start || p->end > res->end) - return -1; /* not completely contained */ - if (firstpp == NULL) - firstpp = pp; - } - if (firstpp == NULL) - return -1; /* didn't find any conflicting entries? */ - res->parent = parent; - res->child = *firstpp; - res->sibling = *pp; - *firstpp = res; - *pp = NULL; - for (p = res->child; p != NULL; p = p->sibling) { - p->parent = res; - DBG(KERN_INFO "PCI: reparented %s [%lx..%lx] under %s\n", - p->name, p->start, p->end, res->name); - } - return 0; -} - -/* - * A bridge has been allocated a range which is outside the range - * of its parent bridge, so it needs to be moved. - */ -static int __init -pci_relocate_bridge_resource(struct pci_bus *bus, int i) -{ - struct resource *res, *pr, *conflict; - unsigned long try, size; - int j; - struct pci_bus *parent = bus->parent; - - if (parent == NULL) { - /* shouldn't ever happen */ - printk(KERN_ERR "PCI: can't move host bridge resource\n"); - return -1; - } - res = bus->resource[i]; - if (res == NULL) - return -1; - pr = NULL; - for (j = 0; j < 4; j++) { - struct resource *r = parent->resource[j]; - if (!r) - continue; - if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM)) - continue; - if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) { - pr = r; - break; - } - if (res->flags & IORESOURCE_PREFETCH) - pr = r; - } - if (pr == NULL) - return -1; - size = res->end - res->start; - if (pr->start > pr->end || size > pr->end - pr->start) - return -1; - try = pr->end; - for (;;) { - res->start = try - size; - res->end = try; - if (probe_resource(bus->parent, pr, res, &conflict) == 0) - break; - if (conflict->start <= pr->start + size) - return -1; - try = conflict->start - 1; - } - if (request_resource(pr, res)) { - DBG(KERN_ERR "PCI: huh? 
couldn't move to %lx..%lx\n", - res->start, res->end); - return -1; /* "can't happen" */ - } - update_bridge_base(bus, i); - printk(KERN_INFO "PCI: bridge %d resource %d moved to %lx..%lx\n", - bus->number, i, res->start, res->end); - return 0; -} - -static int __init -probe_resource(struct pci_bus *parent, struct resource *pr, - struct resource *res, struct resource **conflict) -{ - struct pci_bus *bus; - struct pci_dev *dev; - struct resource *r; - int i; - - for (r = pr->child; r != NULL; r = r->sibling) { - if (r->end >= res->start && res->end >= r->start) { - *conflict = r; - return 1; - } - } - list_for_each_entry(bus, &parent->children, node) { - for (i = 0; i < 4; ++i) { - if ((r = bus->resource[i]) == NULL) - continue; - if (!r->flags || r->start > r->end || r == res) - continue; - if (pci_find_parent_resource(bus->self, r) != pr) - continue; - if (r->end >= res->start && res->end >= r->start) { - *conflict = r; - return 1; - } - } - } - list_for_each_entry(dev, &parent->devices, bus_list) { - for (i = 0; i < 6; ++i) { - r = &dev->resource[i]; - if (!r->flags || (r->flags & IORESOURCE_UNSET)) - continue; - if (pci_find_parent_resource(dev, r) != pr) - continue; - if (r->end >= res->start && res->end >= r->start) { - *conflict = r; - return 1; - } - } - } - return 0; -} - -static void __init -update_bridge_base(struct pci_bus *bus, int i) -{ - struct resource *res = bus->resource[i]; - u8 io_base_lo, io_limit_lo; - u16 mem_base, mem_limit; - u16 cmd; - unsigned long start, end, off; - struct pci_dev *dev = bus->self; - struct pci_controller *hose = dev->sysdata; - - if (!hose) { - printk("update_bridge_base: no hose?\n"); - return; - } - pci_read_config_word(dev, PCI_COMMAND, &cmd); - pci_write_config_word(dev, PCI_COMMAND, - cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)); - if (res->flags & IORESOURCE_IO) { - off = (unsigned long) hose->io_base_virt - isa_io_base; - start = res->start - off; - end = res->end - off; - io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK; - io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK; - if (end > 0xffff) { - pci_write_config_word(dev, PCI_IO_BASE_UPPER16, - start >> 16); - pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16, - end >> 16); - io_base_lo |= PCI_IO_RANGE_TYPE_32; - } else - io_base_lo |= PCI_IO_RANGE_TYPE_16; - pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo); - pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo); - - } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) - == IORESOURCE_MEM) { - off = hose->pci_mem_offset; - mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK; - mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK; - pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base); - pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit); - - } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) - == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) { - off = hose->pci_mem_offset; - mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK; - mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK; - pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base); - pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit); - - } else { - DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n", - pci_name(dev), i, res->flags); - } - pci_write_config_word(dev, PCI_COMMAND, cmd); -} - -static inline void alloc_resource(struct pci_dev *dev, int idx) -{ - struct resource *pr, *r = &dev->resource[idx]; - - DBG("PCI:%s: Resource %d: %08lx-%08lx (f=%lx)\n", - pci_name(dev), idx, r->start, r->end, r->flags); 
- pr = pci_find_parent_resource(dev, r); - if (!pr || request_resource(pr, r) < 0) { - printk(KERN_ERR "PCI: Cannot allocate resource region %d" - " of device %s\n", idx, pci_name(dev)); - if (pr) - DBG("PCI: parent is %p: %08lx-%08lx (f=%lx)\n", - pr, pr->start, pr->end, pr->flags); - /* We'll assign a new address later */ - r->flags |= IORESOURCE_UNSET; - r->end -= r->start; - r->start = 0; - } -} - -static void __init -pcibios_allocate_resources(int pass) -{ - struct pci_dev *dev = NULL; - int idx, disabled; - u16 command; - struct resource *r; - - while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - pci_read_config_word(dev, PCI_COMMAND, &command); - for (idx = 0; idx < 6; idx++) { - r = &dev->resource[idx]; - if (r->parent) /* Already allocated */ - continue; - if (!r->flags || (r->flags & IORESOURCE_UNSET)) - continue; /* Not assigned at all */ - if (r->flags & IORESOURCE_IO) - disabled = !(command & PCI_COMMAND_IO); - else - disabled = !(command & PCI_COMMAND_MEMORY); - if (pass == disabled) - alloc_resource(dev, idx); - } - if (pass) - continue; - r = &dev->resource[PCI_ROM_RESOURCE]; - if (r->flags & IORESOURCE_ROM_ENABLE) { - /* Turn the ROM off, leave the resource region, but keep it unregistered. */ - u32 reg; - DBG("PCI: Switching off ROM of %s\n", pci_name(dev)); - r->flags &= ~IORESOURCE_ROM_ENABLE; - pci_read_config_dword(dev, dev->rom_base_reg, &reg); - pci_write_config_dword(dev, dev->rom_base_reg, - reg & ~PCI_ROM_ADDRESS_ENABLE); - } - } -} - -static void __init -pcibios_assign_resources(void) -{ - struct pci_dev *dev = NULL; - int idx; - struct resource *r; - - while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - int class = dev->class >> 8; - - /* Don't touch classless devices and host bridges */ - if (!class || class == PCI_CLASS_BRIDGE_HOST) - continue; - - for (idx = 0; idx < 6; idx++) { - r = &dev->resource[idx]; - - /* - * We shall assign a new address to this resource, - * either because the BIOS (sic) forgot to do so - * or because we have decided the old address was - * unusable for some reason. 
- */ - if ((r->flags & IORESOURCE_UNSET) && r->end && - (!ppc_md.pcibios_enable_device_hook || - !ppc_md.pcibios_enable_device_hook(dev, 1))) { - r->flags &= ~IORESOURCE_UNSET; - pci_assign_resource(dev, idx); - } - } - -#if 0 /* don't assign ROMs */ - r = &dev->resource[PCI_ROM_RESOURCE]; - r->end -= r->start; - r->start = 0; - if (r->end) - pci_assign_resource(dev, PCI_ROM_RESOURCE); -#endif - } -} - - -int -pcibios_enable_resources(struct pci_dev *dev, int mask) -{ - u16 cmd, old_cmd; - int idx; - struct resource *r; - - pci_read_config_word(dev, PCI_COMMAND, &cmd); - old_cmd = cmd; - for (idx=0; idx<6; idx++) { - /* Only set up the requested stuff */ - if (!(mask & (1<<idx))) - continue; - - r = &dev->resource[idx]; - if (r->flags & IORESOURCE_UNSET) { - printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev)); - return -EINVAL; - } - if (r->flags & IORESOURCE_IO) - cmd |= PCI_COMMAND_IO; - if (r->flags & IORESOURCE_MEM) - cmd |= PCI_COMMAND_MEMORY; - } - if (dev->resource[PCI_ROM_RESOURCE].start) - cmd |= PCI_COMMAND_MEMORY; - if (cmd != old_cmd) { - printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd); - pci_write_config_word(dev, PCI_COMMAND, cmd); - } - return 0; -} - -static int next_controller_index; - -struct pci_controller * __init -pcibios_alloc_controller(void) -{ - struct pci_controller *hose; - - hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose)); - memset(hose, 0, sizeof(struct pci_controller)); - - *hose_tail = hose; - hose_tail = &hose->next; - - hose->index = next_controller_index++; - - return hose; -} - -#ifdef CONFIG_PPC_OF -/* - * Functions below are used on OpenFirmware machines. - */ -static void -make_one_node_map(struct device_node* node, u8 pci_bus) -{ - int *bus_range; - int len; - - if (pci_bus >= pci_bus_count) - return; - bus_range = (int *) get_property(node, "bus-range", &len); - if (bus_range == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %s, " - "assuming it starts at 0\n", node->full_name); - pci_to_OF_bus_map[pci_bus] = 0; - } else - pci_to_OF_bus_map[pci_bus] = bus_range[0]; - - for (node=node->child; node != 0;node = node->sibling) { - struct pci_dev* dev; - unsigned int *class_code, *reg; - - class_code = (unsigned int *) get_property(node, "class-code", NULL); - if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && - (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) - continue; - reg = (unsigned int *)get_property(node, "reg", NULL); - if (!reg) - continue; - dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff)); - if (!dev || !dev->subordinate) - continue; - make_one_node_map(node, dev->subordinate->number); - } -} - -void -pcibios_make_OF_bus_map(void) -{ - int i; - struct pci_controller* hose; - u8* of_prop_map; - - pci_to_OF_bus_map = (u8*)kmalloc(pci_bus_count, GFP_KERNEL); - if (!pci_to_OF_bus_map) { - printk(KERN_ERR "Can't allocate OF bus map !\n"); - return; - } - - /* We fill the bus map with invalid values, that helps - * debugging. 
- */ - for (i=0; i<pci_bus_count; i++) - pci_to_OF_bus_map[i] = 0xff; - - /* For each hose, we begin searching bridges */ - for(hose=hose_head; hose; hose=hose->next) { - struct device_node* node; - node = (struct device_node *)hose->arch_data; - if (!node) - continue; - make_one_node_map(node, hose->first_busno); - } - of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", NULL); - if (of_prop_map) - memcpy(of_prop_map, pci_to_OF_bus_map, pci_bus_count); -#ifdef DEBUG - printk("PCI->OF bus map:\n"); - for (i=0; i<pci_bus_count; i++) { - if (pci_to_OF_bus_map[i] == 0xff) - continue; - printk("%d -> %d\n", i, pci_to_OF_bus_map[i]); - } -#endif -} - -typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data); - -static struct device_node* -scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data) -{ - struct device_node* sub_node; - - for (; node != 0;node = node->sibling) { - unsigned int *class_code; - - if (filter(node, data)) - return node; - - /* For PCI<->PCI bridges or CardBus bridges, we go down - * Note: some OFs create a parent node "multifunc-device" as - * a fake root for all functions of a multi-function device, - * we go down them as well. - */ - class_code = (unsigned int *) get_property(node, "class-code", NULL); - if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && - (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) && - strcmp(node->name, "multifunc-device")) - continue; - sub_node = scan_OF_pci_childs(node->child, filter, data); - if (sub_node) - return sub_node; - } - return NULL; -} - -static int -scan_OF_pci_childs_iterator(struct device_node* node, void* data) -{ - unsigned int *reg; - u8* fdata = (u8*)data; - - reg = (unsigned int *) get_property(node, "reg", NULL); - if (reg && ((reg[0] >> 8) & 0xff) == fdata[1] - && ((reg[0] >> 16) & 0xff) == fdata[0]) - return 1; - return 0; -} - -static struct device_node* -scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn) -{ - u8 filter_data[2] = {bus, dev_fn}; - - return scan_OF_pci_childs(node, scan_OF_pci_childs_iterator, filter_data); -} - -/* - * Scans the OF tree for a device node matching a PCI device - */ -struct device_node * -pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) -{ - struct pci_controller *hose; - struct device_node *node; - int busnr; - - if (!have_of) - return NULL; - - /* Lookup the hose */ - busnr = bus->number; - hose = pci_bus_to_hose(busnr); - if (!hose) - return NULL; - - /* Check it has an OF node associated */ - node = (struct device_node *) hose->arch_data; - if (!node) - return NULL; - - /* Fixup bus number according to what OF think it is. */ -#ifdef CONFIG_PPC_PMAC - /* The G5 need a special case here. Basically, we don't remap all - * busses on it so we don't create the pci-OF-map. However, we do - * remap the AGP bus and so have to deal with it. A future better - * fix has to be done by making the remapping per-host and always - * filling the pci_to_OF map. 
--BenH - */ - if (_machine == _MACH_Pmac && busnr >= 0xf0) - busnr -= 0xf0; - else -#endif - if (pci_to_OF_bus_map) - busnr = pci_to_OF_bus_map[busnr]; - if (busnr == 0xff) - return NULL; - - /* Now, lookup childs of the hose */ - return scan_OF_childs_for_device(node->child, busnr, devfn); -} -EXPORT_SYMBOL(pci_busdev_to_OF_node); - -struct device_node* -pci_device_to_OF_node(struct pci_dev *dev) -{ - return pci_busdev_to_OF_node(dev->bus, dev->devfn); -} -EXPORT_SYMBOL(pci_device_to_OF_node); - -/* This routine is meant to be used early during boot, when the - * PCI bus numbers have not yet been assigned, and you need to - * issue PCI config cycles to an OF device. - * It could also be used to "fix" RTAS config cycles if you want - * to set pci_assign_all_buses to 1 and still use RTAS for PCI - * config cycles. - */ -struct pci_controller* -pci_find_hose_for_OF_device(struct device_node* node) -{ - if (!have_of) - return NULL; - while(node) { - struct pci_controller* hose; - for (hose=hose_head;hose;hose=hose->next) - if (hose->arch_data == node) - return hose; - node=node->parent; - } - return NULL; -} - -static int -find_OF_pci_device_filter(struct device_node* node, void* data) -{ - return ((void *)node == data); -} - -/* - * Returns the PCI device matching a given OF node - */ -int -pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn) -{ - unsigned int *reg; - struct pci_controller* hose; - struct pci_dev* dev = NULL; - - if (!have_of) - return -ENODEV; - /* Make sure it's really a PCI device */ - hose = pci_find_hose_for_OF_device(node); - if (!hose || !hose->arch_data) - return -ENODEV; - if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child, - find_OF_pci_device_filter, (void *)node)) - return -ENODEV; - reg = (unsigned int *) get_property(node, "reg", NULL); - if (!reg) - return -ENODEV; - *bus = (reg[0] >> 16) & 0xff; - *devfn = ((reg[0] >> 8) & 0xff); - - /* Ok, here we need some tweak. If we have already renumbered - * all busses, we can't rely on the OF bus number any more. - * the pci_to_OF_bus_map is not enough as several PCI busses - * may match the same OF bus number. 
- */ - if (!pci_to_OF_bus_map) - return 0; - while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - if (pci_to_OF_bus_map[dev->bus->number] != *bus) - continue; - if (dev->devfn != *devfn) - continue; - *bus = dev->bus->number; - return 0; - } - return -ENODEV; -} -EXPORT_SYMBOL(pci_device_from_OF_node); - -void __init -pci_process_bridge_OF_ranges(struct pci_controller *hose, - struct device_node *dev, int primary) -{ - static unsigned int static_lc_ranges[256] __initdata; - unsigned int *dt_ranges, *lc_ranges, *ranges, *prev; - unsigned int size; - int rlen = 0, orig_rlen; - int memno = 0; - struct resource *res; - int np, na = prom_n_addr_cells(dev); - np = na + 5; - - /* First we try to merge ranges to fix a problem with some pmacs - * that can have more than 3 ranges, fortunately using contiguous - * addresses -- BenH - */ - dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen); - if (!dt_ranges) - return; - /* Sanity check, though hopefully that never happens */ - if (rlen > sizeof(static_lc_ranges)) { - printk(KERN_WARNING "OF ranges property too large !\n"); - rlen = sizeof(static_lc_ranges); - } - lc_ranges = static_lc_ranges; - memcpy(lc_ranges, dt_ranges, rlen); - orig_rlen = rlen; - - /* Let's work on a copy of the "ranges" property instead of damaging - * the device-tree image in memory - */ - ranges = lc_ranges; - prev = NULL; - while ((rlen -= np * sizeof(unsigned int)) >= 0) { - if (prev) { - if (prev[0] == ranges[0] && prev[1] == ranges[1] && - (prev[2] + prev[na+4]) == ranges[2] && - (prev[na+2] + prev[na+4]) == ranges[na+2]) { - prev[na+4] += ranges[na+4]; - ranges[0] = 0; - ranges += np; - continue; - } - } - prev = ranges; - ranges += np; - } - - /* - * The ranges property is laid out as an array of elements, - * each of which comprises: - * cells 0 - 2: a PCI address - * cells 3 or 3+4: a CPU physical address - * (size depending on dev->n_addr_cells) - * cells 4+5 or 5+6: the size of the range - */ - ranges = lc_ranges; - rlen = orig_rlen; - while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) { - res = NULL; - size = ranges[na+4]; - switch (ranges[0] >> 24) { - case 1: /* I/O space */ - if (ranges[2] != 0) - break; - hose->io_base_phys = ranges[na+2]; - /* limit I/O space to 16MB */ - if (size > 0x01000000) - size = 0x01000000; - hose->io_base_virt = ioremap(ranges[na+2], size); - if (primary) - isa_io_base = (unsigned long) hose->io_base_virt; - res = &hose->io_resource; - res->flags = IORESOURCE_IO; - res->start = ranges[2]; - break; - case 2: /* memory space */ - memno = 0; - if (ranges[1] == 0 && ranges[2] == 0 - && ranges[na+4] <= (16 << 20)) { - /* 1st 16MB, i.e. 
ISA memory area */ - if (primary) - isa_mem_base = ranges[na+2]; - memno = 1; - } - while (memno < 3 && hose->mem_resources[memno].flags) - ++memno; - if (memno == 0) - hose->pci_mem_offset = ranges[na+2] - ranges[2]; - if (memno < 3) { - res = &hose->mem_resources[memno]; - res->flags = IORESOURCE_MEM; - res->start = ranges[na+2]; - } - break; - } - if (res != NULL) { - res->name = dev->full_name; - res->end = res->start + size - 1; - res->parent = NULL; - res->sibling = NULL; - res->child = NULL; - } - ranges += np; - } -} - -/* We create the "pci-OF-bus-map" property now so it appears in the - * /proc device tree - */ -void __init -pci_create_OF_bus_map(void) -{ - struct property* of_prop; - - of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256); - if (of_prop && find_path_device("/")) { - memset(of_prop, -1, sizeof(struct property) + 256); - of_prop->name = "pci-OF-bus-map"; - of_prop->length = 256; - of_prop->value = (unsigned char *)&of_prop[1]; - prom_add_property(find_path_device("/"), of_prop); - } -} - -static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct pci_dev *pdev; - struct device_node *np; - - pdev = to_pci_dev (dev); - np = pci_device_to_OF_node(pdev); - if (np == NULL || np->full_name == NULL) - return 0; - return sprintf(buf, "%s", np->full_name); -} -static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); - -#else /* CONFIG_PPC_OF */ -void pcibios_make_OF_bus_map(void) -{ -} -#endif /* CONFIG_PPC_OF */ - -/* Add sysfs properties */ -void pcibios_add_platform_entries(struct pci_dev *pdev) -{ -#ifdef CONFIG_PPC_OF - device_create_file(&pdev->dev, &dev_attr_devspec); -#endif /* CONFIG_PPC_OF */ -} - - -#ifdef CONFIG_PPC_PMAC -/* - * This set of routines checks for PCI<->PCI bridges that have closed - * IO resources and have child devices. It tries to re-open an IO - * window on them. - * - * This is a _temporary_ fix to workaround a problem with Apple's OF - * closing IO windows on P2P bridges when the OF drivers of cards - * below this bridge don't claim any IO range (typically ATI or - * Adaptec). - * - * A more complete fix would be to use drivers/pci/setup-bus.c, which - * involves a working pcibios_fixup_pbus_ranges(), some more care about - * ordering when creating the host bus resources, and maybe a few more - * minor tweaks - */ - -/* Initialize bridges with base/limit values we have collected */ -static void __init -do_update_p2p_io_resource(struct pci_bus *bus, int enable_vga) -{ - struct pci_dev *bridge = bus->self; - struct pci_controller* hose = (struct pci_controller *)bridge->sysdata; - u32 l; - u16 w; - struct resource res; - - if (bus->resource[0] == NULL) - return; - res = *(bus->resource[0]); - - DBG("Remapping Bus %d, bridge: %s\n", bus->number, pci_name(bridge)); - res.start -= ((unsigned long) hose->io_base_virt - isa_io_base); - res.end -= ((unsigned long) hose->io_base_virt - isa_io_base); - DBG(" IO window: %08lx-%08lx\n", res.start, res.end); - - /* Set up the top and bottom of the PCI I/O segment for this bus. 
*/ - pci_read_config_dword(bridge, PCI_IO_BASE, &l); - l &= 0xffff000f; - l |= (res.start >> 8) & 0x00f0; - l |= res.end & 0xf000; - pci_write_config_dword(bridge, PCI_IO_BASE, l); - - if ((l & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) { - l = (res.start >> 16) | (res.end & 0xffff0000); - pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, l); - } - - pci_read_config_word(bridge, PCI_COMMAND, &w); - w |= PCI_COMMAND_IO; - pci_write_config_word(bridge, PCI_COMMAND, w); - -#if 0 /* Enabling this causes XFree 4.2.0 to hang during PCI probe */ - if (enable_vga) { - pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &w); - w |= PCI_BRIDGE_CTL_VGA; - pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, w); - } -#endif -} - -/* This function is pretty basic and actually quite broken for the - * general case, it's enough for us right now though. It's supposed - * to tell us if we need to open an IO range at all or not and what - * size. - */ -static int __init -check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga) -{ - struct pci_dev *dev; - int i; - int rc = 0; - -#define push_end(res, size) do { unsigned long __sz = (size) ; \ - res->end = ((res->end + __sz) / (__sz + 1)) * (__sz + 1) + __sz; \ - } while (0) - - list_for_each_entry(dev, &bus->devices, bus_list) { - u16 class = dev->class >> 8; - - if (class == PCI_CLASS_DISPLAY_VGA || - class == PCI_CLASS_NOT_DEFINED_VGA) - *found_vga = 1; - if (class >> 8 == PCI_BASE_CLASS_BRIDGE && dev->subordinate) - rc |= check_for_io_childs(dev->subordinate, res, found_vga); - if (class == PCI_CLASS_BRIDGE_CARDBUS) - push_end(res, 0xfff); - - for (i=0; i<PCI_NUM_RESOURCES; i++) { - struct resource *r; - unsigned long r_size; - - if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI - && i >= PCI_BRIDGE_RESOURCES) - continue; - r = &dev->resource[i]; - r_size = r->end - r->start; - if (r_size < 0xfff) - r_size = 0xfff; - if (r->flags & IORESOURCE_IO && (r_size) != 0) { - rc = 1; - push_end(res, r_size); - } - } - } - - return rc; -} - -/* Here we scan all P2P bridges of a given level that have a closed - * IO window. 
Note that the test for the presence of a VGA card should - * be improved to take into account already configured P2P bridges, - * currently, we don't see them and might end up configuring 2 bridges - * with VGA pass through enabled - */ -static void __init -do_fixup_p2p_level(struct pci_bus *bus) -{ - struct pci_bus *b; - int i, parent_io; - int has_vga = 0; - - for (parent_io=0; parent_io<4; parent_io++) - if (bus->resource[parent_io] - && bus->resource[parent_io]->flags & IORESOURCE_IO) - break; - if (parent_io >= 4) - return; - - list_for_each_entry(b, &bus->children, node) { - struct pci_dev *d = b->self; - struct pci_controller* hose = (struct pci_controller *)d->sysdata; - struct resource *res = b->resource[0]; - struct resource tmp_res; - unsigned long max; - int found_vga = 0; - - memset(&tmp_res, 0, sizeof(tmp_res)); - tmp_res.start = bus->resource[parent_io]->start; - - /* We don't let low addresses go through that closed P2P bridge, well, - * that may not be necessary but I feel safer that way - */ - if (tmp_res.start == 0) - tmp_res.start = 0x1000; - - if (!list_empty(&b->devices) && res && res->flags == 0 && - res != bus->resource[parent_io] && - (d->class >> 8) == PCI_CLASS_BRIDGE_PCI && - check_for_io_childs(b, &tmp_res, &found_vga)) { - u8 io_base_lo; - - printk(KERN_INFO "Fixing up IO bus %s\n", b->name); - - if (found_vga) { - if (has_vga) { - printk(KERN_WARNING "Skipping VGA, already active" - " on bus segment\n"); - found_vga = 0; - } else - has_vga = 1; - } - pci_read_config_byte(d, PCI_IO_BASE, &io_base_lo); - - if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) - max = ((unsigned long) hose->io_base_virt - - isa_io_base) + 0xffffffff; - else - max = ((unsigned long) hose->io_base_virt - - isa_io_base) + 0xffff; - - *res = tmp_res; - res->flags = IORESOURCE_IO; - res->name = b->name; - - /* Find a resource in the parent where we can allocate */ - for (i = 0 ; i < 4; i++) { - struct resource *r = bus->resource[i]; - if (!r) - continue; - if ((r->flags & IORESOURCE_IO) == 0) - continue; - DBG("Trying to allocate from %08lx, size %08lx from parent" - " res %d: %08lx -> %08lx\n", - res->start, res->end, i, r->start, r->end); - - if (allocate_resource(r, res, res->end + 1, res->start, max, - res->end + 1, NULL, NULL) < 0) { - DBG("Failed !\n"); - continue; - } - do_update_p2p_io_resource(b, found_vga); - break; - } - } - do_fixup_p2p_level(b); - } -} - -static void -pcibios_fixup_p2p_bridges(void) -{ - struct pci_bus *b; - - list_for_each_entry(b, &pci_root_buses, node) - do_fixup_p2p_level(b); -} - -#endif /* CONFIG_PPC_PMAC */ - -static int __init -pcibios_init(void) -{ - struct pci_controller *hose; - struct pci_bus *bus; - int next_busno; - - printk(KERN_INFO "PCI: Probing PCI hardware\n"); - - /* Scan all of the recorded PCI controllers. */ - for (next_busno = 0, hose = hose_head; hose; hose = hose->next) { - if (pci_assign_all_buses) - hose->first_busno = next_busno; - hose->last_busno = 0xff; - bus = pci_scan_bus(hose->first_busno, hose->ops, hose); - hose->last_busno = bus->subordinate; - if (pci_assign_all_buses || next_busno <= hose->last_busno) - next_busno = hose->last_busno + pcibios_assign_bus_offset; - } - pci_bus_count = next_busno; - - /* OpenFirmware based machines need a map of OF bus - * numbers vs. kernel bus numbers since we may have to - * remap them. 
- */ - if (pci_assign_all_buses && have_of) - pcibios_make_OF_bus_map(); - - /* Do machine dependent PCI interrupt routing */ - if (ppc_md.pci_swizzle && ppc_md.pci_map_irq) - pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq); - - /* Call machine dependent fixup */ - if (ppc_md.pcibios_fixup) - ppc_md.pcibios_fixup(); - - /* Allocate and assign resources */ - pcibios_allocate_bus_resources(&pci_root_buses); - pcibios_allocate_resources(0); - pcibios_allocate_resources(1); -#ifdef CONFIG_PPC_PMAC - pcibios_fixup_p2p_bridges(); -#endif /* CONFIG_PPC_PMAC */ - pcibios_assign_resources(); - - /* Call machine dependent post-init code */ - if (ppc_md.pcibios_after_init) - ppc_md.pcibios_after_init(); - - return 0; -} - -subsys_initcall(pcibios_init); - -unsigned char __init -common_swizzle(struct pci_dev *dev, unsigned char *pinp) -{ - struct pci_controller *hose = dev->sysdata; - - if (dev->bus->number != hose->first_busno) { - u8 pin = *pinp; - do { - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); - /* Move up the chain of bridges. */ - dev = dev->bus->self; - } while (dev->bus->self); - *pinp = pin; - - /* The slot is the idsel of the last bridge. */ - } - return PCI_SLOT(dev->devfn); -} - -unsigned long resource_fixup(struct pci_dev * dev, struct resource * res, - unsigned long start, unsigned long size) -{ - return start; -} - -void __init pcibios_fixup_bus(struct pci_bus *bus) -{ - struct pci_controller *hose = (struct pci_controller *) bus->sysdata; - unsigned long io_offset; - struct resource *res; - int i; - - io_offset = (unsigned long)hose->io_base_virt - isa_io_base; - if (bus->parent == NULL) { - /* This is a host bridge - fill in its resources */ - hose->bus = bus; - - bus->resource[0] = res = &hose->io_resource; - if (!res->flags) { - if (io_offset) - printk(KERN_ERR "I/O resource not set for host" - " bridge %d\n", hose->index); - res->start = 0; - res->end = IO_SPACE_LIMIT; - res->flags = IORESOURCE_IO; - } - res->start += io_offset; - res->end += io_offset; - - for (i = 0; i < 3; ++i) { - res = &hose->mem_resources[i]; - if (!res->flags) { - if (i > 0) - continue; - printk(KERN_ERR "Memory resource not set for " - "host bridge %d\n", hose->index); - res->start = hose->pci_mem_offset; - res->end = ~0U; - res->flags = IORESOURCE_MEM; - } - bus->resource[i+1] = res; - } - } else { - /* This is a subordinate bridge */ - pci_read_bridge_bases(bus); - - for (i = 0; i < 4; ++i) { - if ((res = bus->resource[i]) == NULL) - continue; - if (!res->flags) - continue; - if (io_offset && (res->flags & IORESOURCE_IO)) { - res->start += io_offset; - res->end += io_offset; - } else if (hose->pci_mem_offset - && (res->flags & IORESOURCE_MEM)) { - res->start += hose->pci_mem_offset; - res->end += hose->pci_mem_offset; - } - } - } - - if (ppc_md.pcibios_fixup_bus) - ppc_md.pcibios_fixup_bus(bus); -} - -char __init *pcibios_setup(char *str) -{ - return str; -} - -/* the next one is stolen from the alpha port... 
*/ -void __init -pcibios_update_irq(struct pci_dev *dev, int irq) -{ - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); - /* XXX FIXME - update OF device tree node interrupt property */ -} - -int pcibios_enable_device(struct pci_dev *dev, int mask) -{ - u16 cmd, old_cmd; - int idx; - struct resource *r; - - if (ppc_md.pcibios_enable_device_hook) - if (ppc_md.pcibios_enable_device_hook(dev, 0)) - return -EINVAL; - - pci_read_config_word(dev, PCI_COMMAND, &cmd); - old_cmd = cmd; - for (idx=0; idx<6; idx++) { - r = &dev->resource[idx]; - if (r->flags & IORESOURCE_UNSET) { - printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev)); - return -EINVAL; - } - if (r->flags & IORESOURCE_IO) - cmd |= PCI_COMMAND_IO; - if (r->flags & IORESOURCE_MEM) - cmd |= PCI_COMMAND_MEMORY; - } - if (cmd != old_cmd) { - printk("PCI: Enabling device %s (%04x -> %04x)\n", - pci_name(dev), old_cmd, cmd); - pci_write_config_word(dev, PCI_COMMAND, cmd); - } - return 0; -} - -struct pci_controller* -pci_bus_to_hose(int bus) -{ - struct pci_controller* hose = hose_head; - - for (; hose; hose = hose->next) - if (bus >= hose->first_busno && bus <= hose->last_busno) - return hose; - return NULL; -} - -void __iomem * -pci_bus_io_base(unsigned int bus) -{ - struct pci_controller *hose; - - hose = pci_bus_to_hose(bus); - if (!hose) - return NULL; - return hose->io_base_virt; -} - -unsigned long -pci_bus_io_base_phys(unsigned int bus) -{ - struct pci_controller *hose; - - hose = pci_bus_to_hose(bus); - if (!hose) - return 0; - return hose->io_base_phys; -} - -unsigned long -pci_bus_mem_base_phys(unsigned int bus) -{ - struct pci_controller *hose; - - hose = pci_bus_to_hose(bus); - if (!hose) - return 0; - return hose->pci_mem_offset; -} - -unsigned long -pci_resource_to_bus(struct pci_dev *pdev, struct resource *res) -{ - /* Hack alert again ! See comments in chrp_pci.c - */ - struct pci_controller* hose = - (struct pci_controller *)pdev->sysdata; - if (hose && res->flags & IORESOURCE_MEM) - return res->start - hose->pci_mem_offset; - /* We may want to do something with IOs here... */ - return res->start; -} - - -static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, - unsigned long *offset, - enum pci_mmap_state mmap_state) -{ - struct pci_controller *hose = pci_bus_to_hose(dev->bus->number); - unsigned long io_offset = 0; - int i, res_bit; - - if (hose == 0) - return NULL; /* should never happen */ - - /* If memory, add on the PCI bridge address offset */ - if (mmap_state == pci_mmap_mem) { - *offset += hose->pci_mem_offset; - res_bit = IORESOURCE_MEM; - } else { - io_offset = hose->io_base_virt - ___IO_BASE; - *offset += io_offset; - res_bit = IORESOURCE_IO; - } - - /* - * Check that the offset requested corresponds to one of the - * resources of the device. - */ - for (i = 0; i <= PCI_ROM_RESOURCE; i++) { - struct resource *rp = &dev->resource[i]; - int flags = rp->flags; - - /* treat ROM as memory (should be already) */ - if (i == PCI_ROM_RESOURCE) - flags |= IORESOURCE_MEM; - - /* Active and same type? */ - if ((flags & res_bit) == 0) - continue; - - /* In the range of this resource? */ - if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end) - continue; - - /* found it! construct the final physical address */ - if (mmap_state == pci_mmap_io) - *offset += hose->io_base_phys - io_offset; - return rp; - } - - return NULL; -} - -/* - * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci - * device mapping. 
- */ -static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, - pgprot_t protection, - enum pci_mmap_state mmap_state, - int write_combine) -{ - unsigned long prot = pgprot_val(protection); - - /* Write combine is always 0 on non-memory space mappings. On - * memory space, if the user didn't pass 1, we check for a - * "prefetchable" resource. This is a bit hackish, but we use - * this to workaround the inability of /sysfs to provide a write - * combine bit - */ - if (mmap_state != pci_mmap_mem) - write_combine = 0; - else if (write_combine == 0) { - if (rp->flags & IORESOURCE_PREFETCH) - write_combine = 1; - } - - /* XXX would be nice to have a way to ask for write-through */ - prot |= _PAGE_NO_CACHE; - if (write_combine) - prot &= ~_PAGE_GUARDED; - else - prot |= _PAGE_GUARDED; - - printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start, - prot); - - return __pgprot(prot); -} - -/* - * This one is used by /dev/mem and fbdev who have no clue about the - * PCI device, it tries to find the PCI device first and calls the - * above routine - */ -pgprot_t pci_phys_mem_access_prot(struct file *file, - unsigned long pfn, - unsigned long size, - pgprot_t protection) -{ - struct pci_dev *pdev = NULL; - struct resource *found = NULL; - unsigned long prot = pgprot_val(protection); - unsigned long offset = pfn << PAGE_SHIFT; - int i; - - if (page_is_ram(pfn)) - return prot; - - prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; - - for_each_pci_dev(pdev) { - for (i = 0; i <= PCI_ROM_RESOURCE; i++) { - struct resource *rp = &pdev->resource[i]; - int flags = rp->flags; - - /* Active and same type? */ - if ((flags & IORESOURCE_MEM) == 0) - continue; - /* In the range of this resource? */ - if (offset < (rp->start & PAGE_MASK) || - offset > rp->end) - continue; - found = rp; - break; - } - if (found) - break; - } - if (found) { - if (found->flags & IORESOURCE_PREFETCH) - prot &= ~_PAGE_GUARDED; - pci_dev_put(pdev); - } - - DBG("non-PCI map for %lx, prot: %lx\n", offset, prot); - - return __pgprot(prot); -} - - -/* - * Perform the actual remap of the pages for a PCI device mapping, as - * appropriate for this architecture. The region in the process to map - * is described by vm_start and vm_end members of VMA, the base physical - * address is found in vm_pgoff. - * The pci device structure is provided so that architectures may make mapping - * decisions on a per-device or per-bus basis. - * - * Returns a negative error code on failure, zero on success. - */ -int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, - int write_combine) -{ - unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; - struct resource *rp; - int ret; - - rp = __pci_mmap_make_offset(dev, &offset, mmap_state); - if (rp == NULL) - return -EINVAL; - - vma->vm_pgoff = offset >> PAGE_SHIFT; - vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO; - vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, - vma->vm_page_prot, - mmap_state, write_combine); - - ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, - vma->vm_end - vma->vm_start, vma->vm_page_prot); - - return ret; -} - -/* Obsolete functions. 
Should be removed once the symbios driver - * is fixed - */ -unsigned long -phys_to_bus(unsigned long pa) -{ - struct pci_controller *hose; - int i; - - for (hose = hose_head; hose; hose = hose->next) { - for (i = 0; i < 3; ++i) { - if (pa >= hose->mem_resources[i].start - && pa <= hose->mem_resources[i].end) { - /* - * XXX the hose->pci_mem_offset really - * only applies to mem_resources[0]. - * We need a way to store an offset for - * the others. -- paulus - */ - if (i == 0) - pa -= hose->pci_mem_offset; - return pa; - } - } - } - /* hmmm, didn't find it */ - return 0; -} - -unsigned long -pci_phys_to_bus(unsigned long pa, int busnr) -{ - struct pci_controller* hose = pci_bus_to_hose(busnr); - if (!hose) - return pa; - return pa - hose->pci_mem_offset; -} - -unsigned long -pci_bus_to_phys(unsigned int ba, int busnr) -{ - struct pci_controller* hose = pci_bus_to_hose(busnr); - if (!hose) - return ba; - return ba + hose->pci_mem_offset; -} - -/* Provide information on locations of various I/O regions in physical - * memory. Do this on a per-card basis so that we choose the right - * root bridge. - * Note that the returned IO or memory base is a physical address - */ - -long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) -{ - struct pci_controller* hose; - long result = -EOPNOTSUPP; - - /* Argh ! Please forgive me for that hack, but that's the - * simplest way to get existing XFree to not lockup on some - * G5 machines... So when something asks for bus 0 io base - * (bus 0 is HT root), we return the AGP one instead. - */ -#ifdef CONFIG_PPC_PMAC - if (_machine == _MACH_Pmac && machine_is_compatible("MacRISC4")) - if (bus == 0) - bus = 0xf0; -#endif /* CONFIG_PPC_PMAC */ - - hose = pci_bus_to_hose(bus); - if (!hose) - return -ENODEV; - - switch (which) { - case IOBASE_BRIDGE_NUMBER: - return (long)hose->first_busno; - case IOBASE_MEMORY: - return (long)hose->pci_mem_offset; - case IOBASE_IO: - return (long)hose->io_base_phys; - case IOBASE_ISA_IO: - return (long)isa_io_base; - case IOBASE_ISA_MEM: - return (long)isa_mem_base; - } - - return result; -} - -void pci_resource_to_user(const struct pci_dev *dev, int bar, - const struct resource *rsrc, - u64 *start, u64 *end) -{ - struct pci_controller *hose = pci_bus_to_hose(dev->bus->number); - unsigned long offset = 0; - - if (hose == NULL) - return; - - if (rsrc->flags & IORESOURCE_IO) - offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys; - - *start = rsrc->start + offset; - *end = rsrc->end + offset; -} - -void __init -pci_init_resource(struct resource *res, unsigned long start, unsigned long end, - int flags, char *name) -{ - res->start = start; - res->end = end; - res->flags = flags; - res->name = name; - res->parent = NULL; - res->sibling = NULL; - res->child = NULL; -} - -void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) -{ - unsigned long start = pci_resource_start(dev, bar); - unsigned long len = pci_resource_len(dev, bar); - unsigned long flags = pci_resource_flags(dev, bar); - - if (!len) - return NULL; - if (max && len > max) - len = max; - if (flags & IORESOURCE_IO) - return ioport_map(start, len); - if (flags & IORESOURCE_MEM) - /* Not checking IORESOURCE_CACHEABLE because PPC does - * not currently distinguish between ioremap and - * ioremap_nocache. - */ - return ioremap(start, len); - /* What? 
*/ - return NULL; -} - -void pci_iounmap(struct pci_dev *dev, void __iomem *addr) -{ - /* Nothing to do */ -} -EXPORT_SYMBOL(pci_iomap); -EXPORT_SYMBOL(pci_iounmap); - - -/* - * Null PCI config access functions, for the case when we can't - * find a hose. - */ -#define NULL_PCI_OP(rw, size, type) \ -static int \ -null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \ -{ \ - return PCIBIOS_DEVICE_NOT_FOUND; \ -} - -static int -null_read_config(struct pci_bus *bus, unsigned int devfn, int offset, - int len, u32 *val) -{ - return PCIBIOS_DEVICE_NOT_FOUND; -} - -static int -null_write_config(struct pci_bus *bus, unsigned int devfn, int offset, - int len, u32 val) -{ - return PCIBIOS_DEVICE_NOT_FOUND; -} - -static struct pci_ops null_pci_ops = -{ - null_read_config, - null_write_config -}; - -/* - * These functions are used early on before PCI scanning is done - * and all of the pci_dev and pci_bus structures have been created. - */ -static struct pci_bus * -fake_pci_bus(struct pci_controller *hose, int busnr) -{ - static struct pci_bus bus; - - if (hose == 0) { - hose = pci_bus_to_hose(busnr); - if (hose == 0) - printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr); - } - bus.number = busnr; - bus.sysdata = hose; - bus.ops = hose? hose->ops: &null_pci_ops; - return &bus; -} - -#define EARLY_PCI_OP(rw, size, type) \ -int early_##rw##_config_##size(struct pci_controller *hose, int bus, \ - int devfn, int offset, type value) \ -{ \ - return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \ - devfn, offset, value); \ -} - -EARLY_PCI_OP(read, byte, u8 *) -EARLY_PCI_OP(read, word, u16 *) -EARLY_PCI_OP(read, dword, u32 *) -EARLY_PCI_OP(write, byte, u8) -EARLY_PCI_OP(write, word, u16) -EARLY_PCI_OP(write, dword, u32) diff --git a/arch/ppc/kernel/perfmon_fsl_booke.c b/arch/ppc/kernel/perfmon_fsl_booke.c deleted file mode 100644 index 32455dfcc36..00000000000 --- a/arch/ppc/kernel/perfmon_fsl_booke.c +++ /dev/null @@ -1,222 +0,0 @@ -/* kernel/perfmon_fsl_booke.c - * Freescale Book-E Performance Monitor code - * - * Author: Andy Fleming - * Copyright (c) 2004 Freescale Semiconductor, Inc - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/stddef.h> -#include <linux/unistd.h> -#include <linux/ptrace.h> -#include <linux/slab.h> -#include <linux/user.h> -#include <linux/a.out.h> -#include <linux/interrupt.h> -#include <linux/config.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/prctl.h> - -#include <asm/pgtable.h> -#include <asm/uaccess.h> -#include <asm/system.h> -#include <asm/io.h> -#include <asm/reg.h> -#include <asm/xmon.h> -#include <asm/pmc.h> - -static inline u32 get_pmlca(int ctr); -static inline void set_pmlca(int ctr, u32 pmlca); - -static inline u32 get_pmlca(int ctr) -{ - u32 pmlca; - - switch (ctr) { - case 0: - pmlca = mfpmr(PMRN_PMLCA0); - break; - case 1: - pmlca = mfpmr(PMRN_PMLCA1); - break; - case 2: - pmlca = mfpmr(PMRN_PMLCA2); - break; - case 3: - pmlca = mfpmr(PMRN_PMLCA3); - break; - default: - panic("Bad ctr number\n"); - } - - return pmlca; -} - -static inline void set_pmlca(int ctr, u32 pmlca) -{ - switch (ctr) { - case 0: - mtpmr(PMRN_PMLCA0, pmlca); - break; - case 1: - mtpmr(PMRN_PMLCA1, pmlca); - break; - case 2: - mtpmr(PMRN_PMLCA2, pmlca); - break; - case 3: - mtpmr(PMRN_PMLCA3, pmlca); - break; - default: - panic("Bad ctr number\n"); - } -} - -void init_pmc_stop(int ctr) -{ - u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU | - PMLCA_FCM1 | PMLCA_FCM0); - u32 pmlcb = 0; - - switch (ctr) { - case 0: - mtpmr(PMRN_PMLCA0, pmlca); - mtpmr(PMRN_PMLCB0, pmlcb); - break; - case 1: - mtpmr(PMRN_PMLCA1, pmlca); - mtpmr(PMRN_PMLCB1, pmlcb); - break; - case 2: - mtpmr(PMRN_PMLCA2, pmlca); - mtpmr(PMRN_PMLCB2, pmlcb); - break; - case 3: - mtpmr(PMRN_PMLCA3, pmlca); - mtpmr(PMRN_PMLCB3, pmlcb); - break; - default: - panic("Bad ctr number!\n"); - } -} - -void set_pmc_event(int ctr, int event) -{ - u32 pmlca; - - pmlca = get_pmlca(ctr); - - pmlca = (pmlca & ~PMLCA_EVENT_MASK) | - ((event << PMLCA_EVENT_SHIFT) & - PMLCA_EVENT_MASK); - - set_pmlca(ctr, pmlca); -} - -void set_pmc_user_kernel(int ctr, int user, int kernel) -{ - u32 pmlca; - - pmlca = get_pmlca(ctr); - - if(user) - pmlca &= ~PMLCA_FCU; - else - pmlca |= PMLCA_FCU; - - if(kernel) - pmlca &= ~PMLCA_FCS; - else - pmlca |= PMLCA_FCS; - - set_pmlca(ctr, pmlca); -} - -void set_pmc_marked(int ctr, int mark0, int mark1) -{ - u32 pmlca = get_pmlca(ctr); - - if(mark0) - pmlca &= ~PMLCA_FCM0; - else - pmlca |= PMLCA_FCM0; - - if(mark1) - pmlca &= ~PMLCA_FCM1; - else - pmlca |= PMLCA_FCM1; - - set_pmlca(ctr, pmlca); -} - -void pmc_start_ctr(int ctr, int enable) -{ - u32 pmlca = get_pmlca(ctr); - - pmlca &= ~PMLCA_FC; - - if (enable) - pmlca |= PMLCA_CE; - else - pmlca &= ~PMLCA_CE; - - set_pmlca(ctr, pmlca); -} - -void pmc_start_ctrs(int enable) -{ - u32 pmgc0 = mfpmr(PMRN_PMGC0); - - pmgc0 &= ~PMGC0_FAC; - pmgc0 |= PMGC0_FCECE; - - if (enable) - pmgc0 |= PMGC0_PMIE; - else - pmgc0 &= ~PMGC0_PMIE; - - mtpmr(PMRN_PMGC0, pmgc0); -} - -void pmc_stop_ctrs(void) -{ - u32 pmgc0 = mfpmr(PMRN_PMGC0); - - pmgc0 |= PMGC0_FAC; - - pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE); - - mtpmr(PMRN_PMGC0, pmgc0); -} - -void dump_pmcs(void) -{ - printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0)); - printk("pmc\t\tpmlca\t\tpmlcb\n"); - printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0), - mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0)); - printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1), - mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1)); - printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2), - mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2)); - printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3), - 
mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3)); -} - -EXPORT_SYMBOL(init_pmc_stop); -EXPORT_SYMBOL(set_pmc_event); -EXPORT_SYMBOL(set_pmc_user_kernel); -EXPORT_SYMBOL(set_pmc_marked); -EXPORT_SYMBOL(pmc_start_ctr); -EXPORT_SYMBOL(pmc_start_ctrs); -EXPORT_SYMBOL(pmc_stop_ctrs); -EXPORT_SYMBOL(dump_pmcs); diff --git a/arch/ppc/kernel/ppc-stub.c b/arch/ppc/kernel/ppc-stub.c deleted file mode 100644 index d61889c2404..00000000000 --- a/arch/ppc/kernel/ppc-stub.c +++ /dev/null @@ -1,867 +0,0 @@ -/* - * ppc-stub.c: KGDB support for the Linux kernel. - * - * adapted from arch/sparc/kernel/sparc-stub.c for the PowerPC - * some stuff borrowed from Paul Mackerras' xmon - * Copyright (C) 1998 Michael AK Tesch (tesch@cs.wisc.edu) - * - * Modifications to run under Linux - * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) - * - * This file originally came from the gdb sources, and the - * copyright notices have been retained below. - */ - -/**************************************************************************** - - THIS SOFTWARE IS NOT COPYRIGHTED - - HP offers the following for use in the public domain. HP makes no - warranty with regard to the software or its performance and the - user accepts the software "AS IS" with all faults. - - HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD - TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. - -****************************************************************************/ - -/**************************************************************************** - * Header: remcom.c,v 1.34 91/03/09 12:29:49 glenne Exp $ - * - * Module name: remcom.c $ - * Revision: 1.34 $ - * Date: 91/03/09 12:29:49 $ - * Contributor: Lake Stevens Instrument Division$ - * - * Description: low level support for gdb debugger. $ - * - * Considerations: only works on target hardware $ - * - * Written by: Glenn Engel $ - * ModuleState: Experimental $ - * - * NOTES: See Below $ - * - * Modified for SPARC by Stu Grossman, Cygnus Support. - * - * This code has been extensively tested on the Fujitsu SPARClite demo board. - * - * To enable debugger support, two things need to happen. One, a - * call to set_debug_traps() is necessary in order to allow any breakpoints - * or error conditions to be properly intercepted and reported to gdb. - * Two, a breakpoint needs to be generated to begin communication. This - * is most easily accomplished by a call to breakpoint(). Breakpoint() - * simulates a breakpoint by executing a trap #1. - * - ************* - * - * The following gdb commands are supported: - * - * command function Return value - * - * g return the value of the CPU registers hex data or ENN - * G set the value of the CPU registers OK or ENN - * qOffsets Get section offsets. Reply is Text=xxx;Data=yyy;Bss=zzz - * - * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN - * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN - * - * c Resume at current address SNN ( signal NN) - * cAA..AA Continue at address AA..AA SNN - * - * s Step one instruction SNN - * sAA..AA Step one instruction from AA..AA SNN - * - * k kill - * - * ? What was the last sigval ? SNN (signal NN) - * - * bBB..BB Set baud rate to BB..BB OK or BNN, then sets - * baud rate - * - * All commands and responses are sent with a packet which includes a - * checksum. A packet consists of - * - * $<packet info>#<checksum>. 
- * - * where - * <packet info> :: <characters representing the command or response> - * <checksum> :: <two hex digits computed as modulo 256 sum of <packetinfo>> - * - * When a packet is received, it is first acknowledged with either '+' or '-'. - * '+' indicates a successful transfer. '-' indicates a failed transfer. - * - * Example: - * - * Host: Reply: - * $m0,10#2a +$00010203040506070809101112131415#42 - * - ****************************************************************************/ - -#include <linux/config.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/smp.h> -#include <linux/smp_lock.h> -#include <linux/init.h> -#include <linux/sysrq.h> - -#include <asm/cacheflush.h> -#include <asm/system.h> -#include <asm/signal.h> -#include <asm/kgdb.h> -#include <asm/pgtable.h> -#include <asm/ptrace.h> - -void breakinst(void); - -/* - * BUFMAX defines the maximum number of characters in inbound/outbound buffers - * at least NUMREGBYTES*2 are needed for register packets - */ -#define BUFMAX 2048 -static char remcomInBuffer[BUFMAX]; -static char remcomOutBuffer[BUFMAX]; - -static int initialized; -static int kgdb_active; -static int kgdb_started; -static u_int fault_jmp_buf[100]; -static int kdebug; - - -static const char hexchars[]="0123456789abcdef"; - -/* Place where we save old trap entries for restoration - sparc*/ -/* struct tt_entry kgdb_savettable[256]; */ -/* typedef void (*trapfunc_t)(void); */ - -static void kgdb_fault_handler(struct pt_regs *regs); -static int handle_exception (struct pt_regs *regs); - -#if 0 -/* Install an exception handler for kgdb */ -static void exceptionHandler(int tnum, unsigned int *tfunc) -{ - /* We are dorking with a live trap table, all irqs off */ -} -#endif - -int -kgdb_setjmp(long *buf) -{ - asm ("mflr 0; stw 0,0(%0);" - "stw 1,4(%0); stw 2,8(%0);" - "mfcr 0; stw 0,12(%0);" - "stmw 13,16(%0)" - : : "r" (buf)); - /* XXX should save fp regs as well */ - return 0; -} -void -kgdb_longjmp(long *buf, int val) -{ - if (val == 0) - val = 1; - asm ("lmw 13,16(%0);" - "lwz 0,12(%0); mtcrf 0x38,0;" - "lwz 0,0(%0); lwz 1,4(%0); lwz 2,8(%0);" - "mtlr 0; mr 3,%1" - : : "r" (buf), "r" (val)); -} -/* Convert ch from a hex digit to an int */ -static int -hex(unsigned char ch) -{ - if (ch >= 'a' && ch <= 'f') - return ch-'a'+10; - if (ch >= '0' && ch <= '9') - return ch-'0'; - if (ch >= 'A' && ch <= 'F') - return ch-'A'+10; - return -1; -} - -/* Convert the memory pointed to by mem into hex, placing result in buf. - * Return a pointer to the last char put in buf (null), in case of mem fault, - * return 0. - */ -static unsigned char * -mem2hex(const char *mem, char *buf, int count) -{ - unsigned char ch; - unsigned short tmp_s; - unsigned long tmp_l; - - if (kgdb_setjmp((long*)fault_jmp_buf) == 0) { - debugger_fault_handler = kgdb_fault_handler; - - /* Accessing 16 bit and 32 bit objects in a single - ** load instruction is required to avoid bad side - ** effects for some IO registers. 
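/*
 * The framing just described ($<data>#<checksum>, answered with '+' or
 * '-') is easy to reproduce on its own.  A stand-alone C sketch of the
 * send side; put_byte()/get_byte() are hypothetical transport hooks, not
 * functions from this file, and the 0x7f masking mirrors the stub above.
 */
extern void put_byte(unsigned char c);
extern unsigned char get_byte(void);

static void send_packet(const char *data)
{
        static const char hex[] = "0123456789abcdef";
        const char *p;
        unsigned char sum;

        do {
                put_byte('$');
                sum = 0;
                for (p = data; *p; p++) {
                        put_byte(*p);
                        sum += (unsigned char)*p;       /* modulo-256 sum of the payload */
                }
                put_byte('#');
                put_byte(hex[sum >> 4]);
                put_byte(hex[sum & 0xf]);
        } while ((get_byte() & 0x7f) != '+');           /* '-' asks for a resend */
}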
- */ - - if ((count == 2) && (((long)mem & 1) == 0)) { - tmp_s = *(unsigned short *)mem; - mem += 2; - *buf++ = hexchars[(tmp_s >> 12) & 0xf]; - *buf++ = hexchars[(tmp_s >> 8) & 0xf]; - *buf++ = hexchars[(tmp_s >> 4) & 0xf]; - *buf++ = hexchars[tmp_s & 0xf]; - - } else if ((count == 4) && (((long)mem & 3) == 0)) { - tmp_l = *(unsigned int *)mem; - mem += 4; - *buf++ = hexchars[(tmp_l >> 28) & 0xf]; - *buf++ = hexchars[(tmp_l >> 24) & 0xf]; - *buf++ = hexchars[(tmp_l >> 20) & 0xf]; - *buf++ = hexchars[(tmp_l >> 16) & 0xf]; - *buf++ = hexchars[(tmp_l >> 12) & 0xf]; - *buf++ = hexchars[(tmp_l >> 8) & 0xf]; - *buf++ = hexchars[(tmp_l >> 4) & 0xf]; - *buf++ = hexchars[tmp_l & 0xf]; - - } else { - while (count-- > 0) { - ch = *mem++; - *buf++ = hexchars[ch >> 4]; - *buf++ = hexchars[ch & 0xf]; - } - } - - } else { - /* error condition */ - } - debugger_fault_handler = NULL; - *buf = 0; - return buf; -} - -/* convert the hex array pointed to by buf into binary to be placed in mem - * return a pointer to the character AFTER the last byte written. -*/ -static char * -hex2mem(char *buf, char *mem, int count) -{ - unsigned char ch; - int i; - char *orig_mem; - unsigned short tmp_s; - unsigned long tmp_l; - - orig_mem = mem; - - if (kgdb_setjmp((long*)fault_jmp_buf) == 0) { - debugger_fault_handler = kgdb_fault_handler; - - /* Accessing 16 bit and 32 bit objects in a single - ** store instruction is required to avoid bad side - ** effects for some IO registers. - */ - - if ((count == 2) && (((long)mem & 1) == 0)) { - tmp_s = hex(*buf++) << 12; - tmp_s |= hex(*buf++) << 8; - tmp_s |= hex(*buf++) << 4; - tmp_s |= hex(*buf++); - - *(unsigned short *)mem = tmp_s; - mem += 2; - - } else if ((count == 4) && (((long)mem & 3) == 0)) { - tmp_l = hex(*buf++) << 28; - tmp_l |= hex(*buf++) << 24; - tmp_l |= hex(*buf++) << 20; - tmp_l |= hex(*buf++) << 16; - tmp_l |= hex(*buf++) << 12; - tmp_l |= hex(*buf++) << 8; - tmp_l |= hex(*buf++) << 4; - tmp_l |= hex(*buf++); - - *(unsigned long *)mem = tmp_l; - mem += 4; - - } else { - for (i=0; i<count; i++) { - ch = hex(*buf++) << 4; - ch |= hex(*buf++); - *mem++ = ch; - } - } - - - /* - ** Flush the data cache, invalidate the instruction cache. - */ - flush_icache_range((int)orig_mem, (int)orig_mem + count - 1); - - } else { - /* error condition */ - } - debugger_fault_handler = NULL; - return mem; -} - -/* - * While we find nice hex chars, build an int. - * Return number of chars processed. 
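/*
 * Why mem2hex()/hex2mem() special-case aligned 2- and 4-byte transfers:
 * many memory-mapped registers must be accessed with a single load or
 * store of their natural width, and copying byte by byte would hit the
 * register several times, each access with its own side effects.  A
 * sketch of the difference; the function names are illustrative only,
 * and the bytes are assembled big-endian, as on PowerPC.
 */
static unsigned int read_reg32(const volatile void *reg)
{
        /* one 32-bit load -- what a 32-bit device register expects */
        return *(const volatile unsigned int *)reg;
}

static unsigned int read_reg32_bytewise(const volatile void *reg)
{
        /* four 8-bit loads -- may trigger the register's side effects four times */
        const volatile unsigned char *p = reg;

        return ((unsigned int)p[0] << 24) | ((unsigned int)p[1] << 16) |
               ((unsigned int)p[2] << 8)  |  (unsigned int)p[3];
}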
- */ -static int -hexToInt(char **ptr, int *intValue) -{ - int numChars = 0; - int hexValue; - - *intValue = 0; - - if (kgdb_setjmp((long*)fault_jmp_buf) == 0) { - debugger_fault_handler = kgdb_fault_handler; - while (**ptr) { - hexValue = hex(**ptr); - if (hexValue < 0) - break; - - *intValue = (*intValue << 4) | hexValue; - numChars ++; - - (*ptr)++; - } - } else { - /* error condition */ - } - debugger_fault_handler = NULL; - - return (numChars); -} - -/* scan for the sequence $<data>#<checksum> */ -static void -getpacket(char *buffer) -{ - unsigned char checksum; - unsigned char xmitcsum; - int i; - int count; - unsigned char ch; - - do { - /* wait around for the start character, ignore all other - * characters */ - while ((ch = (getDebugChar() & 0x7f)) != '$') ; - - checksum = 0; - xmitcsum = -1; - - count = 0; - - /* now, read until a # or end of buffer is found */ - while (count < BUFMAX) { - ch = getDebugChar() & 0x7f; - if (ch == '#') - break; - checksum = checksum + ch; - buffer[count] = ch; - count = count + 1; - } - - if (count >= BUFMAX) - continue; - - buffer[count] = 0; - - if (ch == '#') { - xmitcsum = hex(getDebugChar() & 0x7f) << 4; - xmitcsum |= hex(getDebugChar() & 0x7f); - if (checksum != xmitcsum) - putDebugChar('-'); /* failed checksum */ - else { - putDebugChar('+'); /* successful transfer */ - /* if a sequence char is present, reply the ID */ - if (buffer[2] == ':') { - putDebugChar(buffer[0]); - putDebugChar(buffer[1]); - /* remove sequence chars from buffer */ - count = strlen(buffer); - for (i=3; i <= count; i++) - buffer[i-3] = buffer[i]; - } - } - } - } while (checksum != xmitcsum); -} - -/* send the packet in buffer. */ -static void putpacket(unsigned char *buffer) -{ - unsigned char checksum; - int count; - unsigned char ch, recv; - - /* $<packet info>#<checksum>. */ - do { - putDebugChar('$'); - checksum = 0; - count = 0; - - while ((ch = buffer[count])) { - putDebugChar(ch); - checksum += ch; - count += 1; - } - - putDebugChar('#'); - putDebugChar(hexchars[checksum >> 4]); - putDebugChar(hexchars[checksum & 0xf]); - recv = getDebugChar(); - } while ((recv & 0x7f) != '+'); -} - -static void kgdb_flush_cache_all(void) -{ - flush_instruction_cache(); -} - -/* Set up exception handlers for tracing and breakpoints - * [could be called kgdb_init()] - */ -void set_debug_traps(void) -{ -#if 0 - unsigned char c; - - save_and_cli(flags); - - /* In case GDB is started before us, ack any packets (presumably - * "$?#xx") sitting there. - * - * I've found this code causes more problems than it solves, - * so that's why it's commented out. 
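/*
 * The receive side is the mirror image of getpacket() above: hunt for
 * '$', sum the payload until '#', then compare against the two hex
 * digits that follow and answer '+' or '-'.  A simplified sketch with no
 * retry loop; get_byte()/put_byte()/from_hex() are stand-ins for the
 * transport and for hex() above, and the caller supplies a buffer of at
 * least one byte.
 */
extern unsigned char get_byte(void);
extern void put_byte(unsigned char c);
extern int from_hex(unsigned char c);

static int recv_packet(char *buf, int max)
{
        unsigned char sum = 0, xmit;
        int n;

        while ((get_byte() & 0x7f) != '$')
                ;                               /* ignore everything before '$' */
        for (n = 0; n < max - 1; n++) {
                unsigned char c = get_byte() & 0x7f;

                if (c == '#')
                        break;
                sum += c;
                buf[n] = c;
        }
        buf[n] = '\0';
        xmit  = from_hex(get_byte() & 0x7f) << 4;
        xmit |= from_hex(get_byte() & 0x7f);
        put_byte(sum == xmit ? '+' : '-');      /* ack, or ask for a resend */
        return sum == xmit;
}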
GDB seems to work fine - * now starting either before or after the kernel -bwb - */ - - while((c = getDebugChar()) != '$'); - while((c = getDebugChar()) != '#'); - c = getDebugChar(); /* eat first csum byte */ - c = getDebugChar(); /* eat second csum byte */ - putDebugChar('+'); /* ack it */ -#endif - debugger = kgdb; - debugger_bpt = kgdb_bpt; - debugger_sstep = kgdb_sstep; - debugger_iabr_match = kgdb_iabr_match; - debugger_dabr_match = kgdb_dabr_match; - - initialized = 1; -} - -static void kgdb_fault_handler(struct pt_regs *regs) -{ - kgdb_longjmp((long*)fault_jmp_buf, 1); -} - -int kgdb_bpt(struct pt_regs *regs) -{ - return handle_exception(regs); -} - -int kgdb_sstep(struct pt_regs *regs) -{ - return handle_exception(regs); -} - -void kgdb(struct pt_regs *regs) -{ - handle_exception(regs); -} - -int kgdb_iabr_match(struct pt_regs *regs) -{ - printk(KERN_ERR "kgdb doesn't support iabr, what?!?\n"); - return handle_exception(regs); -} - -int kgdb_dabr_match(struct pt_regs *regs) -{ - printk(KERN_ERR "kgdb doesn't support dabr, what?!?\n"); - return handle_exception(regs); -} - -/* Convert the hardware trap type code to a unix signal number. */ -/* - * This table contains the mapping between PowerPC hardware trap types, and - * signals, which are primarily what GDB understands. - */ -static struct hard_trap_info -{ - unsigned int tt; /* Trap type code for powerpc */ - unsigned char signo; /* Signal that we map this trap into */ -} hard_trap_info[] = { -#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) - { 0x100, SIGINT }, /* critical input interrupt */ - { 0x200, SIGSEGV }, /* machine check */ - { 0x300, SIGSEGV }, /* data storage */ - { 0x400, SIGBUS }, /* instruction storage */ - { 0x500, SIGINT }, /* interrupt */ - { 0x600, SIGBUS }, /* alignment */ - { 0x700, SIGILL }, /* program */ - { 0x800, SIGILL }, /* reserved */ - { 0x900, SIGILL }, /* reserved */ - { 0xa00, SIGILL }, /* reserved */ - { 0xb00, SIGILL }, /* reserved */ - { 0xc00, SIGCHLD }, /* syscall */ - { 0xd00, SIGILL }, /* reserved */ - { 0xe00, SIGILL }, /* reserved */ - { 0xf00, SIGILL }, /* reserved */ - /* - ** 0x1000 PIT - ** 0x1010 FIT - ** 0x1020 watchdog - ** 0x1100 data TLB miss - ** 0x1200 instruction TLB miss - */ - { 0x2002, SIGTRAP}, /* debug */ -#else - { 0x200, SIGSEGV }, /* machine check */ - { 0x300, SIGSEGV }, /* address error (store) */ - { 0x400, SIGBUS }, /* instruction bus error */ - { 0x500, SIGINT }, /* interrupt */ - { 0x600, SIGBUS }, /* alingment */ - { 0x700, SIGTRAP }, /* breakpoint trap */ - { 0x800, SIGFPE }, /* fpu unavail */ - { 0x900, SIGALRM }, /* decrementer */ - { 0xa00, SIGILL }, /* reserved */ - { 0xb00, SIGILL }, /* reserved */ - { 0xc00, SIGCHLD }, /* syscall */ - { 0xd00, SIGTRAP }, /* single-step/watch */ - { 0xe00, SIGFPE }, /* fp assist */ -#endif - { 0, 0} /* Must be last */ - -}; - -static int computeSignal(unsigned int tt) -{ - struct hard_trap_info *ht; - - for (ht = hard_trap_info; ht->tt && ht->signo; ht++) - if (ht->tt == tt) - return ht->signo; - - return SIGHUP; /* default for things we don't know about */ -} - -#define PC_REGNUM 64 -#define SP_REGNUM 1 - -/* - * This function does all command processing for interfacing to gdb. - */ -static int -handle_exception (struct pt_regs *regs) -{ - int sigval; - int addr; - int length; - char *ptr; - unsigned int msr; - - /* We don't handle user-mode breakpoints. 
*/ - if (user_mode(regs)) - return 0; - - if (debugger_fault_handler) { - debugger_fault_handler(regs); - panic("kgdb longjump failed!\n"); - } - if (kgdb_active) { - printk(KERN_ERR "interrupt while in kgdb, returning\n"); - return 0; - } - - kgdb_active = 1; - kgdb_started = 1; - -#ifdef KGDB_DEBUG - printk("kgdb: entering handle_exception; trap [0x%x]\n", - (unsigned int)regs->trap); -#endif - - kgdb_interruptible(0); - lock_kernel(); - msr = mfmsr(); - mtmsr(msr & ~MSR_EE); /* disable interrupts */ - - if (regs->nip == (unsigned long)breakinst) { - /* Skip over breakpoint trap insn */ - regs->nip += 4; - } - - /* reply to host that an exception has occurred */ - sigval = computeSignal(regs->trap); - ptr = remcomOutBuffer; - - *ptr++ = 'T'; - *ptr++ = hexchars[sigval >> 4]; - *ptr++ = hexchars[sigval & 0xf]; - *ptr++ = hexchars[PC_REGNUM >> 4]; - *ptr++ = hexchars[PC_REGNUM & 0xf]; - *ptr++ = ':'; - ptr = mem2hex((char *)®s->nip, ptr, 4); - *ptr++ = ';'; - *ptr++ = hexchars[SP_REGNUM >> 4]; - *ptr++ = hexchars[SP_REGNUM & 0xf]; - *ptr++ = ':'; - ptr = mem2hex(((char *)regs) + SP_REGNUM*4, ptr, 4); - *ptr++ = ';'; - *ptr++ = 0; - - putpacket(remcomOutBuffer); - if (kdebug) - printk("remcomOutBuffer: %s\n", remcomOutBuffer); - - /* XXX We may want to add some features dealing with poking the - * XXX page tables, ... (look at sparc-stub.c for more info) - * XXX also required hacking to the gdb sources directly... - */ - - while (1) { - remcomOutBuffer[0] = 0; - - getpacket(remcomInBuffer); - switch (remcomInBuffer[0]) { - case '?': /* report most recent signal */ - remcomOutBuffer[0] = 'S'; - remcomOutBuffer[1] = hexchars[sigval >> 4]; - remcomOutBuffer[2] = hexchars[sigval & 0xf]; - remcomOutBuffer[3] = 0; - break; -#if 0 - case 'q': /* this screws up gdb for some reason...*/ - { - extern long _start, sdata, __bss_start; - - ptr = &remcomInBuffer[1]; - if (strncmp(ptr, "Offsets", 7) != 0) - break; - - ptr = remcomOutBuffer; - sprintf(ptr, "Text=%8.8x;Data=%8.8x;Bss=%8.8x", - &_start, &sdata, &__bss_start); - break; - } -#endif - case 'd': - /* toggle debug flag */ - kdebug ^= 1; - break; - - case 'g': /* return the value of the CPU registers. - * some of them are non-PowerPC names :( - * they are stored in gdb like: - * struct { - * u32 gpr[32]; - * f64 fpr[32]; - * u32 pc, ps, cnd, lr; (ps=msr) - * u32 cnt, xer, mq; - * } - */ - { - int i; - ptr = remcomOutBuffer; - /* General Purpose Regs */ - ptr = mem2hex((char *)regs, ptr, 32 * 4); - /* Floating Point Regs - FIXME */ - /*ptr = mem2hex((char *), ptr, 32 * 8);*/ - for(i=0; i<(32*8*2); i++) { /* 2chars/byte */ - ptr[i] = '0'; - } - ptr += 32*8*2; - /* pc, msr, cr, lr, ctr, xer, (mq is unused) */ - ptr = mem2hex((char *)®s->nip, ptr, 4); - ptr = mem2hex((char *)®s->msr, ptr, 4); - ptr = mem2hex((char *)®s->ccr, ptr, 4); - ptr = mem2hex((char *)®s->link, ptr, 4); - ptr = mem2hex((char *)®s->ctr, ptr, 4); - ptr = mem2hex((char *)®s->xer, ptr, 4); - } - break; - - case 'G': /* set the value of the CPU registers */ - { - ptr = &remcomInBuffer[1]; - - /* - * If the stack pointer has moved, you should pray. - * (cause only god can help you). - */ - - /* General Purpose Regs */ - hex2mem(ptr, (char *)regs, 32 * 4); - - /* Floating Point Regs - FIXME?? 
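/*
 * The stop reply assembled above has the shape
 * T<signal><regno>:<value>;<regno>:<value>; with every field in hex.
 * A stand-alone C sketch that formats one for a 32-bit PC and stack
 * pointer; it assumes big-endian byte order (as on PowerPC), where a
 * %08x rendering matches the mem2hex() byte dump used above.  The
 * function name is illustrative only.
 */
#include <stdio.h>

static int make_stop_reply(char *buf, size_t len, int sig,
                           unsigned int pc, unsigned int sp)
{
        /* 64 is PC_REGNUM and 1 is SP_REGNUM, as defined above */
        return snprintf(buf, len, "T%02x%02x:%08x;%02x:%08x;",
                        sig & 0xff, 64, pc, 1, sp);
}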
*/ - /*ptr = hex2mem(ptr, ??, 32 * 8);*/ - ptr += 32*8*2; - - /* pc, msr, cr, lr, ctr, xer, (mq is unused) */ - ptr = hex2mem(ptr, (char *)®s->nip, 4); - ptr = hex2mem(ptr, (char *)®s->msr, 4); - ptr = hex2mem(ptr, (char *)®s->ccr, 4); - ptr = hex2mem(ptr, (char *)®s->link, 4); - ptr = hex2mem(ptr, (char *)®s->ctr, 4); - ptr = hex2mem(ptr, (char *)®s->xer, 4); - - strcpy(remcomOutBuffer,"OK"); - } - break; - case 'H': - /* don't do anything, yet, just acknowledge */ - hexToInt(&ptr, &addr); - strcpy(remcomOutBuffer,"OK"); - break; - - case 'm': /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */ - /* Try to read %x,%x. */ - - ptr = &remcomInBuffer[1]; - - if (hexToInt(&ptr, &addr) && *ptr++ == ',' - && hexToInt(&ptr, &length)) { - if (mem2hex((char *)addr, remcomOutBuffer, - length)) - break; - strcpy(remcomOutBuffer, "E03"); - } else - strcpy(remcomOutBuffer, "E01"); - break; - - case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */ - /* Try to read '%x,%x:'. */ - - ptr = &remcomInBuffer[1]; - - if (hexToInt(&ptr, &addr) && *ptr++ == ',' - && hexToInt(&ptr, &length) - && *ptr++ == ':') { - if (hex2mem(ptr, (char *)addr, length)) - strcpy(remcomOutBuffer, "OK"); - else - strcpy(remcomOutBuffer, "E03"); - flush_icache_range(addr, addr+length); - } else - strcpy(remcomOutBuffer, "E02"); - break; - - - case 'k': /* kill the program, actually just continue */ - case 'c': /* cAA..AA Continue; address AA..AA optional */ - /* try to read optional parameter, pc unchanged if no parm */ - - ptr = &remcomInBuffer[1]; - if (hexToInt(&ptr, &addr)) - regs->nip = addr; - -/* Need to flush the instruction cache here, as we may have deposited a - * breakpoint, and the icache probably has no way of knowing that a data ref to - * some location may have changed something that is in the instruction cache. - */ - kgdb_flush_cache_all(); - mtmsr(msr); - - kgdb_interruptible(1); - unlock_kernel(); - kgdb_active = 0; - if (kdebug) { - printk("remcomInBuffer: %s\n", remcomInBuffer); - printk("remcomOutBuffer: %s\n", remcomOutBuffer); - } - return 1; - - case 's': - kgdb_flush_cache_all(); -#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) - mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC); - regs->msr |= MSR_DE; -#else - regs->msr |= MSR_SE; -#endif - unlock_kernel(); - kgdb_active = 0; - if (kdebug) { - printk("remcomInBuffer: %s\n", remcomInBuffer); - printk("remcomOutBuffer: %s\n", remcomOutBuffer); - } - return 1; - - case 'r': /* Reset (if user process..exit ???)*/ - panic("kgdb reset."); - break; - } /* switch */ - if (remcomOutBuffer[0] && kdebug) { - printk("remcomInBuffer: %s\n", remcomInBuffer); - printk("remcomOutBuffer: %s\n", remcomOutBuffer); - } - /* reply to the request */ - putpacket(remcomOutBuffer); - } /* while(1) */ -} - -/* This function will generate a breakpoint exception. It is used at the - beginning of a program to sync up with a debugger and can be used - otherwise as a quick means to stop program execution and "break" into - the debugger. */ - -void -breakpoint(void) -{ - if (!initialized) { - printk("breakpoint() called b4 kgdb init\n"); - return; - } - - asm(" .globl breakinst \n\ - breakinst: .long 0x7d821008"); -} - -#ifdef CONFIG_KGDB_CONSOLE -/* Output string in GDB O-packet format if GDB has connected. If nothing - output, returns 0 (caller must then handle output). */ -int -kgdb_output_string (const char* s, unsigned int count) -{ - char buffer[512]; - - if (!kgdb_started) - return 0; - - count = (count <= (sizeof(buffer) / 2 - 2)) - ? 
count : (sizeof(buffer) / 2 - 2); - - buffer[0] = 'O'; - mem2hex (s, &buffer[1], count); - putpacket(buffer); - - return 1; -} -#endif - -static void sysrq_handle_gdb(int key, struct pt_regs *pt_regs, - struct tty_struct *tty) -{ - printk("Entering GDB stub\n"); - breakpoint(); -} -static struct sysrq_key_op sysrq_gdb_op = { - .handler = sysrq_handle_gdb, - .help_msg = "Gdb", - .action_msg = "GDB", -}; - -static int gdb_register_sysrq(void) -{ - printk("Registering GDB sysrq handler\n"); - register_sysrq_key('g', &sysrq_gdb_op); - return 0; -} -module_init(gdb_register_sysrq); diff --git a/arch/ppc/kernel/ppc_htab.c b/arch/ppc/kernel/ppc_htab.c deleted file mode 100644 index ca810025993..00000000000 --- a/arch/ppc/kernel/ppc_htab.c +++ /dev/null @@ -1,467 +0,0 @@ -/* - * PowerPC hash table management proc entry. Will show information - * about the current hash table and will allow changes to it. - * - * Written by Cort Dougan (cort@cs.nmt.edu) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include <linux/config.h> -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/proc_fs.h> -#include <linux/stat.h> -#include <linux/sysctl.h> -#include <linux/ctype.h> -#include <linux/threads.h> -#include <linux/smp_lock.h> -#include <linux/seq_file.h> -#include <linux/init.h> -#include <linux/bitops.h> - -#include <asm/uaccess.h> -#include <asm/mmu.h> -#include <asm/residual.h> -#include <asm/io.h> -#include <asm/pgtable.h> -#include <asm/cputable.h> -#include <asm/system.h> -#include <asm/reg.h> - -static int ppc_htab_show(struct seq_file *m, void *v); -static ssize_t ppc_htab_write(struct file * file, const char __user * buffer, - size_t count, loff_t *ppos); -extern PTE *Hash, *Hash_end; -extern unsigned long Hash_size, Hash_mask; -extern unsigned long _SDR1; -extern unsigned long htab_reloads; -extern unsigned long htab_preloads; -extern unsigned long htab_evicts; -extern unsigned long pte_misses; -extern unsigned long pte_errors; -extern unsigned int primary_pteg_full; -extern unsigned int htab_hash_searches; - -static int ppc_htab_open(struct inode *inode, struct file *file) -{ - return single_open(file, ppc_htab_show, NULL); -} - -struct file_operations ppc_htab_operations = { - .open = ppc_htab_open, - .read = seq_read, - .llseek = seq_lseek, - .write = ppc_htab_write, - .release = single_release, -}; - -static char *pmc1_lookup(unsigned long mmcr0) -{ - switch ( mmcr0 & (0x7f<<7) ) - { - case 0x0: - return "none"; - case MMCR0_PMC1_CYCLES: - return "cycles"; - case MMCR0_PMC1_ICACHEMISS: - return "ic miss"; - case MMCR0_PMC1_DTLB: - return "dtlb miss"; - default: - return "unknown"; - } -} - -static char *pmc2_lookup(unsigned long mmcr0) -{ - switch ( mmcr0 & 0x3f ) - { - case 0x0: - return "none"; - case MMCR0_PMC2_CYCLES: - return "cycles"; - case MMCR0_PMC2_DCACHEMISS: - return "dc miss"; - case MMCR0_PMC2_ITLB: - return "itlb miss"; - case MMCR0_PMC2_LOADMISSTIME: - return "load miss time"; - default: - return "unknown"; - } -} - -/* - * print some useful info about the hash table. This function - * is _REALLY_ slow (see the nested for loops below) but nothing - * in here should be really timing critical. 
-- Cort - */ -static int ppc_htab_show(struct seq_file *m, void *v) -{ - unsigned long mmcr0 = 0, pmc1 = 0, pmc2 = 0; -#if defined(CONFIG_PPC_STD_MMU) && !defined(CONFIG_PPC64BRIDGE) - unsigned int kptes = 0, uptes = 0; - PTE *ptr; -#endif /* CONFIG_PPC_STD_MMU */ - - if (cpu_has_feature(CPU_FTR_604_PERF_MON)) { - mmcr0 = mfspr(SPRN_MMCR0); - pmc1 = mfspr(SPRN_PMC1); - pmc2 = mfspr(SPRN_PMC2); - seq_printf(m, - "604 Performance Monitoring\n" - "MMCR0\t\t: %08lx %s%s ", - mmcr0, - ( mmcr0>>28 & 0x2 ) ? "(user mode counted)" : "", - ( mmcr0>>28 & 0x4 ) ? "(kernel mode counted)" : ""); - seq_printf(m, - "\nPMC1\t\t: %08lx (%s)\n" - "PMC2\t\t: %08lx (%s)\n", - pmc1, pmc1_lookup(mmcr0), - pmc2, pmc2_lookup(mmcr0)); - } - -#ifdef CONFIG_PPC_STD_MMU - /* if we don't have a htab */ - if ( Hash_size == 0 ) { - seq_printf(m, "No Hash Table used\n"); - return 0; - } - -#ifndef CONFIG_PPC64BRIDGE - for (ptr = Hash; ptr < Hash_end; ptr++) { - unsigned int mctx, vsid; - - if (!ptr->v) - continue; - /* undo the esid skew */ - vsid = ptr->vsid; - mctx = ((vsid - (vsid & 0xf) * 0x111) >> 4) & 0xfffff; - if (mctx == 0) - kptes++; - else - uptes++; - } -#endif - - seq_printf(m, - "PTE Hash Table Information\n" - "Size\t\t: %luKb\n" - "Buckets\t\t: %lu\n" - "Address\t\t: %08lx\n" - "Entries\t\t: %lu\n" -#ifndef CONFIG_PPC64BRIDGE - "User ptes\t: %u\n" - "Kernel ptes\t: %u\n" - "Percent full\t: %lu%%\n" -#endif - , (unsigned long)(Hash_size>>10), - (Hash_size/(sizeof(PTE)*8)), - (unsigned long)Hash, - Hash_size/sizeof(PTE) -#ifndef CONFIG_PPC64BRIDGE - , uptes, - kptes, - ((kptes+uptes)*100) / (Hash_size/sizeof(PTE)) -#endif - ); - - seq_printf(m, - "Reloads\t\t: %lu\n" - "Preloads\t: %lu\n" - "Searches\t: %u\n" - "Overflows\t: %u\n" - "Evicts\t\t: %lu\n", - htab_reloads, htab_preloads, htab_hash_searches, - primary_pteg_full, htab_evicts); -#endif /* CONFIG_PPC_STD_MMU */ - - seq_printf(m, - "Non-error misses: %lu\n" - "Error misses\t: %lu\n", - pte_misses, pte_errors); - return 0; -} - -/* - * Allow user to define performance counters and resize the hash table - */ -static ssize_t ppc_htab_write(struct file * file, const char __user * ubuffer, - size_t count, loff_t *ppos) -{ -#ifdef CONFIG_PPC_STD_MMU - unsigned long tmp; - char buffer[16]; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - if (strncpy_from_user(buffer, ubuffer, 15)) - return -EFAULT; - buffer[15] = 0; - - /* don't set the htab size for now */ - if ( !strncmp( buffer, "size ", 5) ) - return -EBUSY; - - if ( !strncmp( buffer, "reset", 5) ) - { - if (cpu_has_feature(CPU_FTR_604_PERF_MON)) { - /* reset PMC1 and PMC2 */ - mtspr(SPRN_PMC1, 0); - mtspr(SPRN_PMC2, 0); - } - htab_reloads = 0; - htab_evicts = 0; - pte_misses = 0; - pte_errors = 0; - } - - /* Everything below here requires the performance monitor feature. 
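/*
 * The arithmetic behind the /proc numbers printed above: a PTEG holds
 * eight PTEs, so the bucket count is the table size divided by eight
 * PTE-sized slots, and "percent full" is valid entries over total slots.
 * A sketch with the PTE size taken as a parameter rather than
 * sizeof(PTE), so the layout assumption stays visible; it relies on the
 * includes already pulled in by this file for printk().
 */
static void htab_occupancy(unsigned long hash_size_bytes, unsigned long pte_size,
                           unsigned int kptes, unsigned int uptes)
{
        unsigned long entries = hash_size_bytes / pte_size;
        unsigned long buckets = hash_size_bytes / (pte_size * 8);
        unsigned long percent = entries ? ((kptes + uptes) * 100UL) / entries : 0;

        printk("Buckets: %lu  Entries: %lu  Percent full: %lu%%\n",
               buckets, entries, percent);
}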
*/ - if (!cpu_has_feature(CPU_FTR_604_PERF_MON)) - return count; - - /* turn off performance monitoring */ - if ( !strncmp( buffer, "off", 3) ) - { - mtspr(SPRN_MMCR0, 0); - mtspr(SPRN_PMC1, 0); - mtspr(SPRN_PMC2, 0); - } - - if ( !strncmp( buffer, "user", 4) ) - { - /* setup mmcr0 and clear the correct pmc */ - tmp = (mfspr(SPRN_MMCR0) & ~(0x60000000)) | 0x20000000; - mtspr(SPRN_MMCR0, tmp); - mtspr(SPRN_PMC1, 0); - mtspr(SPRN_PMC2, 0); - } - - if ( !strncmp( buffer, "kernel", 6) ) - { - /* setup mmcr0 and clear the correct pmc */ - tmp = (mfspr(SPRN_MMCR0) & ~(0x60000000)) | 0x40000000; - mtspr(SPRN_MMCR0, tmp); - mtspr(SPRN_PMC1, 0); - mtspr(SPRN_PMC2, 0); - } - - /* PMC1 values */ - if ( !strncmp( buffer, "dtlb", 4) ) - { - /* setup mmcr0 and clear the correct pmc */ - tmp = (mfspr(SPRN_MMCR0) & ~(0x7F << 7)) | MMCR0_PMC1_DTLB; - mtspr(SPRN_MMCR0, tmp); - mtspr(SPRN_PMC1, 0); - } - - if ( !strncmp( buffer, "ic miss", 7) ) - { - /* setup mmcr0 and clear the correct pmc */ - tmp = (mfspr(SPRN_MMCR0) & ~(0x7F<<7)) | MMCR0_PMC1_ICACHEMISS; - mtspr(SPRN_MMCR0, tmp); - mtspr(SPRN_PMC1, 0); - } - - /* PMC2 values */ - if ( !strncmp( buffer, "load miss time", 14) ) - { - /* setup mmcr0 and clear the correct pmc */ - asm volatile( - "mfspr %0,%1\n\t" /* get current mccr0 */ - "rlwinm %0,%0,0,0,31-6\n\t" /* clear bits [26-31] */ - "ori %0,%0,%2 \n\t" /* or in mmcr0 settings */ - "mtspr %1,%0 \n\t" /* set new mccr0 */ - "mtspr %3,%4 \n\t" /* reset the pmc */ - : "=r" (tmp) - : "i" (SPRN_MMCR0), - "i" (MMCR0_PMC2_LOADMISSTIME), - "i" (SPRN_PMC2), "r" (0) ); - } - - if ( !strncmp( buffer, "itlb", 4) ) - { - /* setup mmcr0 and clear the correct pmc */ - asm volatile( - "mfspr %0,%1\n\t" /* get current mccr0 */ - "rlwinm %0,%0,0,0,31-6\n\t" /* clear bits [26-31] */ - "ori %0,%0,%2 \n\t" /* or in mmcr0 settings */ - "mtspr %1,%0 \n\t" /* set new mccr0 */ - "mtspr %3,%4 \n\t" /* reset the pmc */ - : "=r" (tmp) - : "i" (SPRN_MMCR0), "i" (MMCR0_PMC2_ITLB), - "i" (SPRN_PMC2), "r" (0) ); - } - - if ( !strncmp( buffer, "dc miss", 7) ) - { - /* setup mmcr0 and clear the correct pmc */ - asm volatile( - "mfspr %0,%1\n\t" /* get current mccr0 */ - "rlwinm %0,%0,0,0,31-6\n\t" /* clear bits [26-31] */ - "ori %0,%0,%2 \n\t" /* or in mmcr0 settings */ - "mtspr %1,%0 \n\t" /* set new mccr0 */ - "mtspr %3,%4 \n\t" /* reset the pmc */ - : "=r" (tmp) - : "i" (SPRN_MMCR0), "i" (MMCR0_PMC2_DCACHEMISS), - "i" (SPRN_PMC2), "r" (0) ); - } - - return count; -#else /* CONFIG_PPC_STD_MMU */ - return 0; -#endif /* CONFIG_PPC_STD_MMU */ -} - -int proc_dol2crvec(ctl_table *table, int write, struct file *filp, - void __user *buffer_arg, size_t *lenp, loff_t *ppos) -{ - int vleft, first=1, len, left, val; - char __user *buffer = (char __user *) buffer_arg; - #define TMPBUFLEN 256 - char buf[TMPBUFLEN], *p; - static const char *sizestrings[4] = { - "2MB", "256KB", "512KB", "1MB" - }; - static const char *clockstrings[8] = { - "clock disabled", "+1 clock", "+1.5 clock", "reserved(3)", - "+2 clock", "+2.5 clock", "+3 clock", "reserved(7)" - }; - static const char *typestrings[4] = { - "flow-through burst SRAM", "reserved SRAM", - "pipelined burst SRAM", "pipelined late-write SRAM" - }; - static const char *holdstrings[4] = { - "0.5", "1.0", "(reserved2)", "(reserved3)" - }; - - if (!cpu_has_feature(CPU_FTR_L2CR)) - return -EFAULT; - - if ( /*!table->maxlen ||*/ (*ppos && !write)) { - *lenp = 0; - return 0; - } - - vleft = table->maxlen / sizeof(int); - left = *lenp; - - for (; left /*&& vleft--*/; first=0) { - if (write) { - while 
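/*
 * Every command handled above follows the same read-modify-write pattern
 * on MMCR0: clear the PMC1 event-select field (the bits masked by
 * 0x7F << 7), or in the new selector, and zero the counter so it starts
 * fresh.  A sketch of that pattern as one helper; the helper name is
 * illustrative, while the SPR names and the MMCR0_PMC1_* selectors are
 * the ones already used in this file.
 */
static void select_pmc1_event(unsigned long selector)
{
        unsigned long tmp;

        tmp = mfspr(SPRN_MMCR0);
        tmp &= ~(0x7fUL << 7);          /* clear the PMC1 event-select field */
        tmp |= selector;                /* e.g. MMCR0_PMC1_DTLB */
        mtspr(SPRN_MMCR0, tmp);
        mtspr(SPRN_PMC1, 0);            /* restart the count from zero */
}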
(left) { - char c; - if(get_user(c, buffer)) - return -EFAULT; - if (!isspace(c)) - break; - left--; - buffer++; - } - if (!left) - break; - len = left; - if (len > TMPBUFLEN-1) - len = TMPBUFLEN-1; - if(copy_from_user(buf, buffer, len)) - return -EFAULT; - buf[len] = 0; - p = buf; - if (*p < '0' || *p > '9') - break; - val = simple_strtoul(p, &p, 0); - len = p-buf; - if ((len < left) && *p && !isspace(*p)) - break; - buffer += len; - left -= len; - _set_L2CR(val); - } else { - p = buf; - if (!first) - *p++ = '\t'; - val = _get_L2CR(); - p += sprintf(p, "0x%08x: ", val); - p += sprintf(p, " %s", (val >> 31) & 1 ? "enabled" : - "disabled"); - p += sprintf(p, ", %sparity", (val>>30)&1 ? "" : "no "); - p += sprintf(p, ", %s", sizestrings[(val >> 28) & 3]); - p += sprintf(p, ", %s", clockstrings[(val >> 25) & 7]); - p += sprintf(p, ", %s", typestrings[(val >> 23) & 2]); - p += sprintf(p, "%s", (val>>22)&1 ? ", data only" : ""); - p += sprintf(p, "%s", (val>>20)&1 ? ", ZZ enabled": ""); - p += sprintf(p, ", %s", (val>>19)&1 ? "write-through" : - "copy-back"); - p += sprintf(p, "%s", (val>>18)&1 ? ", testing" : ""); - p += sprintf(p, ", %sns hold",holdstrings[(val>>16)&3]); - p += sprintf(p, "%s", (val>>15)&1 ? ", DLL slow" : ""); - p += sprintf(p, "%s", (val>>14)&1 ? ", diff clock" :""); - p += sprintf(p, "%s", (val>>13)&1 ? ", DLL bypass" :""); - - p += sprintf(p,"\n"); - - len = strlen(buf); - if (len > left) - len = left; - if (copy_to_user(buffer, buf, len)) - return -EFAULT; - left -= len; - buffer += len; - break; - } - } - - if (!write && !first && left) { - if(put_user('\n', (char __user *) buffer)) - return -EFAULT; - left--, buffer++; - } - if (write) { - char __user *s = (char __user *) buffer; - while (left) { - char c; - if(get_user(c, s++)) - return -EFAULT; - if (!isspace(c)) - break; - left--; - } - } - if (write && first) - return -EINVAL; - *lenp -= left; - *ppos += *lenp; - return 0; -} - -#ifdef CONFIG_SYSCTL -/* - * Register our sysctl. 
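/*
 * A small stand-alone decoder for a few of the L2CR fields rendered by
 * the sprintf() chain above, handy for checking a raw register value by
 * hand.  Bit positions and strings are the ones used above; only a
 * subset of the fields is shown, and the function name is illustrative.
 */
static void decode_l2cr(unsigned int val)
{
        static const char *sizes[4] = { "2MB", "256KB", "512KB", "1MB" };

        printk("L2CR %08x: %s, %sparity, %s, %s\n", val,
               (val >> 31) & 1 ? "enabled" : "disabled",
               (val >> 30) & 1 ? "" : "no ",
               sizes[(val >> 28) & 3],
               (val >> 19) & 1 ? "write-through" : "copy-back");
}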
- */ -static ctl_table htab_ctl_table[]={ - { - .ctl_name = KERN_PPC_L2CR, - .procname = "l2cr", - .mode = 0644, - .proc_handler = &proc_dol2crvec, - }, - { 0, }, -}; -static ctl_table htab_sysctl_root[] = { - { 1, "kernel", NULL, 0, 0755, htab_ctl_table, }, - { 0,}, -}; - -static int __init -register_ppc_htab_sysctl(void) -{ - register_sysctl_table(htab_sysctl_root, 0); - - return 0; -} - -__initcall(register_ppc_htab_sysctl); -#endif diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c deleted file mode 100644 index e0ca61b37f4..00000000000 --- a/arch/ppc/kernel/ppc_ksyms.c +++ /dev/null @@ -1,333 +0,0 @@ -#include <linux/config.h> -#include <linux/module.h> -#include <linux/threads.h> -#include <linux/smp.h> -#include <linux/sched.h> -#include <linux/elfcore.h> -#include <linux/string.h> -#include <linux/interrupt.h> -#include <linux/tty.h> -#include <linux/vt_kern.h> -#include <linux/nvram.h> -#include <linux/console.h> -#include <linux/irq.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/ide.h> -#include <linux/pm.h> -#include <linux/bitops.h> - -#include <asm/page.h> -#include <asm/semaphore.h> -#include <asm/processor.h> -#include <asm/uaccess.h> -#include <asm/io.h> -#include <asm/ide.h> -#include <asm/atomic.h> -#include <asm/checksum.h> -#include <asm/pgtable.h> -#include <asm/tlbflush.h> -#include <linux/adb.h> -#include <linux/cuda.h> -#include <linux/pmu.h> -#include <asm/prom.h> -#include <asm/system.h> -#include <asm/pci-bridge.h> -#include <asm/irq.h> -#include <asm/pmac_feature.h> -#include <asm/dma.h> -#include <asm/machdep.h> -#include <asm/hw_irq.h> -#include <asm/nvram.h> -#include <asm/mmu_context.h> -#include <asm/backlight.h> -#include <asm/time.h> -#include <asm/cputable.h> -#include <asm/btext.h> -#include <asm/div64.h> -#include <asm/xmon.h> - -#ifdef CONFIG_8xx -#include <asm/commproc.h> -#endif - -extern void transfer_to_handler(void); -extern void do_IRQ(struct pt_regs *regs); -extern void machine_check_exception(struct pt_regs *regs); -extern void alignment_exception(struct pt_regs *regs); -extern void program_check_exception(struct pt_regs *regs); -extern void single_step_exception(struct pt_regs *regs); -extern int do_signal(sigset_t *, struct pt_regs *); -extern int pmac_newworld; -extern int sys_sigreturn(struct pt_regs *regs); - -long long __ashrdi3(long long, int); -long long __ashldi3(long long, int); -long long __lshrdi3(long long, int); - -extern unsigned long mm_ptov (unsigned long paddr); - -EXPORT_SYMBOL(clear_pages); -EXPORT_SYMBOL(clear_user_page); -EXPORT_SYMBOL(do_signal); -EXPORT_SYMBOL(transfer_to_handler); -EXPORT_SYMBOL(do_IRQ); -EXPORT_SYMBOL(machine_check_exception); -EXPORT_SYMBOL(alignment_exception); -EXPORT_SYMBOL(program_check_exception); -EXPORT_SYMBOL(single_step_exception); -EXPORT_SYMBOL(sys_sigreturn); -EXPORT_SYMBOL(ppc_n_lost_interrupts); -EXPORT_SYMBOL(ppc_lost_interrupts); - -EXPORT_SYMBOL(ISA_DMA_THRESHOLD); -EXPORT_SYMBOL(DMA_MODE_READ); -EXPORT_SYMBOL(DMA_MODE_WRITE); -#if defined(CONFIG_PPC_PREP) -EXPORT_SYMBOL(_prep_type); -EXPORT_SYMBOL(ucSystemType); -#endif - -#if !defined(__INLINE_BITOPS) -EXPORT_SYMBOL(set_bit); -EXPORT_SYMBOL(clear_bit); -EXPORT_SYMBOL(change_bit); -EXPORT_SYMBOL(test_and_set_bit); -EXPORT_SYMBOL(test_and_clear_bit); -EXPORT_SYMBOL(test_and_change_bit); -#endif /* __INLINE_BITOPS */ - -EXPORT_SYMBOL(strcpy); -EXPORT_SYMBOL(strncpy); -EXPORT_SYMBOL(strcat); -EXPORT_SYMBOL(strncat); -EXPORT_SYMBOL(strchr); -EXPORT_SYMBOL(strrchr); -EXPORT_SYMBOL(strpbrk); 
-EXPORT_SYMBOL(strstr); -EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strnlen); -EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strncmp); -EXPORT_SYMBOL(strcasecmp); -EXPORT_SYMBOL(__div64_32); - -EXPORT_SYMBOL(csum_partial); -EXPORT_SYMBOL(csum_partial_copy_generic); -EXPORT_SYMBOL(ip_fast_csum); -EXPORT_SYMBOL(csum_tcpudp_magic); - -EXPORT_SYMBOL(__copy_tofrom_user); -EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(__strncpy_from_user); -EXPORT_SYMBOL(__strnlen_user); - -/* -EXPORT_SYMBOL(inb); -EXPORT_SYMBOL(inw); -EXPORT_SYMBOL(inl); -EXPORT_SYMBOL(outb); -EXPORT_SYMBOL(outw); -EXPORT_SYMBOL(outl); -EXPORT_SYMBOL(outsl);*/ - -EXPORT_SYMBOL(__ide_mm_insl); -EXPORT_SYMBOL(__ide_mm_outsw); -EXPORT_SYMBOL(__ide_mm_insw); -EXPORT_SYMBOL(__ide_mm_outsl); - -EXPORT_SYMBOL(_insb); -EXPORT_SYMBOL(_outsb); -EXPORT_SYMBOL(_insw); -EXPORT_SYMBOL(_outsw); -EXPORT_SYMBOL(_insl); -EXPORT_SYMBOL(_outsl); -EXPORT_SYMBOL(_insw_ns); -EXPORT_SYMBOL(_outsw_ns); -EXPORT_SYMBOL(_insl_ns); -EXPORT_SYMBOL(_outsl_ns); -EXPORT_SYMBOL(iopa); -EXPORT_SYMBOL(mm_ptov); -EXPORT_SYMBOL(ioremap); -#ifdef CONFIG_44x -EXPORT_SYMBOL(ioremap64); -#endif -EXPORT_SYMBOL(__ioremap); -EXPORT_SYMBOL(iounmap); -EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */ - -#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) -EXPORT_SYMBOL(ppc_ide_md); -#endif - -#ifdef CONFIG_PCI -EXPORT_SYMBOL(isa_io_base); -EXPORT_SYMBOL(isa_mem_base); -EXPORT_SYMBOL(pci_dram_offset); -EXPORT_SYMBOL(pci_alloc_consistent); -EXPORT_SYMBOL(pci_free_consistent); -EXPORT_SYMBOL(pci_bus_io_base); -EXPORT_SYMBOL(pci_bus_io_base_phys); -EXPORT_SYMBOL(pci_bus_mem_base_phys); -EXPORT_SYMBOL(pci_bus_to_hose); -EXPORT_SYMBOL(pci_resource_to_bus); -EXPORT_SYMBOL(pci_phys_to_bus); -EXPORT_SYMBOL(pci_bus_to_phys); -#endif /* CONFIG_PCI */ - -#ifdef CONFIG_NOT_COHERENT_CACHE -EXPORT_SYMBOL(flush_dcache_all); -#endif - -EXPORT_SYMBOL(start_thread); -EXPORT_SYMBOL(kernel_thread); - -EXPORT_SYMBOL(flush_instruction_cache); -EXPORT_SYMBOL(giveup_fpu); -EXPORT_SYMBOL(flush_icache_range); -EXPORT_SYMBOL(flush_dcache_range); -EXPORT_SYMBOL(flush_icache_user_range); -EXPORT_SYMBOL(flush_dcache_page); -EXPORT_SYMBOL(flush_tlb_kernel_range); -EXPORT_SYMBOL(flush_tlb_page); -EXPORT_SYMBOL(_tlbie); -#ifdef CONFIG_ALTIVEC -EXPORT_SYMBOL(last_task_used_altivec); -EXPORT_SYMBOL(giveup_altivec); -#endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_SPE -EXPORT_SYMBOL(last_task_used_spe); -EXPORT_SYMBOL(giveup_spe); -#endif /* CONFIG_SPE */ -#ifdef CONFIG_SMP -EXPORT_SYMBOL(smp_call_function); -EXPORT_SYMBOL(smp_hw_index); -#endif - -EXPORT_SYMBOL(ppc_md); - -#ifdef CONFIG_ADB -EXPORT_SYMBOL(adb_request); -EXPORT_SYMBOL(adb_register); -EXPORT_SYMBOL(adb_unregister); -EXPORT_SYMBOL(adb_poll); -EXPORT_SYMBOL(adb_try_handler_change); -#endif /* CONFIG_ADB */ -#ifdef CONFIG_ADB_CUDA -EXPORT_SYMBOL(cuda_request); -EXPORT_SYMBOL(cuda_poll); -#endif /* CONFIG_ADB_CUDA */ -#ifdef CONFIG_PPC_MULTIPLATFORM -EXPORT_SYMBOL(_machine); -#endif -#ifdef CONFIG_PPC_PMAC -EXPORT_SYMBOL(sys_ctrler); -EXPORT_SYMBOL(pmac_newworld); -#endif -#ifdef CONFIG_PPC_OF -EXPORT_SYMBOL(find_devices); -EXPORT_SYMBOL(find_type_devices); -EXPORT_SYMBOL(find_compatible_devices); -EXPORT_SYMBOL(find_path_device); -EXPORT_SYMBOL(device_is_compatible); -EXPORT_SYMBOL(machine_is_compatible); -EXPORT_SYMBOL(find_all_nodes); -EXPORT_SYMBOL(get_property); -EXPORT_SYMBOL(request_OF_resource); -EXPORT_SYMBOL(release_OF_resource); -EXPORT_SYMBOL(of_find_node_by_name); -EXPORT_SYMBOL(of_find_node_by_type); 
-EXPORT_SYMBOL(of_find_compatible_node); -EXPORT_SYMBOL(of_find_node_by_path); -EXPORT_SYMBOL(of_find_all_nodes); -EXPORT_SYMBOL(of_get_parent); -EXPORT_SYMBOL(of_get_next_child); -EXPORT_SYMBOL(of_node_get); -EXPORT_SYMBOL(of_node_put); -#endif /* CONFIG_PPC_OF */ -#if defined(CONFIG_BOOTX_TEXT) -EXPORT_SYMBOL(btext_update_display); -#endif -#if defined(CONFIG_SCSI) && defined(CONFIG_PPC_PMAC) -EXPORT_SYMBOL(note_scsi_host); -#endif -#ifdef CONFIG_VT -EXPORT_SYMBOL(kd_mksound); -#endif -EXPORT_SYMBOL(to_tm); - -EXPORT_SYMBOL(pm_power_off); - -EXPORT_SYMBOL(__ashrdi3); -EXPORT_SYMBOL(__ashldi3); -EXPORT_SYMBOL(__lshrdi3); -EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(cacheable_memcpy); -EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(memscan); -EXPORT_SYMBOL(memcmp); -EXPORT_SYMBOL(memchr); - -#if defined(CONFIG_FB_VGA16_MODULE) -EXPORT_SYMBOL(screen_info); -#endif - -EXPORT_SYMBOL(__delay); -EXPORT_SYMBOL(timer_interrupt); -EXPORT_SYMBOL(irq_desc); -EXPORT_SYMBOL(tb_ticks_per_jiffy); -EXPORT_SYMBOL(get_wchan); -EXPORT_SYMBOL(console_drivers); -#ifdef CONFIG_XMON -EXPORT_SYMBOL(xmon); -EXPORT_SYMBOL(xmon_printf); -#endif -EXPORT_SYMBOL(__up); -EXPORT_SYMBOL(__down); -EXPORT_SYMBOL(__down_interruptible); - -#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) -extern void (*debugger)(struct pt_regs *regs); -extern int (*debugger_bpt)(struct pt_regs *regs); -extern int (*debugger_sstep)(struct pt_regs *regs); -extern int (*debugger_iabr_match)(struct pt_regs *regs); -extern int (*debugger_dabr_match)(struct pt_regs *regs); -extern void (*debugger_fault_handler)(struct pt_regs *regs); - -EXPORT_SYMBOL(debugger); -EXPORT_SYMBOL(debugger_bpt); -EXPORT_SYMBOL(debugger_sstep); -EXPORT_SYMBOL(debugger_iabr_match); -EXPORT_SYMBOL(debugger_dabr_match); -EXPORT_SYMBOL(debugger_fault_handler); -#endif - -#ifdef CONFIG_8xx -EXPORT_SYMBOL(cpm_install_handler); -EXPORT_SYMBOL(cpm_free_handler); -#endif /* CONFIG_8xx */ -#if defined(CONFIG_8xx) || defined(CONFIG_40x) || defined(CONFIG_85xx) ||\ - defined(CONFIG_83xx) -EXPORT_SYMBOL(__res); -#endif - -EXPORT_SYMBOL(next_mmu_context); -EXPORT_SYMBOL(set_context); -EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */ -EXPORT_SYMBOL(disarm_decr); -#ifdef CONFIG_PPC_STD_MMU -extern long mol_trampoline; -EXPORT_SYMBOL(mol_trampoline); /* For MOL */ -EXPORT_SYMBOL(flush_hash_pages); /* For MOL */ -#ifdef CONFIG_SMP -extern int mmu_hash_lock; -EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */ -#endif /* CONFIG_SMP */ -extern long *intercept_table; -EXPORT_SYMBOL(intercept_table); -#endif /* CONFIG_PPC_STD_MMU */ -#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) -EXPORT_SYMBOL(__mtdcr); -EXPORT_SYMBOL(__mfdcr); -#endif diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c deleted file mode 100644 index cb1c7b92f8c..00000000000 --- a/arch/ppc/kernel/process.c +++ /dev/null @@ -1,845 +0,0 @@ -/* - * arch/ppc/kernel/process.c - * - * Derived from "arch/i386/kernel/process.c" - * Copyright (C) 1995 Linus Torvalds - * - * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and - * Paul Mackerras (paulus@cs.anu.edu.au) - * - * PowerPC version - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- * - */ - -#include <linux/config.h> -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/smp.h> -#include <linux/smp_lock.h> -#include <linux/stddef.h> -#include <linux/unistd.h> -#include <linux/ptrace.h> -#include <linux/slab.h> -#include <linux/user.h> -#include <linux/elf.h> -#include <linux/init.h> -#include <linux/prctl.h> -#include <linux/init_task.h> -#include <linux/module.h> -#include <linux/kallsyms.h> -#include <linux/mqueue.h> -#include <linux/hardirq.h> - -#include <asm/pgtable.h> -#include <asm/uaccess.h> -#include <asm/system.h> -#include <asm/io.h> -#include <asm/processor.h> -#include <asm/mmu.h> -#include <asm/prom.h> - -extern unsigned long _get_SP(void); - -struct task_struct *last_task_used_math = NULL; -struct task_struct *last_task_used_altivec = NULL; -struct task_struct *last_task_used_spe = NULL; - -static struct fs_struct init_fs = INIT_FS; -static struct files_struct init_files = INIT_FILES; -static struct signal_struct init_signals = INIT_SIGNALS(init_signals); -static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); -EXPORT_SYMBOL(init_mm); - -/* this is 8kB-aligned so we can get to the thread_info struct - at the base of it from the stack pointer with 1 integer instruction. */ -union thread_union init_thread_union - __attribute__((__section__(".data.init_task"))) = -{ INIT_THREAD_INFO(init_task) }; - -/* initial task structure */ -struct task_struct init_task = INIT_TASK(init_task); -EXPORT_SYMBOL(init_task); - -/* only used to get secondary processor up */ -struct task_struct *current_set[NR_CPUS] = {&init_task, }; - -#undef SHOW_TASK_SWITCHES -#undef CHECK_STACK - -#if defined(CHECK_STACK) -unsigned long -kernel_stack_top(struct task_struct *tsk) -{ - return ((unsigned long)tsk) + sizeof(union task_union); -} - -unsigned long -task_top(struct task_struct *tsk) -{ - return ((unsigned long)tsk) + sizeof(struct thread_info); -} - -/* check to make sure the kernel stack is healthy */ -int check_stack(struct task_struct *tsk) -{ - unsigned long stack_top = kernel_stack_top(tsk); - unsigned long tsk_top = task_top(tsk); - int ret = 0; - -#if 0 - /* check thread magic */ - if ( tsk->thread.magic != THREAD_MAGIC ) - { - ret |= 1; - printk("thread.magic bad: %08x\n", tsk->thread.magic); - } -#endif - - if ( !tsk ) - printk("check_stack(): tsk bad tsk %p\n",tsk); - - /* check if stored ksp is bad */ - if ( (tsk->thread.ksp > stack_top) || (tsk->thread.ksp < tsk_top) ) - { - printk("stack out of bounds: %s/%d\n" - " tsk_top %08lx ksp %08lx stack_top %08lx\n", - tsk->comm,tsk->pid, - tsk_top, tsk->thread.ksp, stack_top); - ret |= 2; - } - - /* check if stack ptr RIGHT NOW is bad */ - if ( (tsk == current) && ((_get_SP() > stack_top ) || (_get_SP() < tsk_top)) ) - { - printk("current stack ptr out of bounds: %s/%d\n" - " tsk_top %08lx sp %08lx stack_top %08lx\n", - current->comm,current->pid, - tsk_top, _get_SP(), stack_top); - ret |= 4; - } - -#if 0 - /* check amount of free stack */ - for ( i = (unsigned long *)task_top(tsk) ; i < kernel_stack_top(tsk) ; i++ ) - { - if ( !i ) - printk("check_stack(): i = %p\n", i); - if ( *i != 0 ) - { - /* only notify if it's less than 900 bytes */ - if ( (i - (unsigned long *)task_top(tsk)) < 900 ) - printk("%d bytes free on stack\n", - i - task_top(tsk)); - break; - } - } -#endif - - if (ret) - { - panic("bad kernel stack"); - } - return(ret); -} -#endif /* defined(CHECK_STACK) */ - -/* - * Make sure 
the floating-point register state in the - * the thread_struct is up to date for task tsk. - */ -void flush_fp_to_thread(struct task_struct *tsk) -{ - if (tsk->thread.regs) { - /* - * We need to disable preemption here because if we didn't, - * another process could get scheduled after the regs->msr - * test but before we have finished saving the FP registers - * to the thread_struct. That process could take over the - * FPU, and then when we get scheduled again we would store - * bogus values for the remaining FP registers. - */ - preempt_disable(); - if (tsk->thread.regs->msr & MSR_FP) { -#ifdef CONFIG_SMP - /* - * This should only ever be called for current or - * for a stopped child process. Since we save away - * the FP register state on context switch on SMP, - * there is something wrong if a stopped child appears - * to still have its FP state in the CPU registers. - */ - BUG_ON(tsk != current); -#endif - giveup_fpu(current); - } - preempt_enable(); - } -} - -void enable_kernel_fp(void) -{ - WARN_ON(preemptible()); - -#ifdef CONFIG_SMP - if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) - giveup_fpu(current); - else - giveup_fpu(NULL); /* just enables FP for kernel */ -#else - giveup_fpu(last_task_used_math); -#endif /* CONFIG_SMP */ -} -EXPORT_SYMBOL(enable_kernel_fp); - -int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs) -{ - preempt_disable(); - if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP)) - giveup_fpu(tsk); - preempt_enable(); - memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs)); - return 1; -} - -#ifdef CONFIG_ALTIVEC -void enable_kernel_altivec(void) -{ - WARN_ON(preemptible()); - -#ifdef CONFIG_SMP - if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) - giveup_altivec(current); - else - giveup_altivec(NULL); /* just enable AltiVec for kernel - force */ -#else - giveup_altivec(last_task_used_altivec); -#endif /* __SMP __ */ -} -EXPORT_SYMBOL(enable_kernel_altivec); - -/* - * Make sure the VMX/Altivec register state in the - * the thread_struct is up to date for task tsk. 
- */ -void flush_altivec_to_thread(struct task_struct *tsk) -{ - if (tsk->thread.regs) { - preempt_disable(); - if (tsk->thread.regs->msr & MSR_VEC) { -#ifdef CONFIG_SMP - BUG_ON(tsk != current); -#endif - giveup_altivec(current); - } - preempt_enable(); - } -} - -int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs) -{ - if (regs->msr & MSR_VEC) - giveup_altivec(current); - memcpy(vrregs, ¤t->thread.vr[0], sizeof(*vrregs)); - return 1; -} -#endif /* CONFIG_ALTIVEC */ - -#ifdef CONFIG_SPE -void -enable_kernel_spe(void) -{ - WARN_ON(preemptible()); - -#ifdef CONFIG_SMP - if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) - giveup_spe(current); - else - giveup_spe(NULL); /* just enable SPE for kernel - force */ -#else - giveup_spe(last_task_used_spe); -#endif /* __SMP __ */ -} -EXPORT_SYMBOL(enable_kernel_spe); - -void flush_spe_to_thread(struct task_struct *tsk) -{ - if (tsk->thread.regs) { - preempt_disable(); - if (tsk->thread.regs->msr & MSR_SPE) { -#ifdef CONFIG_SMP - BUG_ON(tsk != current); -#endif - giveup_spe(current); - } - preempt_enable(); - } -} - -int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs) -{ - if (regs->msr & MSR_SPE) - giveup_spe(current); - /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */ - memcpy(evrregs, ¤t->thread.evr[0], sizeof(u32) * 35); - return 1; -} -#endif /* CONFIG_SPE */ - -struct task_struct *__switch_to(struct task_struct *prev, - struct task_struct *new) -{ - struct thread_struct *new_thread, *old_thread; - unsigned long s; - struct task_struct *last; - - local_irq_save(s); -#ifdef CHECK_STACK - check_stack(prev); - check_stack(new); -#endif - -#ifdef CONFIG_SMP - /* avoid complexity of lazy save/restore of fpu - * by just saving it every time we switch out if - * this task used the fpu during the last quantum. - * - * If it tries to use the fpu again, it'll trap and - * reload its fp regs. So we don't have to do a restore - * every switch, just a save. - * -- Cort - */ - if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) - giveup_fpu(prev); -#ifdef CONFIG_ALTIVEC - /* - * If the previous thread used altivec in the last quantum - * (thus changing altivec regs) then save them. - * We used to check the VRSAVE register but not all apps - * set it, so we don't rely on it now (and in fact we need - * to save & restore VSCR even if VRSAVE == 0). -- paulus - * - * On SMP we always save/restore altivec regs just to avoid the - * complexity of changing processors. - * -- Cort - */ - if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))) - giveup_altivec(prev); -#endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_SPE - /* - * If the previous thread used spe in the last quantum - * (thus changing spe regs) then save them. - * - * On SMP we always save/restore spe regs just to avoid the - * complexity of changing processors. - */ - if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))) - giveup_spe(prev); -#endif /* CONFIG_SPE */ -#endif /* CONFIG_SMP */ - -#ifdef CONFIG_ALTIVEC - /* Avoid the trap. On smp this this never happens since - * we don't set last_task_used_altivec -- Cort - */ - if (new->thread.regs && last_task_used_altivec == new) - new->thread.regs->msr |= MSR_VEC; -#endif -#ifdef CONFIG_SPE - /* Avoid the trap. 
On smp this this never happens since - * we don't set last_task_used_spe - */ - if (new->thread.regs && last_task_used_spe == new) - new->thread.regs->msr |= MSR_SPE; -#endif /* CONFIG_SPE */ - new_thread = &new->thread; - old_thread = ¤t->thread; - last = _switch(old_thread, new_thread); - local_irq_restore(s); - return last; -} - -void show_regs(struct pt_regs * regs) -{ - int i, trap; - - printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n", - regs->nip, regs->link, regs->gpr[1], regs, regs->trap, - print_tainted()); - printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n", - regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0, - regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0, - regs->msr&MSR_IR ? 1 : 0, - regs->msr&MSR_DR ? 1 : 0); - trap = TRAP(regs); - if (trap == 0x300 || trap == 0x600) - printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr); - printk("TASK = %p[%d] '%s' THREAD: %p\n", - current, current->pid, current->comm, current->thread_info); - printk("Last syscall: %ld ", current->thread.last_syscall); - -#ifdef CONFIG_SMP - printk(" CPU: %d", smp_processor_id()); -#endif /* CONFIG_SMP */ - - for (i = 0; i < 32; i++) { - long r; - if ((i % 8) == 0) - printk("\n" KERN_INFO "GPR%02d: ", i); - if (__get_user(r, ®s->gpr[i])) - break; - printk("%08lX ", r); - if (i == 12 && !FULL_REGS(regs)) - break; - } - printk("\n"); -#ifdef CONFIG_KALLSYMS - /* - * Lookup NIP late so we have the best change of getting the - * above info out without failing - */ - printk("NIP [%08lx] ", regs->nip); - print_symbol("%s\n", regs->nip); - printk("LR [%08lx] ", regs->link); - print_symbol("%s\n", regs->link); -#endif - show_stack(current, (unsigned long *) regs->gpr[1]); -} - -void exit_thread(void) -{ - if (last_task_used_math == current) - last_task_used_math = NULL; - if (last_task_used_altivec == current) - last_task_used_altivec = NULL; -#ifdef CONFIG_SPE - if (last_task_used_spe == current) - last_task_used_spe = NULL; -#endif -} - -void flush_thread(void) -{ - if (last_task_used_math == current) - last_task_used_math = NULL; - if (last_task_used_altivec == current) - last_task_used_altivec = NULL; -#ifdef CONFIG_SPE - if (last_task_used_spe == current) - last_task_used_spe = NULL; -#endif -} - -void -release_thread(struct task_struct *t) -{ -} - -/* - * This gets called before we allocate a new thread and copy - * the current task into it. - */ -void prepare_to_copy(struct task_struct *tsk) -{ - struct pt_regs *regs = tsk->thread.regs; - - if (regs == NULL) - return; - preempt_disable(); - if (regs->msr & MSR_FP) - giveup_fpu(current); -#ifdef CONFIG_ALTIVEC - if (regs->msr & MSR_VEC) - giveup_altivec(current); -#endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_SPE - if (regs->msr & MSR_SPE) - giveup_spe(current); -#endif /* CONFIG_SPE */ - preempt_enable(); -} - -/* - * Copy a thread.. 
- */ -int -copy_thread(int nr, unsigned long clone_flags, unsigned long usp, - unsigned long unused, - struct task_struct *p, struct pt_regs *regs) -{ - struct pt_regs *childregs, *kregs; - extern void ret_from_fork(void); - unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE; - unsigned long childframe; - - CHECK_FULL_REGS(regs); - /* Copy registers */ - sp -= sizeof(struct pt_regs); - childregs = (struct pt_regs *) sp; - *childregs = *regs; - if ((childregs->msr & MSR_PR) == 0) { - /* for kernel thread, set `current' and stackptr in new task */ - childregs->gpr[1] = sp + sizeof(struct pt_regs); - childregs->gpr[2] = (unsigned long) p; - p->thread.regs = NULL; /* no user register state */ - } else { - childregs->gpr[1] = usp; - p->thread.regs = childregs; - if (clone_flags & CLONE_SETTLS) - childregs->gpr[2] = childregs->gpr[6]; - } - childregs->gpr[3] = 0; /* Result from fork() */ - sp -= STACK_FRAME_OVERHEAD; - childframe = sp; - - /* - * The way this works is that at some point in the future - * some task will call _switch to switch to the new task. - * That will pop off the stack frame created below and start - * the new task running at ret_from_fork. The new task will - * do some house keeping and then return from the fork or clone - * system call, using the stack frame created above. - */ - sp -= sizeof(struct pt_regs); - kregs = (struct pt_regs *) sp; - sp -= STACK_FRAME_OVERHEAD; - p->thread.ksp = sp; - kregs->nip = (unsigned long)ret_from_fork; - - p->thread.last_syscall = -1; - - return 0; -} - -/* - * Set up a thread for executing a new program - */ -void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp) -{ - set_fs(USER_DS); - memset(regs->gpr, 0, sizeof(regs->gpr)); - regs->ctr = 0; - regs->link = 0; - regs->xer = 0; - regs->ccr = 0; - regs->mq = 0; - regs->nip = nip; - regs->gpr[1] = sp; - regs->msr = MSR_USER; - if (last_task_used_math == current) - last_task_used_math = NULL; - if (last_task_used_altivec == current) - last_task_used_altivec = NULL; -#ifdef CONFIG_SPE - if (last_task_used_spe == current) - last_task_used_spe = NULL; -#endif - memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); - current->thread.fpscr.val = 0; -#ifdef CONFIG_ALTIVEC - memset(current->thread.vr, 0, sizeof(current->thread.vr)); - memset(¤t->thread.vscr, 0, sizeof(current->thread.vscr)); - current->thread.vrsave = 0; - current->thread.used_vr = 0; -#endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_SPE - memset(current->thread.evr, 0, sizeof(current->thread.evr)); - current->thread.acc = 0; - current->thread.spefscr = 0; - current->thread.used_spe = 0; -#endif /* CONFIG_SPE */ -} - -#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ - | PR_FP_EXC_RES | PR_FP_EXC_INV) - -int set_fpexc_mode(struct task_struct *tsk, unsigned int val) -{ - struct pt_regs *regs = tsk->thread.regs; - - /* This is a bit hairy. If we are an SPE enabled processor - * (have embedded fp) we store the IEEE exception enable flags in - * fpexc_mode. fpexc_mode is also used for setting FP exception - * mode (asyn, precise, disabled) for 'Classic' FP. */ - if (val & PR_FP_EXC_SW_ENABLE) { -#ifdef CONFIG_SPE - tsk->thread.fpexc_mode = val & - (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); -#else - return -EINVAL; -#endif - } else { - /* on a CONFIG_SPE this does not hurt us. The bits that - * __pack_fe01 use do not overlap with bits used for - * PR_FP_EXC_SW_ENABLE. 
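/*
 * copy_thread() above carves the top of the child's kernel stack into a
 * pt_regs frame for the child's register state (copied from the parent)
 * and a kernel pt_regs frame that _switch() will pop, each padded with a
 * standard stack frame.  A sketch that walks the same arithmetic with
 * the sizes passed in as parameters, so the layout is visible without
 * the real headers; the function name is illustrative only.
 */
static void show_child_stack_layout(unsigned long stack_base,
                                    unsigned long thread_size,
                                    unsigned long ptregs_size,
                                    unsigned long frame_overhead)
{
        unsigned long sp = stack_base + thread_size;

        sp -= ptregs_size;              /* childregs: the child's saved register state */
        printk("childregs  at %08lx\n", sp);
        sp -= frame_overhead;           /* childframe: back chain below childregs */
        sp -= ptregs_size;              /* kregs: consumed when _switch() resumes the child */
        printk("kregs      at %08lx (nip = ret_from_fork)\n", sp);
        sp -= frame_overhead;
        printk("thread.ksp at %08lx\n", sp);
}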
Additionally, the MSR[FE0,FE1] bits - * on CONFIG_SPE implementations are reserved so writing to - * them does not change anything */ - if (val > PR_FP_EXC_PRECISE) - return -EINVAL; - tsk->thread.fpexc_mode = __pack_fe01(val); - if (regs != NULL && (regs->msr & MSR_FP) != 0) - regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1)) - | tsk->thread.fpexc_mode; - } - return 0; -} - -int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) -{ - unsigned int val; - - if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) -#ifdef CONFIG_SPE - val = tsk->thread.fpexc_mode; -#else - return -EINVAL; -#endif - else - val = __unpack_fe01(tsk->thread.fpexc_mode); - return put_user(val, (unsigned int __user *) adr); -} - -int sys_clone(unsigned long clone_flags, unsigned long usp, - int __user *parent_tidp, void __user *child_threadptr, - int __user *child_tidp, int p6, - struct pt_regs *regs) -{ - CHECK_FULL_REGS(regs); - if (usp == 0) - usp = regs->gpr[1]; /* stack pointer for child */ - return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp); -} - -int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3, - unsigned long p4, unsigned long p5, unsigned long p6, - struct pt_regs *regs) -{ - CHECK_FULL_REGS(regs); - return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL); -} - -int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3, - unsigned long p4, unsigned long p5, unsigned long p6, - struct pt_regs *regs) -{ - CHECK_FULL_REGS(regs); - return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], - regs, 0, NULL, NULL); -} - -int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, - unsigned long a3, unsigned long a4, unsigned long a5, - struct pt_regs *regs) -{ - int error; - char * filename; - - filename = getname((char __user *) a0); - error = PTR_ERR(filename); - if (IS_ERR(filename)) - goto out; - preempt_disable(); - if (regs->msr & MSR_FP) - giveup_fpu(current); -#ifdef CONFIG_ALTIVEC - if (regs->msr & MSR_VEC) - giveup_altivec(current); -#endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_SPE - if (regs->msr & MSR_SPE) - giveup_spe(current); -#endif /* CONFIG_SPE */ - preempt_enable(); - error = do_execve(filename, (char __user *__user *) a1, - (char __user *__user *) a2, regs); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - } - putname(filename); -out: - return error; -} - -void dump_stack(void) -{ - show_stack(current, NULL); -} - -EXPORT_SYMBOL(dump_stack); - -void show_stack(struct task_struct *tsk, unsigned long *stack) -{ - unsigned long sp, stack_top, prev_sp, ret; - int count = 0; - unsigned long next_exc = 0; - struct pt_regs *regs; - extern char ret_from_except, ret_from_except_full, ret_from_syscall; - - sp = (unsigned long) stack; - if (tsk == NULL) - tsk = current; - if (sp == 0) { - if (tsk == current) - asm("mr %0,1" : "=r" (sp)); - else - sp = tsk->thread.ksp; - } - - prev_sp = (unsigned long) (tsk->thread_info + 1); - stack_top = (unsigned long) tsk->thread_info + THREAD_SIZE; - while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) { - if (count == 0) { - printk("Call trace:"); -#ifdef CONFIG_KALLSYMS - printk("\n"); -#endif - } else { - if (next_exc) { - ret = next_exc; - next_exc = 0; - } else - ret = *(unsigned long *)(sp + 4); - printk(" [%08lx] ", ret); -#ifdef CONFIG_KALLSYMS - print_symbol("%s", ret); - printk("\n"); -#endif - if (ret == (unsigned long) &ret_from_except - || ret == (unsigned long) &ret_from_except_full - || ret == (unsigned long) &ret_from_syscall) 
{ - /* sp + 16 points to an exception frame */ - regs = (struct pt_regs *) (sp + 16); - if (sp + 16 + sizeof(*regs) <= stack_top) - next_exc = regs->nip; - } - } - ++count; - sp = *(unsigned long *)sp; - } -#ifndef CONFIG_KALLSYMS - if (count > 0) - printk("\n"); -#endif -} - -#if 0 -/* - * Low level print for debugging - Cort - */ -int __init ll_printk(const char *fmt, ...) -{ - va_list args; - char buf[256]; - int i; - - va_start(args, fmt); - i=vsprintf(buf,fmt,args); - ll_puts(buf); - va_end(args); - return i; -} - -int lines = 24, cols = 80; -int orig_x = 0, orig_y = 0; - -void puthex(unsigned long val) -{ - unsigned char buf[10]; - int i; - for (i = 7; i >= 0; i--) - { - buf[i] = "0123456789ABCDEF"[val & 0x0F]; - val >>= 4; - } - buf[8] = '\0'; - prom_print(buf); -} - -void __init ll_puts(const char *s) -{ - int x,y; - char *vidmem = (char *)/*(_ISA_MEM_BASE + 0xB8000) */0xD00B8000; - char c; - extern int mem_init_done; - - if ( mem_init_done ) /* assume this means we can printk */ - { - printk(s); - return; - } - -#if 0 - if ( have_of ) - { - prom_print(s); - return; - } -#endif - - /* - * can't ll_puts on chrp without openfirmware yet. - * vidmem just needs to be setup for it. - * -- Cort - */ - if ( _machine != _MACH_prep ) - return; - x = orig_x; - y = orig_y; - - while ( ( c = *s++ ) != '\0' ) { - if ( c == '\n' ) { - x = 0; - if ( ++y >= lines ) { - /*scroll();*/ - /*y--;*/ - y = 0; - } - } else { - vidmem [ ( x + cols * y ) * 2 ] = c; - if ( ++x >= cols ) { - x = 0; - if ( ++y >= lines ) { - /*scroll();*/ - /*y--;*/ - y = 0; - } - } - } - } - - orig_x = x; - orig_y = y; -} -#endif - -unsigned long get_wchan(struct task_struct *p) -{ - unsigned long ip, sp; - unsigned long stack_page = (unsigned long) p->thread_info; - int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - sp = p->thread.ksp; - do { - sp = *(unsigned long *)sp; - if (sp < stack_page || sp >= stack_page + 8188) - return 0; - if (count > 0) { - ip = *(unsigned long *)(sp + 4); - if (!in_sched_functions(ip)) - return ip; - } - } while (count++ < 16); - return 0; -} diff --git a/arch/ppc/kernel/relocate_kernel.S b/arch/ppc/kernel/relocate_kernel.S deleted file mode 100644 index 9b2ad48e988..00000000000 --- a/arch/ppc/kernel/relocate_kernel.S +++ /dev/null @@ -1,123 +0,0 @@ -/* - * relocate_kernel.S - put the kernel image in place to boot - * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> - * - * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz - * - * This source code is licensed under the GNU General Public License, - * Version 2. See the file COPYING for more details. - */ - -#include <asm/reg.h> -#include <asm/ppc_asm.h> -#include <asm/processor.h> - -#include <asm/kexec.h> - -#define PAGE_SIZE 4096 /* must be same value as in <asm/page.h> */ - - /* - * Must be relocatable PIC code callable as a C function. - */ - .globl relocate_new_kernel -relocate_new_kernel: - /* r3 = page_list */ - /* r4 = reboot_code_buffer */ - /* r5 = start_address */ - - li r0, 0 - - /* - * Set Machine Status Register to a known status, - * switch the MMU off and jump to 1: in a single step. - */ - - mr r8, r0 - ori r8, r8, MSR_RI|MSR_ME - mtspr SPRN_SRR1, r8 - addi r8, r4, 1f - relocate_new_kernel - mtspr SPRN_SRR0, r8 - sync - rfi - -1: - /* from this point address translation is turned off */ - /* and interrupts are disabled */ - - /* set a new stack at the bottom of our page... 
*/ - /* (not really needed now) */ - addi r1, r4, KEXEC_CONTROL_CODE_SIZE - 8 /* for LR Save+Back Chain */ - stw r0, 0(r1) - - /* Do the copies */ - li r6, 0 /* checksum */ - mr r0, r3 - b 1f - -0: /* top, read another word for the indirection page */ - lwzu r0, 4(r3) - -1: - /* is it a destination page? (r8) */ - rlwinm. r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */ - beq 2f - - rlwinm r8, r0, 0, 0, 19 /* clear kexec flags, page align */ - b 0b - -2: /* is it an indirection page? (r3) */ - rlwinm. r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */ - beq 2f - - rlwinm r3, r0, 0, 0, 19 /* clear kexec flags, page align */ - subi r3, r3, 4 - b 0b - -2: /* are we done? */ - rlwinm. r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */ - beq 2f - b 3f - -2: /* is it a source page? (r9) */ - rlwinm. r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */ - beq 0b - - rlwinm r9, r0, 0, 0, 19 /* clear kexec flags, page align */ - - li r7, PAGE_SIZE / 4 - mtctr r7 - subi r9, r9, 4 - subi r8, r8, 4 -9: - lwzu r0, 4(r9) /* do the copy */ - xor r6, r6, r0 - stwu r0, 4(r8) - dcbst 0, r8 - sync - icbi 0, r8 - bdnz 9b - - addi r9, r9, 4 - addi r8, r8, 4 - b 0b - -3: - - /* To be certain of avoiding problems with self-modifying code - * execute a serializing instruction here. - */ - isync - sync - - /* jump to the entry point, usually the setup routine */ - mtlr r5 - blrl - -1: b 1b - -relocate_new_kernel_end: - - .globl relocate_new_kernel_size -relocate_new_kernel_size: - .long relocate_new_kernel_end - relocate_new_kernel - diff --git a/arch/ppc/kernel/semaphore.c b/arch/ppc/kernel/semaphore.c deleted file mode 100644 index 2fe429b27c1..00000000000 --- a/arch/ppc/kernel/semaphore.c +++ /dev/null @@ -1,131 +0,0 @@ -/* - * PowerPC-specific semaphore code. - * - * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * April 2001 - Reworked by Paul Mackerras <paulus@samba.org> - * to eliminate the SMP races in the old version between the updates - * of `count' and `waking'. Now we use negative `count' values to - * indicate that some process(es) are waiting for the semaphore. - */ - -#include <linux/sched.h> -#include <linux/init.h> -#include <asm/atomic.h> -#include <asm/semaphore.h> -#include <asm/errno.h> - -/* - * Atomically update sem->count. - * This does the equivalent of the following: - * - * old_count = sem->count; - * tmp = MAX(old_count, 0) + incr; - * sem->count = tmp; - * return old_count; - */ -static inline int __sem_update_count(struct semaphore *sem, int incr) -{ - int old_count, tmp; - - __asm__ __volatile__("\n" -"1: lwarx %0,0,%3\n" -" srawi %1,%0,31\n" -" andc %1,%0,%1\n" -" add %1,%1,%4\n" - PPC405_ERR77(0,%3) -" stwcx. %1,0,%3\n" -" bne 1b" - : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) - : "r" (&sem->count), "r" (incr), "m" (sem->count) - : "cc"); - - return old_count; -} - -void __up(struct semaphore *sem) -{ - /* - * Note that we incremented count in up() before we came here, - * but that was ineffective since the result was <= 0, and - * any negative value of count is equivalent to 0. - * This ends up setting count to 1, unless count is now > 0 - * (i.e. because some other cpu has called up() in the meantime), - * in which case we just increment count. 
- */ - __sem_update_count(sem, 1); - wake_up(&sem->wait); -} - -/* - * Note that when we come in to __down or __down_interruptible, - * we have already decremented count, but that decrement was - * ineffective since the result was < 0, and any negative value - * of count is equivalent to 0. - * Thus it is only when we decrement count from some value > 0 - * that we have actually got the semaphore. - */ -void __sched __down(struct semaphore *sem) -{ - struct task_struct *tsk = current; - DECLARE_WAITQUEUE(wait, tsk); - - tsk->state = TASK_UNINTERRUPTIBLE; - add_wait_queue_exclusive(&sem->wait, &wait); - smp_wmb(); - - /* - * Try to get the semaphore. If the count is > 0, then we've - * got the semaphore; we decrement count and exit the loop. - * If the count is 0 or negative, we set it to -1, indicating - * that we are asleep, and then sleep. - */ - while (__sem_update_count(sem, -1) <= 0) { - schedule(); - tsk->state = TASK_UNINTERRUPTIBLE; - } - remove_wait_queue(&sem->wait, &wait); - tsk->state = TASK_RUNNING; - - /* - * If there are any more sleepers, wake one of them up so - * that it can either get the semaphore, or set count to -1 - * indicating that there are still processes sleeping. - */ - wake_up(&sem->wait); -} - -int __sched __down_interruptible(struct semaphore * sem) -{ - int retval = 0; - struct task_struct *tsk = current; - DECLARE_WAITQUEUE(wait, tsk); - - tsk->state = TASK_INTERRUPTIBLE; - add_wait_queue_exclusive(&sem->wait, &wait); - smp_wmb(); - - while (__sem_update_count(sem, -1) <= 0) { - if (signal_pending(current)) { - /* - * A signal is pending - give up trying. - * Set sem->count to 0 if it is negative, - * since we are no longer sleeping. - */ - __sem_update_count(sem, 0); - retval = -EINTR; - break; - } - schedule(); - tsk->state = TASK_INTERRUPTIBLE; - } - tsk->state = TASK_RUNNING; - remove_wait_queue(&sem->wait, &wait); - wake_up(&sem->wait); - return retval; -} diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c deleted file mode 100644 index 6bcb85d2b7f..00000000000 --- a/arch/ppc/kernel/setup.c +++ /dev/null @@ -1,809 +0,0 @@ -/* - * Common prep/pmac/chrp boot and setup code. 
- */ - -#include <linux/config.h> -#include <linux/module.h> -#include <linux/string.h> -#include <linux/sched.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/reboot.h> -#include <linux/delay.h> -#include <linux/initrd.h> -#include <linux/ide.h> -#include <linux/tty.h> -#include <linux/bootmem.h> -#include <linux/seq_file.h> -#include <linux/root_dev.h> -#include <linux/cpu.h> -#include <linux/console.h> - -#include <asm/residual.h> -#include <asm/io.h> -#include <asm/prom.h> -#include <asm/processor.h> -#include <asm/pgtable.h> -#include <asm/bootinfo.h> -#include <asm/setup.h> -#include <asm/amigappc.h> -#include <asm/smp.h> -#include <asm/elf.h> -#include <asm/cputable.h> -#include <asm/bootx.h> -#include <asm/btext.h> -#include <asm/machdep.h> -#include <asm/uaccess.h> -#include <asm/system.h> -#include <asm/pmac_feature.h> -#include <asm/sections.h> -#include <asm/nvram.h> -#include <asm/xmon.h> -#include <asm/ocp.h> - -#define USES_PPC_SYS (defined(CONFIG_85xx) || defined(CONFIG_83xx) || \ - defined(CONFIG_MPC10X_BRIDGE) || defined(CONFIG_8260) || \ - defined(CONFIG_PPC_MPC52xx)) - -#if USES_PPC_SYS -#include <asm/ppc_sys.h> -#endif - -#if defined CONFIG_KGDB -#include <asm/kgdb.h> -#endif - -extern void platform_init(unsigned long r3, unsigned long r4, - unsigned long r5, unsigned long r6, unsigned long r7); -extern void bootx_init(unsigned long r4, unsigned long phys); -extern void identify_cpu(unsigned long offset, unsigned long cpu); -extern void do_cpu_ftr_fixups(unsigned long offset); -extern void reloc_got2(unsigned long offset); - -extern void ppc6xx_idle(void); -extern void power4_idle(void); - -extern boot_infos_t *boot_infos; -struct ide_machdep_calls ppc_ide_md; - -/* Used with the BI_MEMSIZE bootinfo parameter to store the memory - size value reported by the boot loader. */ -unsigned long boot_mem_size; - -unsigned long ISA_DMA_THRESHOLD; -unsigned int DMA_MODE_READ; -unsigned int DMA_MODE_WRITE; - -#ifdef CONFIG_PPC_MULTIPLATFORM -int _machine = 0; - -extern void prep_init(unsigned long r3, unsigned long r4, - unsigned long r5, unsigned long r6, unsigned long r7); -extern void pmac_init(unsigned long r3, unsigned long r4, - unsigned long r5, unsigned long r6, unsigned long r7); -extern void chrp_init(unsigned long r3, unsigned long r4, - unsigned long r5, unsigned long r6, unsigned long r7); - -dev_t boot_dev; -#endif /* CONFIG_PPC_MULTIPLATFORM */ - -int have_of; -EXPORT_SYMBOL(have_of); - -#ifdef __DO_IRQ_CANON -int ppc_do_canonicalize_irqs; -EXPORT_SYMBOL(ppc_do_canonicalize_irqs); -#endif - -#ifdef CONFIG_MAGIC_SYSRQ -unsigned long SYSRQ_KEY = 0x54; -#endif /* CONFIG_MAGIC_SYSRQ */ - -#ifdef CONFIG_VGA_CONSOLE -unsigned long vgacon_remap_base; -#endif - -struct machdep_calls ppc_md; - -/* - * These are used in binfmt_elf.c to put aux entries on the stack - * for each elf executable being started. 
- */ -int dcache_bsize; -int icache_bsize; -int ucache_bsize; - -#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_FB_VGA16) || \ - defined(CONFIG_FB_VGA16_MODULE) || defined(CONFIG_FB_VESA) -struct screen_info screen_info = { - 0, 25, /* orig-x, orig-y */ - 0, /* unused */ - 0, /* orig-video-page */ - 0, /* orig-video-mode */ - 80, /* orig-video-cols */ - 0,0,0, /* ega_ax, ega_bx, ega_cx */ - 25, /* orig-video-lines */ - 1, /* orig-video-isVGA */ - 16 /* orig-video-points */ -}; -#endif /* CONFIG_VGA_CONSOLE || CONFIG_FB_VGA16 || CONFIG_FB_VESA */ - -void machine_restart(char *cmd) -{ -#ifdef CONFIG_NVRAM - nvram_sync(); -#endif - ppc_md.restart(cmd); -} - -void machine_power_off(void) -{ -#ifdef CONFIG_NVRAM - nvram_sync(); -#endif - ppc_md.power_off(); -} - -void machine_halt(void) -{ -#ifdef CONFIG_NVRAM - nvram_sync(); -#endif - ppc_md.halt(); -} - -void (*pm_power_off)(void) = machine_power_off; - -#ifdef CONFIG_TAU -extern u32 cpu_temp(unsigned long cpu); -extern u32 cpu_temp_both(unsigned long cpu); -#endif /* CONFIG_TAU */ - -int show_cpuinfo(struct seq_file *m, void *v) -{ - int i = (int) v - 1; - int err = 0; - unsigned int pvr; - unsigned short maj, min; - unsigned long lpj; - - if (i >= NR_CPUS) { - /* Show summary information */ -#ifdef CONFIG_SMP - unsigned long bogosum = 0; - for (i = 0; i < NR_CPUS; ++i) - if (cpu_online(i)) - bogosum += cpu_data[i].loops_per_jiffy; - seq_printf(m, "total bogomips\t: %lu.%02lu\n", - bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); -#endif /* CONFIG_SMP */ - - if (ppc_md.show_cpuinfo != NULL) - err = ppc_md.show_cpuinfo(m); - return err; - } - -#ifdef CONFIG_SMP - if (!cpu_online(i)) - return 0; - pvr = cpu_data[i].pvr; - lpj = cpu_data[i].loops_per_jiffy; -#else - pvr = mfspr(SPRN_PVR); - lpj = loops_per_jiffy; -#endif - - seq_printf(m, "processor\t: %d\n", i); - seq_printf(m, "cpu\t\t: "); - - if (cur_cpu_spec->pvr_mask) - seq_printf(m, "%s", cur_cpu_spec->cpu_name); - else - seq_printf(m, "unknown (%08x)", pvr); -#ifdef CONFIG_ALTIVEC - if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC) - seq_printf(m, ", altivec supported"); -#endif - seq_printf(m, "\n"); - -#ifdef CONFIG_TAU - if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) { -#ifdef CONFIG_TAU_AVERAGE - /* more straightforward, but potentially misleading */ - seq_printf(m, "temperature \t: %u C (uncalibrated)\n", - cpu_temp(i)); -#else - /* show the actual temp sensor range */ - u32 temp; - temp = cpu_temp_both(i); - seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n", - temp & 0xff, temp >> 16); -#endif - } -#endif /* CONFIG_TAU */ - - if (ppc_md.show_percpuinfo != NULL) { - err = ppc_md.show_percpuinfo(m, i); - if (err) - return err; - } - - /* If we are a Freescale core do a simple check so - * we dont have to keep adding cases in the future */ - if ((PVR_VER(pvr) & 0x8000) == 0x8000) { - maj = PVR_MAJ(pvr); - min = PVR_MIN(pvr); - } else { - switch (PVR_VER(pvr)) { - case 0x0020: /* 403 family */ - maj = PVR_MAJ(pvr) + 1; - min = PVR_MIN(pvr); - break; - case 0x1008: /* 740P/750P ?? 
*/ - maj = ((pvr >> 8) & 0xFF) - 1; - min = pvr & 0xFF; - break; - default: - maj = (pvr >> 8) & 0xFF; - min = pvr & 0xFF; - break; - } - } - - seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n", - maj, min, PVR_VER(pvr), PVR_REV(pvr)); - - seq_printf(m, "bogomips\t: %lu.%02lu\n", - lpj / (500000/HZ), (lpj / (5000/HZ)) % 100); - -#if USES_PPC_SYS - if (cur_ppc_sys_spec->ppc_sys_name) - seq_printf(m, "chipset\t\t: %s\n", - cur_ppc_sys_spec->ppc_sys_name); -#endif - -#ifdef CONFIG_SMP - seq_printf(m, "\n"); -#endif - - return 0; -} - -static void *c_start(struct seq_file *m, loff_t *pos) -{ - int i = *pos; - - return i <= NR_CPUS? (void *) (i + 1): NULL; -} - -static void *c_next(struct seq_file *m, void *v, loff_t *pos) -{ - ++*pos; - return c_start(m, pos); -} - -static void c_stop(struct seq_file *m, void *v) -{ -} - -struct seq_operations cpuinfo_op = { - .start =c_start, - .next = c_next, - .stop = c_stop, - .show = show_cpuinfo, -}; - -/* - * We're called here very early in the boot. We determine the machine - * type and call the appropriate low-level setup functions. - * -- Cort <cort@fsmlabs.com> - * - * Note that the kernel may be running at an address which is different - * from the address that it was linked at, so we must use RELOC/PTRRELOC - * to access static data (including strings). -- paulus - */ -__init -unsigned long -early_init(int r3, int r4, int r5) -{ - unsigned long phys; - unsigned long offset = reloc_offset(); - - /* Default */ - phys = offset + KERNELBASE; - - /* First zero the BSS -- use memset, some arches don't have - * caches on yet */ - memset_io(PTRRELOC(&__bss_start), 0, _end - __bss_start); - - /* - * Identify the CPU type and fix up code sections - * that depend on which cpu we have. - */ - identify_cpu(offset, 0); - do_cpu_ftr_fixups(offset); - -#if defined(CONFIG_PPC_MULTIPLATFORM) - reloc_got2(offset); - - /* If we came here from BootX, clear the screen, - * set up some pointers and return. */ - if ((r3 == 0x426f6f58) && (r5 == 0)) - bootx_init(r4, phys); - - /* - * don't do anything on prep - * for now, don't use bootinfo because it breaks yaboot 0.5 - * and assume that if we didn't find a magic number, we have OF - */ - else if (*(unsigned long *)(0) != 0xdeadc0de) - phys = prom_init(r3, r4, (prom_entry)r5); - - reloc_got2(-offset); -#endif - - return phys; -} - -#ifdef CONFIG_PPC_OF -/* - * Assume here that all clock rates are the same in a - * smp system. -- Cort - */ -int -of_show_percpuinfo(struct seq_file *m, int i) -{ - struct device_node *cpu_node; - u32 *fp; - int s; - - cpu_node = find_type_devices("cpu"); - if (!cpu_node) - return 0; - for (s = 0; s < i && cpu_node->next; s++) - cpu_node = cpu_node->next; - fp = (u32 *)get_property(cpu_node, "clock-frequency", NULL); - if (fp) - seq_printf(m, "clock\t\t: %dMHz\n", *fp / 1000000); - return 0; -} - -void __init -intuit_machine_type(void) -{ - char *model; - struct device_node *root; - - /* ask the OF info if we're a chrp or pmac */ - root = find_path_device("/"); - if (root != 0) { - /* assume pmac unless proven to be chrp -- Cort */ - _machine = _MACH_Pmac; - model = get_property(root, "device_type", NULL); - if (model && !strncmp("chrp", model, 4)) - _machine = _MACH_chrp; - else { - model = get_property(root, "model", NULL); - if (model && !strncmp(model, "IBM", 3)) - _machine = _MACH_chrp; - } - } -} -#endif - -#ifdef CONFIG_PPC_MULTIPLATFORM -/* - * The PPC_MULTIPLATFORM version of platform_init... 
- */ -void __init -platform_init(unsigned long r3, unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7) -{ -#ifdef CONFIG_BOOTX_TEXT - if (boot_text_mapped) { - btext_clearscreen(); - btext_welcome(); - } -#endif - - parse_bootinfo(find_bootinfo()); - - /* if we didn't get any bootinfo telling us what we are... */ - if (_machine == 0) { - /* prep boot loader tells us if we're prep or not */ - if ( *(unsigned long *)(KERNELBASE) == (0xdeadc0de) ) - _machine = _MACH_prep; - } - -#ifdef CONFIG_PPC_PREP - /* not much more to do here, if prep */ - if (_machine == _MACH_prep) { - prep_init(r3, r4, r5, r6, r7); - return; - } -#endif - - have_of = 1; - - /* prom_init has already been called from __start */ - if (boot_infos) - relocate_nodes(); - - /* If we aren't PReP, we can find out if we're Pmac - * or CHRP with this. */ - if (_machine == 0) - intuit_machine_type(); - - /* finish_device_tree may need _machine defined. */ - finish_device_tree(); - - /* - * If we were booted via quik, r3 points to the physical - * address of the command-line parameters. - * If we were booted from an xcoff image (i.e. netbooted or - * booted from floppy), we get the command line from the - * bootargs property of the /chosen node. - * If an initial ramdisk is present, r3 and r4 - * are used for initrd_start and initrd_size, - * otherwise they contain 0xdeadbeef. - */ - if (r3 >= 0x4000 && r3 < 0x800000 && r4 == 0) { - strlcpy(cmd_line, (char *)r3 + KERNELBASE, - sizeof(cmd_line)); - } else if (boot_infos != 0) { - /* booted by BootX - check for ramdisk */ - if (boot_infos->kernelParamsOffset != 0) - strlcpy(cmd_line, (char *) boot_infos - + boot_infos->kernelParamsOffset, - sizeof(cmd_line)); -#ifdef CONFIG_BLK_DEV_INITRD - if (boot_infos->ramDisk) { - initrd_start = (unsigned long) boot_infos - + boot_infos->ramDisk; - initrd_end = initrd_start + boot_infos->ramDiskSize; - initrd_below_start_ok = 1; - } -#endif - } else { - struct device_node *chosen; - char *p; - -#ifdef CONFIG_BLK_DEV_INITRD - if (r3 && r4 && r4 != 0xdeadbeef) { - if (r3 < KERNELBASE) - r3 += KERNELBASE; - initrd_start = r3; - initrd_end = r3 + r4; - ROOT_DEV = Root_RAM0; - initrd_below_start_ok = 1; - } -#endif - chosen = find_devices("chosen"); - if (chosen != NULL) { - p = get_property(chosen, "bootargs", NULL); - if (p && *p) { - strlcpy(cmd_line, p, sizeof(cmd_line)); - } - } - } -#ifdef CONFIG_ADB - if (strstr(cmd_line, "adb_sync")) { - extern int __adb_probe_sync; - __adb_probe_sync = 1; - } -#endif /* CONFIG_ADB */ - - switch (_machine) { -#ifdef CONFIG_PPC_PMAC - case _MACH_Pmac: - pmac_init(r3, r4, r5, r6, r7); - break; -#endif -#ifdef CONFIG_PPC_CHRP - case _MACH_chrp: - chrp_init(r3, r4, r5, r6, r7); - break; -#endif - } -} - -#ifdef CONFIG_SERIAL_CORE_CONSOLE -extern char *of_stdout_device; - -static int __init set_preferred_console(void) -{ - struct device_node *prom_stdout; - char *name; - int offset = 0; - - if (of_stdout_device == NULL) - return -ENODEV; - - /* The user has requested a console so this is already set up. 
*/ - if (strstr(saved_command_line, "console=")) - return -EBUSY; - - prom_stdout = find_path_device(of_stdout_device); - if (!prom_stdout) - return -ENODEV; - - name = (char *)get_property(prom_stdout, "name", NULL); - if (!name) - return -ENODEV; - - if (strcmp(name, "serial") == 0) { - int i; - u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i); - if (i > 8) { - switch (reg[1]) { - case 0x3f8: - offset = 0; - break; - case 0x2f8: - offset = 1; - break; - case 0x898: - offset = 2; - break; - case 0x890: - offset = 3; - break; - default: - /* We dont recognise the serial port */ - return -ENODEV; - } - } - } else if (strcmp(name, "ch-a") == 0) - offset = 0; - else if (strcmp(name, "ch-b") == 0) - offset = 1; - else - return -ENODEV; - return add_preferred_console("ttyS", offset, NULL); -} -console_initcall(set_preferred_console); -#endif /* CONFIG_SERIAL_CORE_CONSOLE */ -#endif /* CONFIG_PPC_MULTIPLATFORM */ - -struct bi_record *find_bootinfo(void) -{ - struct bi_record *rec; - - rec = (struct bi_record *)_ALIGN((ulong)__bss_start+(1<<20)-1,(1<<20)); - if ( rec->tag != BI_FIRST ) { - /* - * This 0x10000 offset is a terrible hack but it will go away when - * we have the bootloader handle all the relocation and - * prom calls -- Cort - */ - rec = (struct bi_record *)_ALIGN((ulong)__bss_start+0x10000+(1<<20)-1,(1<<20)); - if ( rec->tag != BI_FIRST ) - return NULL; - } - return rec; -} - -void parse_bootinfo(struct bi_record *rec) -{ - if (rec == NULL || rec->tag != BI_FIRST) - return; - while (rec->tag != BI_LAST) { - ulong *data = rec->data; - switch (rec->tag) { - case BI_CMD_LINE: - strlcpy(cmd_line, (void *)data, sizeof(cmd_line)); - break; -#ifdef CONFIG_BLK_DEV_INITRD - case BI_INITRD: - initrd_start = data[0] + KERNELBASE; - initrd_end = data[0] + data[1] + KERNELBASE; - break; -#endif /* CONFIG_BLK_DEV_INITRD */ -#ifdef CONFIG_PPC_MULTIPLATFORM - case BI_MACHTYPE: - _machine = data[0]; - break; -#endif - case BI_MEMSIZE: - boot_mem_size = data[0]; - break; - } - rec = (struct bi_record *)((ulong)rec + rec->size); - } -} - -/* - * Find out what kind of machine we're on and save any data we need - * from the early boot process (devtree is copied on pmac by prom_init()). - * This is called very early on the boot process, after a minimal - * MMU environment has been set up but before MMU_init is called. 
- */ -void __init -machine_init(unsigned long r3, unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7) -{ -#ifdef CONFIG_CMDLINE - strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line)); -#endif /* CONFIG_CMDLINE */ - -#ifdef CONFIG_6xx - ppc_md.power_save = ppc6xx_idle; -#endif -#ifdef CONFIG_POWER4 - ppc_md.power_save = power4_idle; -#endif - - platform_init(r3, r4, r5, r6, r7); - - if (ppc_md.progress) - ppc_md.progress("id mach(): done", 0x200); -} -#ifdef CONFIG_BOOKE_WDT -/* Checks wdt=x and wdt_period=xx command-line option */ -int __init early_parse_wdt(char *p) -{ - if (p && strncmp(p, "0", 1) != 0) - booke_wdt_enabled = 1; - - return 0; -} -early_param("wdt", early_parse_wdt); - -int __init early_parse_wdt_period (char *p) -{ - if (p) - booke_wdt_period = simple_strtoul(p, NULL, 0); - - return 0; -} -early_param("wdt_period", early_parse_wdt_period); -#endif /* CONFIG_BOOKE_WDT */ - -/* Checks "l2cr=xxxx" command-line option */ -int __init ppc_setup_l2cr(char *str) -{ - if (cpu_has_feature(CPU_FTR_L2CR)) { - unsigned long val = simple_strtoul(str, NULL, 0); - printk(KERN_INFO "l2cr set to %lx\n", val); - _set_L2CR(0); /* force invalidate by disable cache */ - _set_L2CR(val); /* and enable it */ - } - return 1; -} -__setup("l2cr=", ppc_setup_l2cr); - -#ifdef CONFIG_GENERIC_NVRAM - -/* Generic nvram hooks used by drivers/char/gen_nvram.c */ -unsigned char nvram_read_byte(int addr) -{ - if (ppc_md.nvram_read_val) - return ppc_md.nvram_read_val(addr); - return 0xff; -} -EXPORT_SYMBOL(nvram_read_byte); - -void nvram_write_byte(unsigned char val, int addr) -{ - if (ppc_md.nvram_write_val) - ppc_md.nvram_write_val(addr, val); -} -EXPORT_SYMBOL(nvram_write_byte); - -void nvram_sync(void) -{ - if (ppc_md.nvram_sync) - ppc_md.nvram_sync(); -} -EXPORT_SYMBOL(nvram_sync); - -#endif /* CONFIG_NVRAM */ - -static struct cpu cpu_devices[NR_CPUS]; - -int __init ppc_init(void) -{ - int i; - - /* clear the progress line */ - if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); - - /* register CPU devices */ - for (i = 0; i < NR_CPUS; i++) - if (cpu_possible(i)) - register_cpu(&cpu_devices[i], i, NULL); - - /* call platform init */ - if (ppc_md.init != NULL) { - ppc_md.init(); - } - return 0; -} - -arch_initcall(ppc_init); - -/* Warning, IO base is not yet inited */ -void __init setup_arch(char **cmdline_p) -{ - extern char *klimit; - extern void do_init_bootmem(void); - - /* so udelay does something sensible, assume <= 1000 bogomips */ - loops_per_jiffy = 500000000 / HZ; - -#ifdef CONFIG_PPC_MULTIPLATFORM - /* This could be called "early setup arch", it must be done - * now because xmon need it - */ - if (_machine == _MACH_Pmac) - pmac_feature_init(); /* New cool way */ -#endif - -#ifdef CONFIG_XMON - xmon_init(1); - if (strstr(cmd_line, "xmon")) - xmon(NULL); -#endif /* CONFIG_XMON */ - if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab); - -#if defined(CONFIG_KGDB) - if (ppc_md.kgdb_map_scc) - ppc_md.kgdb_map_scc(); - set_debug_traps(); - if (strstr(cmd_line, "gdb")) { - if (ppc_md.progress) - ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000); - printk("kgdb breakpoint activated\n"); - breakpoint(); - } -#endif - - /* - * Set cache line size based on type of cpu as a default. - * Systems with OF can look in the properties on the cpu node(s) - * for a possibly more accurate value. 
- */ - if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) { - dcache_bsize = cur_cpu_spec->dcache_bsize; - icache_bsize = cur_cpu_spec->icache_bsize; - ucache_bsize = 0; - } else - ucache_bsize = dcache_bsize = icache_bsize - = cur_cpu_spec->dcache_bsize; - - /* reboot on panic */ - panic_timeout = 180; - - init_mm.start_code = PAGE_OFFSET; - init_mm.end_code = (unsigned long) _etext; - init_mm.end_data = (unsigned long) _edata; - init_mm.brk = (unsigned long) klimit; - - /* Save unparsed command line copy for /proc/cmdline */ - strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE); - *cmdline_p = cmd_line; - - parse_early_param(); - - /* set up the bootmem stuff with available memory */ - do_init_bootmem(); - if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab); - -#ifdef CONFIG_PPC_OCP - /* Initialize OCP device list */ - ocp_early_init(); - if ( ppc_md.progress ) ppc_md.progress("ocp: exit", 0x3eab); -#endif - -#ifdef CONFIG_DUMMY_CONSOLE - conswitchp = &dummy_con; -#endif - - ppc_md.setup_arch(); - if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); - - paging_init(); - - /* this is for modules since _machine can be a define -- Cort */ - ppc_md.ppc_machine = _machine; -} diff --git a/arch/ppc/kernel/smp-tbsync.c b/arch/ppc/kernel/smp-tbsync.c deleted file mode 100644 index 2c9cd95bcea..00000000000 --- a/arch/ppc/kernel/smp-tbsync.c +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Smp timebase synchronization for ppc. - * - * Copyright (C) 2003 Samuel Rydh (samuel@ibrium.se) - * - */ - -#include <linux/config.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/unistd.h> -#include <linux/init.h> -#include <asm/atomic.h> -#include <asm/smp.h> -#include <asm/time.h> - -#define NUM_ITER 300 - -enum { - kExit=0, kSetAndTest, kTest -}; - -static struct { - volatile int tbu; - volatile int tbl; - volatile int mark; - volatile int cmd; - volatile int handshake; - int filler[3]; - - volatile int ack; - int filler2[7]; - - volatile int race_result; -} *tbsync; - -static volatile int running; - -static void __devinit -enter_contest( int mark, int add ) -{ - while( (int)(get_tbl() - mark) < 0 ) - tbsync->race_result = add; -} - -void __devinit -smp_generic_take_timebase( void ) -{ - int cmd, tbl, tbu; - - local_irq_disable(); - while( !running ) - ; - rmb(); - - for( ;; ) { - tbsync->ack = 1; - while( !tbsync->handshake ) - ; - rmb(); - - cmd = tbsync->cmd; - tbl = tbsync->tbl; - tbu = tbsync->tbu; - tbsync->ack = 0; - if( cmd == kExit ) - return; - - if( cmd == kSetAndTest ) { - while( tbsync->handshake ) - ; - asm volatile ("mttbl %0" :: "r" (tbl) ); - asm volatile ("mttbu %0" :: "r" (tbu) ); - } else { - while( tbsync->handshake ) - ; - } - enter_contest( tbsync->mark, -1 ); - } - local_irq_enable(); -} - -static int __devinit -start_contest( int cmd, int offset, int num ) -{ - int i, tbu, tbl, mark, score=0; - - tbsync->cmd = cmd; - - local_irq_disable(); - for( i=-3; i<num; ) { - tbl = get_tbl() + 400; - tbsync->tbu = tbu = get_tbu(); - tbsync->tbl = tbl + offset; - tbsync->mark = mark = tbl + 400; - - wmb(); - - tbsync->handshake = 1; - while( tbsync->ack ) - ; - - while( (int)(get_tbl() - tbl) <= 0 ) - ; - tbsync->handshake = 0; - enter_contest( mark, 1 ); - - while( !tbsync->ack ) - ; - - if( tbsync->tbu != get_tbu() || ((tbsync->tbl ^ get_tbl()) & 0x80000000) ) - continue; - if( i++ > 0 ) - score += tbsync->race_result; - } - local_irq_enable(); - return score; -} - -void __devinit -smp_generic_give_timebase( void ) -{ - int i, 
score, score2, old, min=0, max=5000, offset=1000; - - printk("Synchronizing timebase\n"); - - /* if this fails then this kernel won't work anyway... */ - tbsync = kmalloc( sizeof(*tbsync), GFP_KERNEL ); - memset( tbsync, 0, sizeof(*tbsync) ); - mb(); - running = 1; - - while( !tbsync->ack ) - ; - - /* binary search */ - for( old=-1 ; old != offset ; offset=(min+max)/2 ) { - score = start_contest( kSetAndTest, offset, NUM_ITER ); - - printk("score %d, offset %d\n", score, offset ); - - if( score > 0 ) - max = offset; - else - min = offset; - old = offset; - } - score = start_contest( kSetAndTest, min, NUM_ITER ); - score2 = start_contest( kSetAndTest, max, NUM_ITER ); - - printk( "Min %d (score %d), Max %d (score %d)\n", min, score, max, score2 ); - score = abs( score ); - score2 = abs( score2 ); - offset = (score < score2) ? min : max; - - /* guard against inaccurate mttb */ - for( i=0; i<10; i++ ) { - start_contest( kSetAndTest, offset, NUM_ITER/10 ); - - if( (score2=start_contest(kTest, offset, NUM_ITER)) < 0 ) - score2 = -score2; - if( score2 <= score || score2 < 20 ) - break; - } - printk("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER ); - - /* exiting */ - tbsync->cmd = kExit; - wmb(); - tbsync->handshake = 1; - while( tbsync->ack ) - ; - tbsync->handshake = 0; - kfree( tbsync ); - tbsync = NULL; - running = 0; - - /* all done */ - smp_tb_synchronized = 1; -} diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c deleted file mode 100644 index bc5bf112483..00000000000 --- a/arch/ppc/kernel/smp.c +++ /dev/null @@ -1,411 +0,0 @@ -/* - * Smp support for ppc. - * - * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great - * deal of code from the sparc and intel versions. - * - * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> - * - */ - -#include <linux/config.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/smp_lock.h> -#include <linux/interrupt.h> -#include <linux/kernel_stat.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/spinlock.h> -#include <linux/cache.h> - -#include <asm/ptrace.h> -#include <asm/atomic.h> -#include <asm/irq.h> -#include <asm/page.h> -#include <asm/pgtable.h> -#include <asm/io.h> -#include <asm/prom.h> -#include <asm/smp.h> -#include <asm/residual.h> -#include <asm/time.h> -#include <asm/thread_info.h> -#include <asm/tlbflush.h> -#include <asm/xmon.h> -#include <asm/machdep.h> - -volatile int smp_commenced; -int smp_tb_synchronized; -struct cpuinfo_PPC cpu_data[NR_CPUS]; -atomic_t ipi_recv; -atomic_t ipi_sent; -cpumask_t cpu_online_map; -cpumask_t cpu_possible_map; -int smp_hw_index[NR_CPUS]; -struct thread_info *secondary_ti; -static struct task_struct *idle_tasks[NR_CPUS]; - -EXPORT_SYMBOL(cpu_online_map); -EXPORT_SYMBOL(cpu_possible_map); - -/* SMP operations for this machine */ -struct smp_ops_t *smp_ops; - -/* all cpu mappings are 1-1 -- Cort */ -volatile unsigned long cpu_callin_map[NR_CPUS]; - -int start_secondary(void *); -void smp_call_function_interrupt(void); -static int __smp_call_function(void (*func) (void *info), void *info, - int wait, int target); - -/* Low level assembly function used to backup CPU 0 state */ -extern void __save_cpu_setup(void); - -/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. - * - * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up - * in /proc/interrupts will be wrong!!! 
--Troy */ -#define PPC_MSG_CALL_FUNCTION 0 -#define PPC_MSG_RESCHEDULE 1 -#define PPC_MSG_INVALIDATE_TLB 2 -#define PPC_MSG_XMON_BREAK 3 - -static inline void -smp_message_pass(int target, int msg) -{ - if (smp_ops) { - atomic_inc(&ipi_sent); - smp_ops->message_pass(target, msg); - } -} - -/* - * Common functions - */ -void smp_message_recv(int msg, struct pt_regs *regs) -{ - atomic_inc(&ipi_recv); - - switch( msg ) { - case PPC_MSG_CALL_FUNCTION: - smp_call_function_interrupt(); - break; - case PPC_MSG_RESCHEDULE: - set_need_resched(); - break; - case PPC_MSG_INVALIDATE_TLB: - _tlbia(); - break; -#ifdef CONFIG_XMON - case PPC_MSG_XMON_BREAK: - xmon(regs); - break; -#endif /* CONFIG_XMON */ - default: - printk("SMP %d: smp_message_recv(): unknown msg %d\n", - smp_processor_id(), msg); - break; - } -} - -/* - * 750's don't broadcast tlb invalidates so - * we have to emulate that behavior. - * -- Cort - */ -void smp_send_tlb_invalidate(int cpu) -{ - if ( PVR_VER(mfspr(SPRN_PVR)) == 8 ) - smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB); -} - -void smp_send_reschedule(int cpu) -{ - /* - * This is only used if `cpu' is running an idle task, - * so it will reschedule itself anyway... - * - * This isn't the case anymore since the other CPU could be - * sleeping and won't reschedule until the next interrupt (such - * as the timer). - * -- Cort - */ - /* This is only used if `cpu' is running an idle task, - so it will reschedule itself anyway... */ - smp_message_pass(cpu, PPC_MSG_RESCHEDULE); -} - -#ifdef CONFIG_XMON -void smp_send_xmon_break(int cpu) -{ - smp_message_pass(cpu, PPC_MSG_XMON_BREAK); -} -#endif /* CONFIG_XMON */ - -static void stop_this_cpu(void *dummy) -{ - local_irq_disable(); - while (1) - ; -} - -void smp_send_stop(void) -{ - smp_call_function(stop_this_cpu, NULL, 1, 0); -} - -/* - * Structure and data for smp_call_function(). This is designed to minimise - * static memory requirements. It also looks cleaner. - * Stolen from the i386 version. - */ -static DEFINE_SPINLOCK(call_lock); - -static struct call_data_struct { - void (*func) (void *info); - void *info; - atomic_t started; - atomic_t finished; - int wait; -} *call_data; - -/* - * this function sends a 'generic call function' IPI to all other CPUs - * in the system. - */ - -int smp_call_function(void (*func) (void *info), void *info, int nonatomic, - int wait) -/* - * [SUMMARY] Run a function on all other CPUs. - * <func> The function to run. This must be fast and non-blocking. - * <info> An arbitrary pointer to pass to the function. - * <nonatomic> currently unused. - * <wait> If true, wait (atomically) until function has completed on other CPUs. - * [RETURNS] 0 on success, else a negative status code. Does not return until - * remote CPUs are nearly ready to execute <<func>> or are or have executed. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - */ -{ - /* FIXME: get cpu lock with hotplug cpus, or change this to - bitmask. 
--RR */ - if (num_online_cpus() <= 1) - return 0; - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - return __smp_call_function(func, info, wait, MSG_ALL_BUT_SELF); -} - -static int __smp_call_function(void (*func) (void *info), void *info, - int wait, int target) -{ - struct call_data_struct data; - int ret = -1; - int timeout; - int ncpus = 1; - - if (target == MSG_ALL_BUT_SELF) - ncpus = num_online_cpus() - 1; - else if (target == MSG_ALL) - ncpus = num_online_cpus(); - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - spin_lock(&call_lock); - call_data = &data; - /* Send a message to all other CPUs and wait for them to respond */ - smp_message_pass(target, PPC_MSG_CALL_FUNCTION); - - /* Wait for response */ - timeout = 1000000; - while (atomic_read(&data.started) != ncpus) { - if (--timeout == 0) { - printk("smp_call_function on cpu %d: other cpus not responding (%d)\n", - smp_processor_id(), atomic_read(&data.started)); - goto out; - } - barrier(); - udelay(1); - } - - if (wait) { - timeout = 1000000; - while (atomic_read(&data.finished) != ncpus) { - if (--timeout == 0) { - printk("smp_call_function on cpu %d: other cpus not finishing (%d/%d)\n", - smp_processor_id(), atomic_read(&data.finished), atomic_read(&data.started)); - goto out; - } - barrier(); - udelay(1); - } - } - ret = 0; - - out: - spin_unlock(&call_lock); - return ret; -} - -void smp_call_function_interrupt(void) -{ - void (*func) (void *info) = call_data->func; - void *info = call_data->info; - int wait = call_data->wait; - - /* - * Notify initiating CPU that I've grabbed the data and am - * about to execute the function - */ - atomic_inc(&call_data->started); - /* - * At this point the info structure may be out of scope unless wait==1 - */ - (*func)(info); - if (wait) - atomic_inc(&call_data->finished); -} - -static void __devinit smp_store_cpu_info(int id) -{ - struct cpuinfo_PPC *c = &cpu_data[id]; - - /* assume bogomips are same for everything */ - c->loops_per_jiffy = loops_per_jiffy; - c->pvr = mfspr(SPRN_PVR); -} - -void __init smp_prepare_cpus(unsigned int max_cpus) -{ - int num_cpus, i, cpu; - struct task_struct *p; - - /* Fixup boot cpu */ - smp_store_cpu_info(smp_processor_id()); - cpu_callin_map[smp_processor_id()] = 1; - - if (smp_ops == NULL) { - printk("SMP not supported on this machine.\n"); - return; - } - - /* Probe platform for CPUs: always linear. 
*/ - num_cpus = smp_ops->probe(); - for (i = 0; i < num_cpus; ++i) - cpu_set(i, cpu_possible_map); - - /* Backup CPU 0 state */ - __save_cpu_setup(); - - for_each_cpu(cpu) { - if (cpu == smp_processor_id()) - continue; - /* create a process for the processor */ - p = fork_idle(cpu); - if (IS_ERR(p)) - panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); - p->thread_info->cpu = cpu; - idle_tasks[cpu] = p; - } -} - -void __devinit smp_prepare_boot_cpu(void) -{ - cpu_set(smp_processor_id(), cpu_online_map); - cpu_set(smp_processor_id(), cpu_possible_map); -} - -int __init setup_profiling_timer(unsigned int multiplier) -{ - return 0; -} - -/* Processor coming up starts here */ -int __devinit start_secondary(void *unused) -{ - int cpu; - - atomic_inc(&init_mm.mm_count); - current->active_mm = &init_mm; - - cpu = smp_processor_id(); - smp_store_cpu_info(cpu); - set_dec(tb_ticks_per_jiffy); - cpu_callin_map[cpu] = 1; - - printk("CPU %d done callin...\n", cpu); - smp_ops->setup_cpu(cpu); - printk("CPU %d done setup...\n", cpu); - smp_ops->take_timebase(); - printk("CPU %d done timebase take...\n", cpu); - - spin_lock(&call_lock); - cpu_set(cpu, cpu_online_map); - spin_unlock(&call_lock); - - local_irq_enable(); - - cpu_idle(); - return 0; -} - -int __cpu_up(unsigned int cpu) -{ - char buf[32]; - int c; - - secondary_ti = idle_tasks[cpu]->thread_info; - mb(); - - /* - * There was a cache flush loop here to flush the cache - * to memory for the first 8MB of RAM. The cache flush - * has been pushed into the kick_cpu function for those - * platforms that need it. - */ - - /* wake up cpu */ - smp_ops->kick_cpu(cpu); - - /* - * wait to see if the cpu made a callin (is actually up). - * use this value that I found through experimentation. - * -- Cort - */ - for (c = 1000; c && !cpu_callin_map[cpu]; c--) - udelay(100); - - if (!cpu_callin_map[cpu]) { - sprintf(buf, "didn't find cpu %u", cpu); - if (ppc_md.progress) ppc_md.progress(buf, 0x360+cpu); - printk("Processor %u is stuck.\n", cpu); - return -ENOENT; - } - - sprintf(buf, "found cpu %u", cpu); - if (ppc_md.progress) ppc_md.progress(buf, 0x350+cpu); - printk("Processor %d found.\n", cpu); - - smp_ops->give_timebase(); - - /* Wait until cpu puts itself in the online map */ - while (!cpu_online(cpu)) - cpu_relax(); - - return 0; -} - -void smp_cpus_done(unsigned int max_cpus) -{ - smp_ops->setup_cpu(0); -} diff --git a/arch/ppc/kernel/softemu8xx.c b/arch/ppc/kernel/softemu8xx.c deleted file mode 100644 index 9bbb6bf7b64..00000000000 --- a/arch/ppc/kernel/softemu8xx.c +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Software emulation of some PPC instructions for the 8xx core. - * - * Copyright (C) 1998 Dan Malek (dmalek@jlc.net) - * - * Software floating emuation for the MPC8xx processor. I did this mostly - * because it was easier than trying to get the libraries compiled for - * software floating point. The goal is still to get the libraries done, - * but I lost patience and needed some hacks to at least get init and - * shells running. The first problem is the setjmp/longjmp that save - * and restore the floating point registers. - * - * For this emulation, our working registers are found on the register - * save area. 
- */ - -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/stddef.h> -#include <linux/unistd.h> -#include <linux/ptrace.h> -#include <linux/slab.h> -#include <linux/user.h> -#include <linux/a.out.h> -#include <linux/interrupt.h> - -#include <asm/pgtable.h> -#include <asm/uaccess.h> -#include <asm/system.h> -#include <asm/io.h> - -extern void -print_8xx_pte(struct mm_struct *mm, unsigned long addr); -extern int -get_8xx_pte(struct mm_struct *mm, unsigned long addr); - -/* Eventually we may need a look-up table, but this works for now. -*/ -#define LFS 48 -#define LFD 50 -#define LFDU 51 -#define STFD 54 -#define STFDU 55 -#define FMR 63 - -/* - * We return 0 on success, 1 on unimplemented instruction, and EFAULT - * if a load/store faulted. - */ -int -Soft_emulate_8xx(struct pt_regs *regs) -{ - uint inst, instword; - uint flreg, idxreg, disp; - uint retval; - signed short sdisp; - uint *ea, *ip; - - retval = 0; - - instword = *((uint *)regs->nip); - inst = instword >> 26; - - flreg = (instword >> 21) & 0x1f; - idxreg = (instword >> 16) & 0x1f; - disp = instword & 0xffff; - - ea = (uint *)(regs->gpr[idxreg] + disp); - ip = (uint *)&current->thread.fpr[flreg]; - - switch ( inst ) - { - case LFD: - /* this is a 16 bit quantity that is sign extended - * so use a signed short here -- Cort - */ - sdisp = (instword & 0xffff); - ea = (uint *)(regs->gpr[idxreg] + sdisp); - if (copy_from_user(ip, ea, sizeof(double))) - retval = -EFAULT; - break; - - case LFDU: - if (copy_from_user(ip, ea, sizeof(double))) - retval = -EFAULT; - else - regs->gpr[idxreg] = (uint)ea; - break; - case LFS: - sdisp = (instword & 0xffff); - ea = (uint *)(regs->gpr[idxreg] + sdisp); - if (copy_from_user(ip, ea, sizeof(float))) - retval = -EFAULT; - break; - case STFD: - /* this is a 16 bit quantity that is sign extended - * so use a signed short here -- Cort - */ - sdisp = (instword & 0xffff); - ea = (uint *)(regs->gpr[idxreg] + sdisp); - if (copy_to_user(ea, ip, sizeof(double))) - retval = -EFAULT; - break; - - case STFDU: - if (copy_to_user(ea, ip, sizeof(double))) - retval = -EFAULT; - else - regs->gpr[idxreg] = (uint)ea; - break; - case FMR: - /* assume this is a fp move -- Cort */ - memcpy( ip, &current->thread.fpr[(instword>>11)&0x1f], - sizeof(double) ); - break; - default: - retval = 1; - printk("Bad emulation %s/%d\n" - " NIP: %08lx instruction: %08x opcode: %x " - "A: %x B: %x C: %x code: %x rc: %x\n", - current->comm,current->pid, - regs->nip, - instword,inst, - (instword>>16)&0x1f, - (instword>>11)&0x1f, - (instword>>6)&0x1f, - (instword>>1)&0x3ff, - instword&1); - { - int pa; - print_8xx_pte(current->mm,regs->nip); - pa = get_8xx_pte(current->mm,regs->nip) & PAGE_MASK; - pa |= (regs->nip & ~PAGE_MASK); - pa = (unsigned long)__va(pa); - printk("Kernel VA for NIP %x ", pa); - print_8xx_pte(current->mm,pa); - } - - } - - if (retval == 0) - regs->nip += 4; - return(retval); -} - diff --git a/arch/ppc/kernel/swsusp.S b/arch/ppc/kernel/swsusp.S deleted file mode 100644 index 69773cc1a85..00000000000 --- a/arch/ppc/kernel/swsusp.S +++ /dev/null @@ -1,349 +0,0 @@ -#include <linux/config.h> -#include <linux/threads.h> -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/cputable.h> -#include <asm/thread_info.h> -#include <asm/ppc_asm.h> -#include <asm/asm-offsets.h> - - -/* - * Structure for storing CPU registers on the save area.
- */ -#define SL_SP 0 -#define SL_PC 4 -#define SL_MSR 8 -#define SL_SDR1 0xc -#define SL_SPRG0 0x10 /* 4 sprg's */ -#define SL_DBAT0 0x20 -#define SL_IBAT0 0x28 -#define SL_DBAT1 0x30 -#define SL_IBAT1 0x38 -#define SL_DBAT2 0x40 -#define SL_IBAT2 0x48 -#define SL_DBAT3 0x50 -#define SL_IBAT3 0x58 -#define SL_TB 0x60 -#define SL_R2 0x68 -#define SL_CR 0x6c -#define SL_LR 0x70 -#define SL_R12 0x74 /* r12 to r31 */ -#define SL_SIZE (SL_R12 + 80) - - .section .data - .align 5 - -_GLOBAL(swsusp_save_area) - .space SL_SIZE - - - .section .text - .align 5 - -_GLOBAL(swsusp_arch_suspend) - - lis r11,swsusp_save_area@h - ori r11,r11,swsusp_save_area@l - - mflr r0 - stw r0,SL_LR(r11) - mfcr r0 - stw r0,SL_CR(r11) - stw r1,SL_SP(r11) - stw r2,SL_R2(r11) - stmw r12,SL_R12(r11) - - /* Save MSR & SDR1 */ - mfmsr r4 - stw r4,SL_MSR(r11) - mfsdr1 r4 - stw r4,SL_SDR1(r11) - - /* Get a stable timebase and save it */ -1: mftbu r4 - stw r4,SL_TB(r11) - mftb r5 - stw r5,SL_TB+4(r11) - mftbu r3 - cmpw r3,r4 - bne 1b - - /* Save SPRGs */ - mfsprg r4,0 - stw r4,SL_SPRG0(r11) - mfsprg r4,1 - stw r4,SL_SPRG0+4(r11) - mfsprg r4,2 - stw r4,SL_SPRG0+8(r11) - mfsprg r4,3 - stw r4,SL_SPRG0+12(r11) - - /* Save BATs */ - mfdbatu r4,0 - stw r4,SL_DBAT0(r11) - mfdbatl r4,0 - stw r4,SL_DBAT0+4(r11) - mfdbatu r4,1 - stw r4,SL_DBAT1(r11) - mfdbatl r4,1 - stw r4,SL_DBAT1+4(r11) - mfdbatu r4,2 - stw r4,SL_DBAT2(r11) - mfdbatl r4,2 - stw r4,SL_DBAT2+4(r11) - mfdbatu r4,3 - stw r4,SL_DBAT3(r11) - mfdbatl r4,3 - stw r4,SL_DBAT3+4(r11) - mfibatu r4,0 - stw r4,SL_IBAT0(r11) - mfibatl r4,0 - stw r4,SL_IBAT0+4(r11) - mfibatu r4,1 - stw r4,SL_IBAT1(r11) - mfibatl r4,1 - stw r4,SL_IBAT1+4(r11) - mfibatu r4,2 - stw r4,SL_IBAT2(r11) - mfibatl r4,2 - stw r4,SL_IBAT2+4(r11) - mfibatu r4,3 - stw r4,SL_IBAT3(r11) - mfibatl r4,3 - stw r4,SL_IBAT3+4(r11) - -#if 0 - /* Backup various CPU config stuffs */ - bl __save_cpu_setup -#endif - /* Call the low level suspend stuff (we should probably have made - * a stackframe... - */ - bl swsusp_save - - /* Restore LR from the save area */ - lis r11,swsusp_save_area@h - ori r11,r11,swsusp_save_area@l - lwz r0,SL_LR(r11) - mtlr r0 - - blr - - -/* Resume code */ -_GLOBAL(swsusp_arch_resume) - - /* Stop pending alitvec streams and memory accesses */ -BEGIN_FTR_SECTION - DSSALL -END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - sync - - /* Disable MSR:DR to make sure we don't take a TLB or - * hash miss during the copy, as our hash table will - * for a while be unuseable. For .text, we assume we are - * covered by a BAT. This works only for non-G5 at this - * point. G5 will need a better approach, possibly using - * a small temporary hash table filled with large mappings, - * disabling the MMU completely isn't a good option for - * performance reasons. - * (Note that 750's may have the same performance issue as - * the G5 in this case, we should investigate using moving - * BATs for these CPUs) - */ - mfmsr r0 - sync - rlwinm r0,r0,0,28,26 /* clear MSR_DR */ - mtmsr r0 - sync - isync - - /* Load ptr the list of pages to copy in r3 */ - lis r11,(pagedir_nosave - KERNELBASE)@h - ori r11,r11,pagedir_nosave@l - lwz r10,0(r11) - - /* Copy the pages. 
This is a very basic implementation, to - * be replaced by something more cache efficient */ -1: - tophys(r3,r10) - li r0,256 - mtctr r0 - lwz r11,pbe_address(r3) /* source */ - tophys(r5,r11) - lwz r10,pbe_orig_address(r3) /* destination */ - tophys(r6,r10) -2: - lwz r8,0(r5) - lwz r9,4(r5) - lwz r10,8(r5) - lwz r11,12(r5) - addi r5,r5,16 - stw r8,0(r6) - stw r9,4(r6) - stw r10,8(r6) - stw r11,12(r6) - addi r6,r6,16 - bdnz 2b - lwz r10,pbe_next(r3) - cmpwi 0,r10,0 - bne 1b - - /* Do a very simple cache flush/inval of the L1 to ensure - * coherency of the icache - */ - lis r3,0x0002 - mtctr r3 - li r3, 0 -1: - lwz r0,0(r3) - addi r3,r3,0x0020 - bdnz 1b - isync - sync - - /* Now flush those cache lines */ - lis r3,0x0002 - mtctr r3 - li r3, 0 -1: - dcbf 0,r3 - addi r3,r3,0x0020 - bdnz 1b - sync - - /* Ok, we are now running with the kernel data of the old - * kernel fully restored. We can get to the save area - * easily now. As for the rest of the code, it assumes the - * loader kernel and the booted one are exactly identical - */ - lis r11,swsusp_save_area@h - ori r11,r11,swsusp_save_area@l - tophys(r11,r11) - -#if 0 - /* Restore various CPU config stuffs */ - bl __restore_cpu_setup -#endif - /* Restore the BATs, and SDR1. Then we can turn on the MMU. - * This is a bit hairy as we are running out of those BATs, - * but first, our code is probably in the icache, and we are - * writing the same value to the BAT, so that should be fine, - * though a better solution will have to be found long-term - */ - lwz r4,SL_SDR1(r11) - mtsdr1 r4 - lwz r4,SL_SPRG0(r11) - mtsprg 0,r4 - lwz r4,SL_SPRG0+4(r11) - mtsprg 1,r4 - lwz r4,SL_SPRG0+8(r11) - mtsprg 2,r4 - lwz r4,SL_SPRG0+12(r11) - mtsprg 3,r4 - -#if 0 - lwz r4,SL_DBAT0(r11) - mtdbatu 0,r4 - lwz r4,SL_DBAT0+4(r11) - mtdbatl 0,r4 - lwz r4,SL_DBAT1(r11) - mtdbatu 1,r4 - lwz r4,SL_DBAT1+4(r11) - mtdbatl 1,r4 - lwz r4,SL_DBAT2(r11) - mtdbatu 2,r4 - lwz r4,SL_DBAT2+4(r11) - mtdbatl 2,r4 - lwz r4,SL_DBAT3(r11) - mtdbatu 3,r4 - lwz r4,SL_DBAT3+4(r11) - mtdbatl 3,r4 - lwz r4,SL_IBAT0(r11) - mtibatu 0,r4 - lwz r4,SL_IBAT0+4(r11) - mtibatl 0,r4 - lwz r4,SL_IBAT1(r11) - mtibatu 1,r4 - lwz r4,SL_IBAT1+4(r11) - mtibatl 1,r4 - lwz r4,SL_IBAT2(r11) - mtibatu 2,r4 - lwz r4,SL_IBAT2+4(r11) - mtibatl 2,r4 - lwz r4,SL_IBAT3(r11) - mtibatu 3,r4 - lwz r4,SL_IBAT3+4(r11) - mtibatl 3,r4 -#endif - -BEGIN_FTR_SECTION - li r4,0 - mtspr SPRN_DBAT4U,r4 - mtspr SPRN_DBAT4L,r4 - mtspr SPRN_DBAT5U,r4 - mtspr SPRN_DBAT5L,r4 - mtspr SPRN_DBAT6U,r4 - mtspr SPRN_DBAT6L,r4 - mtspr SPRN_DBAT7U,r4 - mtspr SPRN_DBAT7L,r4 - mtspr SPRN_IBAT4U,r4 - mtspr SPRN_IBAT4L,r4 - mtspr SPRN_IBAT5U,r4 - mtspr SPRN_IBAT5L,r4 - mtspr SPRN_IBAT6U,r4 - mtspr SPRN_IBAT6L,r4 - mtspr SPRN_IBAT7U,r4 - mtspr SPRN_IBAT7L,r4 -END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) - - /* Flush all TLBs */ - lis r4,0x1000 -1: addic. r4,r4,-0x1000 - tlbie r4 - blt 1b - sync - - /* restore the MSR and turn on the MMU */ - lwz r3,SL_MSR(r11) - bl turn_on_mmu - tovirt(r11,r11) - - /* Restore TB */ - li r3,0 - mttbl r3 - lwz r3,SL_TB(r11) - lwz r4,SL_TB+4(r11) - mttbu r3 - mttbl r4 - - /* Kick decrementer */ - li r0,1 - mtdec r0 - - /* Restore the callee-saved registers and return */ - lwz r0,SL_CR(r11) - mtcr r0 - lwz r2,SL_R2(r11) - lmw r12,SL_R12(r11) - lwz r1,SL_SP(r11) - lwz r0,SL_LR(r11) - mtlr r0 - - // XXX Note: we don't really need to call swsusp_resume - - li r3,0 - blr - -/* FIXME:This construct is actually not useful since we don't shut - * down the instruction MMU, we could just flip back MSR-DR on. 
- */ -turn_on_mmu: - mflr r4 - mtsrr0 r4 - mtsrr1 r3 - sync - isync - rfi - diff --git a/arch/ppc/kernel/temp.c b/arch/ppc/kernel/temp.c deleted file mode 100644 index 26bd8ea35a4..00000000000 --- a/arch/ppc/kernel/temp.c +++ /dev/null @@ -1,271 +0,0 @@ -/* - * temp.c Thermal management for cpu's with Thermal Assist Units - * - * Written by Troy Benjegerdes <hozer@drgw.net> - * - * TODO: - * dynamic power management to limit peak CPU temp (using ICTC) - * calibration??? - * - * Silly, crazy ideas: use cpu load (from scheduler) and ICTC to extend battery - * life in portables, and add a 'performance/watt' metric somewhere in /proc - */ - -#include <linux/config.h> -#include <linux/errno.h> -#include <linux/jiffies.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/init.h> - -#include <asm/io.h> -#include <asm/reg.h> -#include <asm/nvram.h> -#include <asm/cache.h> -#include <asm/8xx_immap.h> -#include <asm/machdep.h> - -static struct tau_temp -{ - int interrupts; - unsigned char low; - unsigned char high; - unsigned char grew; -} tau[NR_CPUS]; - -struct timer_list tau_timer; - -#undef DEBUG - -/* TODO: put these in a /proc interface, with some sanity checks, and maybe - * dynamic adjustment to minimize # of interrupts */ -/* configurable values for step size and how much to expand the window when - * we get an interrupt. These are based on the limit that was out of range */ -#define step_size 2 /* step size when temp goes out of range */ -#define window_expand 1 /* expand the window by this much */ -/* configurable values for shrinking the window */ -#define shrink_timer 2*HZ /* period between shrinking the window */ -#define min_window 2 /* minimum window size, degrees C */ - -void set_thresholds(unsigned long cpu) -{ -#ifdef CONFIG_TAU_INT - /* - * setup THRM1, - * threshold, valid bit, enable interrupts, interrupt when below threshold - */ - mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID); - - /* setup THRM2, - * threshold, valid bit, enable interrupts, interrupt when above threshhold - */ - mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE); -#else - /* same thing but don't enable interrupts */ - mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID); - mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V); -#endif -} - -void TAUupdate(int cpu) -{ - unsigned thrm; - -#ifdef DEBUG - printk("TAUupdate "); -#endif - - /* if both thresholds are crossed, the step_sizes cancel out - * and the window winds up getting expanded twice. */ - if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */ - if(thrm & THRM1_TIN){ /* crossed low threshold */ - if (tau[cpu].low >= step_size){ - tau[cpu].low -= step_size; - tau[cpu].high -= (step_size - window_expand); - } - tau[cpu].grew = 1; -#ifdef DEBUG - printk("low threshold crossed "); -#endif - } - } - if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? 
*/ - if(thrm & THRM1_TIN){ /* crossed high threshold */ - if (tau[cpu].high <= 127-step_size){ - tau[cpu].low += (step_size - window_expand); - tau[cpu].high += step_size; - } - tau[cpu].grew = 1; -#ifdef DEBUG - printk("high threshold crossed "); -#endif - } - } - -#ifdef DEBUG - printk("grew = %d\n", tau[cpu].grew); -#endif - -#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */ - set_thresholds(cpu); -#endif - -} - -#ifdef CONFIG_TAU_INT -/* - * TAU interrupts - called when we have a thermal assist unit interrupt - * with interrupts disabled - */ - -void TAUException(struct pt_regs * regs) -{ - int cpu = smp_processor_id(); - - irq_enter(); - tau[cpu].interrupts++; - - TAUupdate(cpu); - - irq_exit(); -} -#endif /* CONFIG_TAU_INT */ - -static void tau_timeout(void * info) -{ - int cpu; - unsigned long flags; - int size; - int shrink; - - /* disabling interrupts *should* be okay */ - local_irq_save(flags); - cpu = smp_processor_id(); - -#ifndef CONFIG_TAU_INT - TAUupdate(cpu); -#endif - - size = tau[cpu].high - tau[cpu].low; - if (size > min_window && ! tau[cpu].grew) { - /* do an exponential shrink of half the amount currently over size */ - shrink = (2 + size - min_window) / 4; - if (shrink) { - tau[cpu].low += shrink; - tau[cpu].high -= shrink; - } else { /* size must have been min_window + 1 */ - tau[cpu].low += 1; -#if 1 /* debug */ - if ((tau[cpu].high - tau[cpu].low) != min_window){ - printk(KERN_ERR "temp.c: line %d, logic error\n", __LINE__); - } -#endif - } - } - - tau[cpu].grew = 0; - - set_thresholds(cpu); - - /* - * Do the enable every time, since otherwise a bunch of (relatively) - * complex sleep code needs to be added. One mtspr every time - * tau_timeout is called is probably not a big deal. - * - * Enable thermal sensor and set up sample interval timer - * need 20 us to do the compare.. until a nice 'cpu_speed' function - * call is implemented, just assume a 500 mhz clock. It doesn't really - * matter if we take too long for a compare since it's all interrupt - * driven anyway. - * - * use a extra long time.. (60 us @ 500 mhz) - */ - mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E); - - local_irq_restore(flags); -} - -static void tau_timeout_smp(unsigned long unused) -{ - - /* schedule ourselves to be run again */ - mod_timer(&tau_timer, jiffies + shrink_timer) ; - on_each_cpu(tau_timeout, NULL, 1, 0); -} - -/* - * setup the TAU - * - * Set things up to use THRM1 as a temperature lower bound, and THRM2 as an upper bound. 
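Two numbers in tau_timeout() above are worth working through: the window shrink is an exponential decay toward min_window (close roughly a quarter of the excess each period, then one degree at a time near the end), and the THRM3 sample-interval operand is simply the assumed 500 MHz clock times the ~60 us comparison time. A small host-side sketch of just that arithmetic, starting from the [5, 120] window that TAU_init_smp sets up below (note the real code only shrinks when the window did not grow during the last period):

#include <stdio.h>

#define min_window 2	/* same value as above */

/* Shrink step used by tau_timeout(): roughly a quarter of the excess
 * over min_window. */
static int shrink_amount(int low, int high)
{
	int size = high - low;
	return (size > min_window) ? (2 + size - min_window) / 4 : 0;
}

int main(void)
{
	int low = 5, high = 120;	/* initial window from TAU_init_smp */

	while (high - low > min_window) {
		int s = shrink_amount(low, high);
		if (s) {
			low += s;
			high -= s;
		} else {		/* size was min_window + 1 */
			low += 1;
		}
		printf("window now [%d, %d]\n", low, high);
	}

	/* THRM3 sample interval: assumed 500 MHz clock * 60 us per
	 * comparison = 30000, the operand passed to THRM3_SITV() above. */
	printf("THRM3_SITV operand: %d\n", 500 * 60);
	return 0;
}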
- * Start off at zero - */ - -int tau_initialized = 0; - -void __init TAU_init_smp(void * info) -{ - unsigned long cpu = smp_processor_id(); - - /* set these to a reasonable value and let the timer shrink the - * window */ - tau[cpu].low = 5; - tau[cpu].high = 120; - - set_thresholds(cpu); -} - -int __init TAU_init(void) -{ - /* We assume in SMP that if one CPU has TAU support, they - * all have it --BenH - */ - if (!cpu_has_feature(CPU_FTR_TAU)) { - printk("Thermal assist unit not available\n"); - tau_initialized = 0; - return 1; - } - - - /* first, set up the window shrinking timer */ - init_timer(&tau_timer); - tau_timer.function = tau_timeout_smp; - tau_timer.expires = jiffies + shrink_timer; - add_timer(&tau_timer); - - on_each_cpu(TAU_init_smp, NULL, 1, 0); - - printk("Thermal assist unit "); -#ifdef CONFIG_TAU_INT - printk("using interrupts, "); -#else - printk("using timers, "); -#endif - printk("shrink_timer: %d jiffies\n", shrink_timer); - tau_initialized = 1; - - return 0; -} - -__initcall(TAU_init); - -/* - * return current temp - */ - -u32 cpu_temp_both(unsigned long cpu) -{ - return ((tau[cpu].high << 16) | tau[cpu].low); -} - -int cpu_temp(unsigned long cpu) -{ - return ((tau[cpu].high + tau[cpu].low) / 2); -} - -int tau_interrupts(unsigned long cpu) -{ - return (tau[cpu].interrupts); -} diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c deleted file mode 100644 index 53ea723af60..00000000000 --- a/arch/ppc/kernel/time.c +++ /dev/null @@ -1,447 +0,0 @@ -/* - * Common time routines among all ppc machines. - * - * Written by Cort Dougan (cort@cs.nmt.edu) to merge - * Paul Mackerras' version and mine for PReP and Pmac. - * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net). - * - * First round of bugfixes by Gabriel Paubert (paubert@iram.es) - * to make clock more stable (2.4.0-test5). The only thing - * that this code assumes is that the timebases have been synchronized - * by firmware on SMP and are never stopped (never do sleep - * on SMP then, nap and doze are OK). - * - * TODO (not necessarily in this file): - * - improve precision and reproducibility of timebase frequency - * measurement at boot time. - * - get rid of xtime_lock for gettimeofday (generic kernel problem - * to be implemented on all architectures for SMP scalability and - * eventually implementing gettimeofday without entering the kernel). - * - put all time/clock related variables in a single structure - * to minimize number of cache lines touched by gettimeofday() - * - for astronomical applications: add a new function to get - * non ambiguous timestamps even around leap seconds. This needs - * a new timestamp format and a good name. - * - * - * The following comment is partially obsolete (at least the long wait - * is no more a valid reason): - * Since the MPC8xx has a programmable interrupt timer, I decided to - * use that rather than the decrementer. Two reasons: 1.) the clock - * frequency is low, causing 2.) a long wait in the timer interrupt - * while ((d = get_dec()) == dval) - * loop. The MPC8xx can be driven from a variety of input clocks, - * so a number of assumptions have been made here because the kernel - * parameter HZ is a constant. We assume (correctly, today :-) that - * the MPC8xx on the MBX board is driven from a 32.768 kHz crystal. - * This is then divided by 4, providing a 8192 Hz clock into the PIT. - * Since it is not possible to get a nice 100 Hz clock out of this, without - * creating a software PLL, I have set HZ to 128. 
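The HZ=128 choice explained above is plain arithmetic: the 32.768 kHz crystal divided by 4 gives an 8192 Hz input to the PIT, which does not divide evenly by 100 but does by 128. A two-line check:

#include <stdio.h>

int main(void)
{
	/* Numbers from the comment above: the MBX 8xx board feeds the PIT
	 * from a 32.768 kHz crystal divided by 4. */
	int pit_hz = 32768 / 4;					/* 8192 Hz */

	printf("PIT counts per tick at HZ=100: %.2f\n", pit_hz / 100.0);	/* 81.92 */
	printf("PIT counts per tick at HZ=128: %d\n", pit_hz / 128);		/* 64    */
	return 0;
}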
-- Dan - * - * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 - * "A Kernel Model for Precision Timekeeping" by Dave Mills - */ - -#include <linux/config.h> -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/interrupt.h> -#include <linux/timex.h> -#include <linux/kernel_stat.h> -#include <linux/mc146818rtc.h> -#include <linux/time.h> -#include <linux/init.h> -#include <linux/profile.h> - -#include <asm/io.h> -#include <asm/nvram.h> -#include <asm/cache.h> -#include <asm/8xx_immap.h> -#include <asm/machdep.h> - -#include <asm/time.h> - -unsigned long disarm_decr[NR_CPUS]; - -extern struct timezone sys_tz; - -/* keep track of when we need to update the rtc */ -time_t last_rtc_update; - -/* The decrementer counts down by 128 every 128ns on a 601. */ -#define DECREMENTER_COUNT_601 (1000000000 / HZ) - -unsigned tb_ticks_per_jiffy; -unsigned tb_to_us; -unsigned tb_last_stamp; -unsigned long tb_to_ns_scale; - -extern unsigned long wall_jiffies; - -/* used for timezone offset */ -static long timezone_offset; - -DEFINE_SPINLOCK(rtc_lock); - -EXPORT_SYMBOL(rtc_lock); - -/* Timer interrupt helper function */ -static inline int tb_delta(unsigned *jiffy_stamp) { - int delta; - if (__USE_RTC()) { - delta = get_rtcl(); - if (delta < *jiffy_stamp) *jiffy_stamp -= 1000000000; - delta -= *jiffy_stamp; - } else { - delta = get_tbl() - *jiffy_stamp; - } - return delta; -} - -#ifdef CONFIG_SMP -unsigned long profile_pc(struct pt_regs *regs) -{ - unsigned long pc = instruction_pointer(regs); - - if (in_lock_functions(pc)) - return regs->link; - - return pc; -} -EXPORT_SYMBOL(profile_pc); -#endif - -void wakeup_decrementer(void) -{ - set_dec(tb_ticks_per_jiffy); - /* No currently-supported powerbook has a 601, - * so use get_tbl, not native - */ - last_jiffy_stamp(0) = tb_last_stamp = get_tbl(); -} - -/* - * timer_interrupt - gets called when the decrementer overflows, - * with interrupts disabled. - * We set it up to overflow again in 1/HZ seconds. - */ -void timer_interrupt(struct pt_regs * regs) -{ - int next_dec; - unsigned long cpu = smp_processor_id(); - unsigned jiffy_stamp = last_jiffy_stamp(cpu); - extern void do_IRQ(struct pt_regs *); - - if (atomic_read(&ppc_n_lost_interrupts) != 0) - do_IRQ(regs); - - irq_enter(); - - while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) { - jiffy_stamp += tb_ticks_per_jiffy; - - profile_tick(CPU_PROFILING, regs); - update_process_times(user_mode(regs)); - - if (smp_processor_id()) - continue; - - /* We are in an interrupt, no need to save/restore flags */ - write_seqlock(&xtime_lock); - tb_last_stamp = jiffy_stamp; - do_timer(regs); - - /* - * update the rtc when needed, this should be performed on the - * right fraction of a second. Half or full second ? - * Full second works on mk48t59 clocks, others need testing. - * Note that this update is basically only used through - * the adjtimex system calls. Setting the HW clock in - * any other way is a /dev/rtc and userland business. - * This is still wrong by -0.5/+1.5 jiffies because of the - * timer interrupt resolution and possible delay, but here we - * hit a quantization limit which can only be solved by higher - * resolution timers and decoupling time management from timer - * interrupts. This is also wrong on the clocks - * which require being written at the half second boundary. 
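The reload loop in timer_interrupt() below is the part that keeps wall time honest when a tick arrives late: it keeps charging whole jiffies (advancing jiffy_stamp by tb_ticks_per_jiffy and running do_timer()) until the remainder turns positive, and that positive remainder is what gets written back into the decrementer. A host-side model of just that arithmetic, with made-up timebase numbers standing in for the real registers:

#include <stdio.h>

int main(void)
{
	/* All values are invented for illustration; in the kernel they
	 * come from the timebase and calibrate_decr(). */
	int tb_ticks_per_jiffy = 1000;
	int jiffy_stamp = 5000;		/* timebase at the last accounted tick   */
	int now = 8350;			/* interrupt held off ~3.35 jiffies      */
	int jiffies_accounted = 0;
	int next_dec;

	while ((next_dec = tb_ticks_per_jiffy - (now - jiffy_stamp)) <= 0) {
		jiffy_stamp += tb_ticks_per_jiffy;
		jiffies_accounted++;	/* do_timer()/profile_tick() run here */
	}

	/* Three jiffies get charged and the decrementer is reloaded so the
	 * next interrupt lands on the following jiffy boundary. */
	printf("accounted %d jiffies, set_dec(%d)\n", jiffies_accounted, next_dec);
	return 0;
}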
- * We should have an rtc call that only sets the minutes and - * seconds like on Intel to avoid problems with non UTC clocks. - */ - if ( ppc_md.set_rtc_time && ntp_synced() && - xtime.tv_sec - last_rtc_update >= 659 && - abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ && - jiffies - wall_jiffies == 1) { - if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0) - last_rtc_update = xtime.tv_sec+1; - else - /* Try again one minute later */ - last_rtc_update += 60; - } - write_sequnlock(&xtime_lock); - } - if ( !disarm_decr[smp_processor_id()] ) - set_dec(next_dec); - last_jiffy_stamp(cpu) = jiffy_stamp; - - if (ppc_md.heartbeat && !ppc_md.heartbeat_count--) - ppc_md.heartbeat(); - - irq_exit(); -} - -/* - * This version of gettimeofday has microsecond resolution. - */ -void do_gettimeofday(struct timeval *tv) -{ - unsigned long flags; - unsigned long seq; - unsigned delta, lost_ticks, usec, sec; - - do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); - sec = xtime.tv_sec; - usec = (xtime.tv_nsec / 1000); - delta = tb_ticks_since(tb_last_stamp); -#ifdef CONFIG_SMP - /* As long as timebases are not in sync, gettimeofday can only - * have jiffy resolution on SMP. - */ - if (!smp_tb_synchronized) - delta = 0; -#endif /* CONFIG_SMP */ - lost_ticks = jiffies - wall_jiffies; - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - - usec += mulhwu(tb_to_us, tb_ticks_per_jiffy * lost_ticks + delta); - while (usec >= 1000000) { - sec++; - usec -= 1000000; - } - tv->tv_sec = sec; - tv->tv_usec = usec; -} - -EXPORT_SYMBOL(do_gettimeofday); - -int do_settimeofday(struct timespec *tv) -{ - time_t wtm_sec, new_sec = tv->tv_sec; - long wtm_nsec, new_nsec = tv->tv_nsec; - unsigned long flags; - int tb_delta; - - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) - return -EINVAL; - - write_seqlock_irqsave(&xtime_lock, flags); - /* Updating the RTC is not the job of this code. If the time is - * stepped under NTP, the RTC will be update after STA_UNSYNC - * is cleared. Tool like clock/hwclock either copy the RTC - * to the system time, in which case there is no point in writing - * to the RTC again, or write to the RTC but then they don't call - * settimeofday to perform this operation. Note also that - * we don't touch the decrementer since: - * a) it would lose timer interrupt synchronization on SMP - * (if it is working one day) - * b) it could make one jiffy spuriously shorter or longer - * which would introduce another source of uncertainty potentially - * harmful to relatively short timers. - */ - - /* This works perfectly on SMP only if the tb are in sync but - * guarantees an error < 1 jiffy even if they are off by eons, - * still reasonable when gettimeofday resolution is 1 jiffy. - */ - tb_delta = tb_ticks_since(last_jiffy_stamp(smp_processor_id())); - tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy; - - new_nsec -= 1000 * mulhwu(tb_to_us, tb_delta); - - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); - - set_normalized_timespec(&xtime, new_sec, new_nsec); - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); - - /* In case of a large backwards jump in time with NTP, we want the - * clock to be updated as soon as the PLL is again in lock. 
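do_gettimeofday() above leans on mulhwu(), which returns the high 32 bits of an unsigned 32x32-bit multiply; tb_to_us is precomputed so that mulhwu(tb_to_us, ticks) approximates ticks * 1000000 / timebase_frequency. A portable sketch of that scaling (the 16.5 MHz timebase below is an arbitrary example value, not taken from the source):

#include <stdio.h>
#include <stdint.h>

/* Portable equivalent of the mulhwu primitive: high 32 bits of a
 * 32x32-bit unsigned product. */
static uint32_t mulhwu(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a * b) >> 32);
}

int main(void)
{
	/* Assumed 16.5 MHz timebase, purely for illustration.  tb_to_us is
	 * the multiplier with mulhwu(tb_to_us, ticks) ~= ticks * 1e6 / freq. */
	uint32_t tb_freq = 16500000;
	uint32_t tb_to_us = (uint32_t)(((uint64_t)1000000 << 32) / tb_freq);

	uint32_t ticks = 33000;		/* 2 ms worth of timebase ticks */
	printf("%u ticks -> %u us\n", ticks, mulhwu(tb_to_us, ticks));	/* ~2000 */
	return 0;
}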
- */ - last_rtc_update = new_sec - 658; - - ntp_clear(); - write_sequnlock_irqrestore(&xtime_lock, flags); - clock_was_set(); - return 0; -} - -EXPORT_SYMBOL(do_settimeofday); - -/* This function is only called on the boot processor */ -void __init time_init(void) -{ - time_t sec, old_sec; - unsigned old_stamp, stamp, elapsed; - - if (ppc_md.time_init != NULL) - timezone_offset = ppc_md.time_init(); - - if (__USE_RTC()) { - /* 601 processor: dec counts down by 128 every 128ns */ - tb_ticks_per_jiffy = DECREMENTER_COUNT_601; - /* mulhwu_scale_factor(1000000000, 1000000) is 0x418937 */ - tb_to_us = 0x418937; - } else { - ppc_md.calibrate_decr(); - tb_to_ns_scale = mulhwu(tb_to_us, 1000 << 10); - } - - /* Now that the decrementer is calibrated, it can be used in case the - * clock is stuck, but the fact that we have to handle the 601 - * makes things more complex. Repeatedly read the RTC until the - * next second boundary to try to achieve some precision. If there - * is no RTC, we still need to set tb_last_stamp and - * last_jiffy_stamp(cpu 0) to the current stamp. - */ - stamp = get_native_tbl(); - if (ppc_md.get_rtc_time) { - sec = ppc_md.get_rtc_time(); - elapsed = 0; - do { - old_stamp = stamp; - old_sec = sec; - stamp = get_native_tbl(); - if (__USE_RTC() && stamp < old_stamp) - old_stamp -= 1000000000; - elapsed += stamp - old_stamp; - sec = ppc_md.get_rtc_time(); - } while ( sec == old_sec && elapsed < 2*HZ*tb_ticks_per_jiffy); - if (sec==old_sec) - printk("Warning: real time clock seems stuck!\n"); - xtime.tv_sec = sec; - xtime.tv_nsec = 0; - /* No update now, we just read the time from the RTC ! */ - last_rtc_update = xtime.tv_sec; - } - last_jiffy_stamp(0) = tb_last_stamp = stamp; - - /* Not exact, but the timer interrupt takes care of this */ - set_dec(tb_ticks_per_jiffy); - - /* If platform provided a timezone (pmac), we correct the time */ - if (timezone_offset) { - sys_tz.tz_minuteswest = -timezone_offset / 60; - sys_tz.tz_dsttime = 0; - xtime.tv_sec -= timezone_offset; - } - set_normalized_timespec(&wall_to_monotonic, - -xtime.tv_sec, -xtime.tv_nsec); -} - -#define FEBRUARY 2 -#define STARTOFTIME 1970 -#define SECDAY 86400L -#define SECYR (SECDAY * 365) - -/* - * Note: this is wrong for 2100, but our signed 32-bit time_t will - * have overflowed long before that, so who cares. -- paulus - */ -#define leapyear(year) ((year) % 4 == 0) -#define days_in_year(a) (leapyear(a) ? 366 : 365) -#define days_in_month(a) (month_days[(a) - 1]) - -static int month_days[12] = { - 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 -}; - -void to_tm(int tim, struct rtc_time * tm) -{ - register int i; - register long hms, day, gday; - - gday = day = tim / SECDAY; - hms = tim % SECDAY; - - /* Hours, minutes, seconds are easy */ - tm->tm_hour = hms / 3600; - tm->tm_min = (hms % 3600) / 60; - tm->tm_sec = (hms % 3600) % 60; - - /* Number of years in days */ - for (i = STARTOFTIME; day >= days_in_year(i); i++) - day -= days_in_year(i); - tm->tm_year = i; - - /* Number of months in days left */ - if (leapyear(tm->tm_year)) - days_in_month(FEBRUARY) = 29; - for (i = 1; day >= days_in_month(i); i++) - day -= days_in_month(i); - days_in_month(FEBRUARY) = 28; - tm->tm_mon = i; - - /* Days are what is left over (+1) from all that. */ - tm->tm_mday = day + 1; - - /* - * Determine the day of week. Jan. 1, 1970 was a Thursday. 
- */ - tm->tm_wday = (gday + 4) % 7; -} - -/* Auxiliary function to compute scaling factors */ -/* Actually the choice of a timebase running at 1/4 the of the bus - * frequency giving resolution of a few tens of nanoseconds is quite nice. - * It makes this computation very precise (27-28 bits typically) which - * is optimistic considering the stability of most processor clock - * oscillators and the precision with which the timebase frequency - * is measured but does not harm. - */ -unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) { - unsigned mlt=0, tmp, err; - /* No concern for performance, it's done once: use a stupid - * but safe and compact method to find the multiplier. - */ - for (tmp = 1U<<31; tmp != 0; tmp >>= 1) { - if (mulhwu(inscale, mlt|tmp) < outscale) mlt|=tmp; - } - /* We might still be off by 1 for the best approximation. - * A side effect of this is that if outscale is too large - * the returned value will be zero. - * Many corner cases have been checked and seem to work, - * some might have been forgotten in the test however. - */ - err = inscale*(mlt+1); - if (err <= inscale/2) mlt++; - return mlt; -} - -unsigned long long sched_clock(void) -{ - unsigned long lo, hi, hi2; - unsigned long long tb; - - if (!__USE_RTC()) { - do { - hi = get_tbu(); - lo = get_tbl(); - hi2 = get_tbu(); - } while (hi2 != hi); - tb = ((unsigned long long) hi << 32) | lo; - tb = (tb * tb_to_ns_scale) >> 10; - } else { - do { - hi = get_rtcu(); - lo = get_rtcl(); - hi2 = get_rtcu(); - } while (hi2 != hi); - tb = ((unsigned long long) hi) * 1000000000 + lo; - } - return tb; -} diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c deleted file mode 100644 index 16adde6b429..00000000000 --- a/arch/ppc/kernel/traps.c +++ /dev/null @@ -1,968 +0,0 @@ -/* - * arch/ppc/kernel/traps.c - * - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
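The time_init() comment above states that mulhwu_scale_factor(1000000000, 1000000) is 0x418937; that is just the rounded value of 2^32 * 10^6 / 10^9, and the same construction yields the tb_to_ns_scale that sched_clock() applies with its final >> 10. The sketch below reimplements the bit-by-bit search from mulhwu_scale_factor() against a portable mulhwu() so the constant can be checked on any host:

#include <stdio.h>
#include <stdint.h>

static uint32_t mulhwu(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a * b) >> 32);
}

/* Same bit-by-bit multiplier search as mulhwu_scale_factor() above,
 * including the wrapping 32-bit rounding check. */
static uint32_t scale_factor(uint32_t inscale, uint32_t outscale)
{
	uint32_t mlt = 0, tmp, err;

	for (tmp = 1U << 31; tmp != 0; tmp >>= 1)
		if (mulhwu(inscale, mlt | tmp) < outscale)
			mlt |= tmp;
	err = inscale * (mlt + 1);
	if (err <= inscale / 2)
		mlt++;
	return mlt;
}

int main(void)
{
	/* Matches the 0x418937 quoted for the 601 case in time_init(). */
	printf("scale_factor(1e9, 1e6) = 0x%x\n",
	       scale_factor(1000000000, 1000000));
	return 0;
}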
- * - * Modified by Cort Dougan (cort@cs.nmt.edu) - * and Paul Mackerras (paulus@cs.anu.edu.au) - */ - -/* - * This file handles the architecture-dependent parts of hardware exceptions - */ - -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/stddef.h> -#include <linux/unistd.h> -#include <linux/ptrace.h> -#include <linux/slab.h> -#include <linux/user.h> -#include <linux/a.out.h> -#include <linux/interrupt.h> -#include <linux/config.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/prctl.h> - -#include <asm/pgtable.h> -#include <asm/uaccess.h> -#include <asm/system.h> -#include <asm/io.h> -#include <asm/reg.h> -#include <asm/xmon.h> -#ifdef CONFIG_PMAC_BACKLIGHT -#include <asm/backlight.h> -#endif -#include <asm/pmc.h> - -#ifdef CONFIG_XMON -extern int xmon_bpt(struct pt_regs *regs); -extern int xmon_sstep(struct pt_regs *regs); -extern int xmon_iabr_match(struct pt_regs *regs); -extern int xmon_dabr_match(struct pt_regs *regs); - -void (*debugger)(struct pt_regs *regs) = xmon; -int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt; -int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep; -int (*debugger_iabr_match)(struct pt_regs *regs) = xmon_iabr_match; -int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match; -void (*debugger_fault_handler)(struct pt_regs *regs); -#else -#ifdef CONFIG_KGDB -void (*debugger)(struct pt_regs *regs); -int (*debugger_bpt)(struct pt_regs *regs); -int (*debugger_sstep)(struct pt_regs *regs); -int (*debugger_iabr_match)(struct pt_regs *regs); -int (*debugger_dabr_match)(struct pt_regs *regs); -void (*debugger_fault_handler)(struct pt_regs *regs); -#else -#define debugger(regs) do { } while (0) -#define debugger_bpt(regs) 0 -#define debugger_sstep(regs) 0 -#define debugger_iabr_match(regs) 0 -#define debugger_dabr_match(regs) 0 -#define debugger_fault_handler ((void (*)(struct pt_regs *))0) -#endif -#endif - -/* - * Trap & Exception support - */ - -DEFINE_SPINLOCK(die_lock); - -int die(const char * str, struct pt_regs * fp, long err) -{ - static int die_counter; - int nl = 0; - console_verbose(); - spin_lock_irq(&die_lock); -#ifdef CONFIG_PMAC_BACKLIGHT - if (_machine == _MACH_Pmac) { - set_backlight_enable(1); - set_backlight_level(BACKLIGHT_MAX); - } -#endif - printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); -#ifdef CONFIG_PREEMPT - printk("PREEMPT "); - nl = 1; -#endif -#ifdef CONFIG_SMP - printk("SMP NR_CPUS=%d ", NR_CPUS); - nl = 1; -#endif - if (nl) - printk("\n"); - show_regs(fp); - spin_unlock_irq(&die_lock); - /* do_exit() should take care of panic'ing from an interrupt - * context so we don't handle it here - */ - do_exit(err); -} - -void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) -{ - siginfo_t info; - - if (!user_mode(regs)) { - debugger(regs); - die("Exception in kernel mode", regs, signr); - } - info.si_signo = signr; - info.si_errno = 0; - info.si_code = code; - info.si_addr = (void __user *) addr; - force_sig_info(signr, &info, current); - - /* - * Init gets no signals that it doesn't have a handler for. - * That's all very well, but if it has caused a synchronous - * exception and we ignore the resulting signal, it will just - * generate the same exception over and over again and we get - * nowhere. Better to kill it and let the kernel panic. 
- */ - if (current->pid == 1) { - __sighandler_t handler; - - spin_lock_irq(¤t->sighand->siglock); - handler = current->sighand->action[signr-1].sa.sa_handler; - spin_unlock_irq(¤t->sighand->siglock); - if (handler == SIG_DFL) { - /* init has generated a synchronous exception - and it doesn't have a handler for the signal */ - printk(KERN_CRIT "init has generated signal %d " - "but has no handler for it\n", signr); - do_exit(signr); - } - } -} - -/* - * I/O accesses can cause machine checks on powermacs. - * Check if the NIP corresponds to the address of a sync - * instruction for which there is an entry in the exception - * table. - * Note that the 601 only takes a machine check on TEA - * (transfer error ack) signal assertion, and does not - * set any of the top 16 bits of SRR1. - * -- paulus. - */ -static inline int check_io_access(struct pt_regs *regs) -{ -#ifdef CONFIG_PPC_PMAC - unsigned long msr = regs->msr; - const struct exception_table_entry *entry; - unsigned int *nip = (unsigned int *)regs->nip; - - if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) - && (entry = search_exception_tables(regs->nip)) != NULL) { - /* - * Check that it's a sync instruction, or somewhere - * in the twi; isync; nop sequence that inb/inw/inl uses. - * As the address is in the exception table - * we should be able to read the instr there. - * For the debug message, we look at the preceding - * load or store. - */ - if (*nip == 0x60000000) /* nop */ - nip -= 2; - else if (*nip == 0x4c00012c) /* isync */ - --nip; - if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { - /* sync or twi */ - unsigned int rb; - - --nip; - rb = (*nip >> 11) & 0x1f; - printk(KERN_DEBUG "%s bad port %lx at %p\n", - (*nip & 0x100)? "OUT to": "IN from", - regs->gpr[rb] - _IO_BASE, nip); - regs->msr |= MSR_RI; - regs->nip = entry->fixup; - return 1; - } - } -#endif /* CONFIG_PPC_PMAC */ - return 0; -} - -#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) -/* On 4xx, the reason for the machine check or program exception - is in the ESR. */ -#define get_reason(regs) ((regs)->dsisr) -#ifndef CONFIG_FSL_BOOKE -#define get_mc_reason(regs) ((regs)->dsisr) -#else -#define get_mc_reason(regs) (mfspr(SPRN_MCSR)) -#endif -#define REASON_FP ESR_FP -#define REASON_ILLEGAL (ESR_PIL | ESR_PUO) -#define REASON_PRIVILEGED ESR_PPR -#define REASON_TRAP ESR_PTR - -/* single-step stuff */ -#define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC) -#define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC) - -#else -/* On non-4xx, the reason for the machine check or program - exception is in the MSR. 
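check_io_access() above recovers the offending port by stepping backwards from the nop/isync/twi sequence that the I/O accessors end with and slicing the RB field out of the preceding indexed load or store; the register fields always sit at fixed bit positions. A tiny standalone decoder (the 0x7c6020ae test word is believed to encode lbzx r3,0,r4 and is used purely as an example):

#include <stdio.h>
#include <stdint.h>

/* Field slicing used by check_io_access(): primary opcode in the top
 * six bits, RT/RA/RB in successive five-bit fields below it. */
#define PPC_OPCODE(i)	((i) >> 26)
#define PPC_RT(i)	(((i) >> 21) & 0x1f)
#define PPC_RA(i)	(((i) >> 16) & 0x1f)
#define PPC_RB(i)	(((i) >> 11) & 0x1f)

int main(void)
{
	/* Expected to decode as opcode 31, RT=3, RA=0, RB=4; the handler
	 * above would then report gpr[4] - _IO_BASE as the bad port. */
	uint32_t insn = 0x7c6020ae;

	printf("opcode=%u rt=%u ra=%u rb=%u\n",
	       PPC_OPCODE(insn), PPC_RT(insn), PPC_RA(insn), PPC_RB(insn));
	return 0;
}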
*/ -#define get_reason(regs) ((regs)->msr) -#define get_mc_reason(regs) ((regs)->msr) -#define REASON_FP 0x100000 -#define REASON_ILLEGAL 0x80000 -#define REASON_PRIVILEGED 0x40000 -#define REASON_TRAP 0x20000 - -#define single_stepping(regs) ((regs)->msr & MSR_SE) -#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) -#endif - -/* - * This is "fall-back" implementation for configurations - * which don't provide platform-specific machine check info - */ -void __attribute__ ((weak)) -platform_machine_check(struct pt_regs *regs) -{ -} - -void machine_check_exception(struct pt_regs *regs) -{ - unsigned long reason = get_mc_reason(regs); - - if (user_mode(regs)) { - regs->msr |= MSR_RI; - _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); - return; - } - -#if defined(CONFIG_8xx) && defined(CONFIG_PCI) - /* the qspan pci read routines can cause machine checks -- Cort */ - bad_page_fault(regs, regs->dar, SIGBUS); - return; -#endif - - if (debugger_fault_handler) { - debugger_fault_handler(regs); - regs->msr |= MSR_RI; - return; - } - - if (check_io_access(regs)) - return; - -#if defined(CONFIG_4xx) && !defined(CONFIG_440A) - if (reason & ESR_IMCP) { - printk("Instruction"); - mtspr(SPRN_ESR, reason & ~ESR_IMCP); - } else - printk("Data"); - printk(" machine check in kernel mode.\n"); -#elif defined(CONFIG_440A) - printk("Machine check in kernel mode.\n"); - if (reason & ESR_IMCP){ - printk("Instruction Synchronous Machine Check exception\n"); - mtspr(SPRN_ESR, reason & ~ESR_IMCP); - } - else { - u32 mcsr = mfspr(SPRN_MCSR); - if (mcsr & MCSR_IB) - printk("Instruction Read PLB Error\n"); - if (mcsr & MCSR_DRB) - printk("Data Read PLB Error\n"); - if (mcsr & MCSR_DWB) - printk("Data Write PLB Error\n"); - if (mcsr & MCSR_TLBP) - printk("TLB Parity Error\n"); - if (mcsr & MCSR_ICP){ - flush_instruction_cache(); - printk("I-Cache Parity Error\n"); - } - if (mcsr & MCSR_DCSP) - printk("D-Cache Search Parity Error\n"); - if (mcsr & MCSR_DCFP) - printk("D-Cache Flush Parity Error\n"); - if (mcsr & MCSR_IMPE) - printk("Machine Check exception is imprecise\n"); - - /* Clear MCSR */ - mtspr(SPRN_MCSR, mcsr); - } -#elif defined (CONFIG_E500) - printk("Machine check in kernel mode.\n"); - printk("Caused by (from MCSR=%lx): ", reason); - - if (reason & MCSR_MCP) - printk("Machine Check Signal\n"); - if (reason & MCSR_ICPERR) - printk("Instruction Cache Parity Error\n"); - if (reason & MCSR_DCP_PERR) - printk("Data Cache Push Parity Error\n"); - if (reason & MCSR_DCPERR) - printk("Data Cache Parity Error\n"); - if (reason & MCSR_GL_CI) - printk("Guarded Load or Cache-Inhibited stwcx.\n"); - if (reason & MCSR_BUS_IAERR) - printk("Bus - Instruction Address Error\n"); - if (reason & MCSR_BUS_RAERR) - printk("Bus - Read Address Error\n"); - if (reason & MCSR_BUS_WAERR) - printk("Bus - Write Address Error\n"); - if (reason & MCSR_BUS_IBERR) - printk("Bus - Instruction Data Error\n"); - if (reason & MCSR_BUS_RBERR) - printk("Bus - Read Data Bus Error\n"); - if (reason & MCSR_BUS_WBERR) - printk("Bus - Read Data Bus Error\n"); - if (reason & MCSR_BUS_IPERR) - printk("Bus - Instruction Parity Error\n"); - if (reason & MCSR_BUS_RPERR) - printk("Bus - Read Parity Error\n"); -#elif defined (CONFIG_E200) - printk("Machine check in kernel mode.\n"); - printk("Caused by (from MCSR=%lx): ", reason); - - if (reason & MCSR_MCP) - printk("Machine Check Signal\n"); - if (reason & MCSR_CP_PERR) - printk("Cache Push Parity Error\n"); - if (reason & MCSR_CPERR) - printk("Cache Parity Error\n"); - if (reason & MCSR_EXCP_ERR) - 
printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); - if (reason & MCSR_BUS_IRERR) - printk("Bus - Read Bus Error on instruction fetch\n"); - if (reason & MCSR_BUS_DRERR) - printk("Bus - Read Bus Error on data load\n"); - if (reason & MCSR_BUS_WRERR) - printk("Bus - Write Bus Error on buffered store or cache line push\n"); -#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */ - printk("Machine check in kernel mode.\n"); - printk("Caused by (from SRR1=%lx): ", reason); - switch (reason & 0x601F0000) { - case 0x80000: - printk("Machine check signal\n"); - break; - case 0: /* for 601 */ - case 0x40000: - case 0x140000: /* 7450 MSS error and TEA */ - printk("Transfer error ack signal\n"); - break; - case 0x20000: - printk("Data parity error signal\n"); - break; - case 0x10000: - printk("Address parity error signal\n"); - break; - case 0x20000000: - printk("L1 Data Cache error\n"); - break; - case 0x40000000: - printk("L1 Instruction Cache error\n"); - break; - case 0x00100000: - printk("L2 data cache parity error\n"); - break; - default: - printk("Unknown values in msr\n"); - } -#endif /* CONFIG_4xx */ - - /* - * Optional platform-provided routine to print out - * additional info, e.g. bus error registers. - */ - platform_machine_check(regs); - - debugger(regs); - die("machine check", regs, SIGBUS); -} - -void SMIException(struct pt_regs *regs) -{ - debugger(regs); -#if !(defined(CONFIG_XMON) || defined(CONFIG_KGDB)) - show_regs(regs); - panic("System Management Interrupt"); -#endif -} - -void unknown_exception(struct pt_regs *regs) -{ - printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx %s\n", - regs->nip, regs->msr, regs->trap, print_tainted()); - _exception(SIGTRAP, regs, 0, 0); -} - -void instruction_breakpoint_exception(struct pt_regs *regs) -{ - if (debugger_iabr_match(regs)) - return; - _exception(SIGTRAP, regs, TRAP_BRKPT, 0); -} - -void RunModeException(struct pt_regs *regs) -{ - _exception(SIGTRAP, regs, 0, 0); -} - -/* Illegal instruction emulation support. Originally written to - * provide the PVR to user applications using the mfspr rd, PVR. - * Return non-zero if we can't emulate, or -EFAULT if the associated - * memory access caused an access fault. Return zero on success. - * - * There are a couple of ways to do this, either "decode" the instruction - * or directly match lots of bits. In this case, matching lots of - * bits is faster and easier. - * - */ -#define INST_MFSPR_PVR 0x7c1f42a6 -#define INST_MFSPR_PVR_MASK 0xfc1fffff - -#define INST_DCBA 0x7c0005ec -#define INST_DCBA_MASK 0x7c0007fe - -#define INST_MCRXR 0x7c000400 -#define INST_MCRXR_MASK 0x7c0007fe - -#define INST_STRING 0x7c00042a -#define INST_STRING_MASK 0x7c0007fe -#define INST_STRING_GEN_MASK 0x7c00067e -#define INST_LSWI 0x7c0004aa -#define INST_LSWX 0x7c00042a -#define INST_STSWI 0x7c0005aa -#define INST_STSWX 0x7c00052a - -static int emulate_string_inst(struct pt_regs *regs, u32 instword) -{ - u8 rT = (instword >> 21) & 0x1f; - u8 rA = (instword >> 16) & 0x1f; - u8 NB_RB = (instword >> 11) & 0x1f; - u32 num_bytes; - unsigned long EA; - int pos = 0; - - /* Early out if we are an invalid form of lswx */ - if ((instword & INST_STRING_MASK) == INST_LSWX) - if ((rT == rA) || (rT == NB_RB)) - return -EINVAL; - - EA = (rA == 0) ? 0 : regs->gpr[rA]; - - switch (instword & INST_STRING_MASK) { - case INST_LSWX: - case INST_STSWX: - EA += NB_RB; - num_bytes = regs->xer & 0x7f; - break; - case INST_LSWI: - case INST_STSWI: - num_bytes = (NB_RB == 0) ? 
32 : NB_RB; - break; - default: - return -EINVAL; - } - - while (num_bytes != 0) - { - u8 val; - u32 shift = 8 * (3 - (pos & 0x3)); - - switch ((instword & INST_STRING_MASK)) { - case INST_LSWX: - case INST_LSWI: - if (get_user(val, (u8 __user *)EA)) - return -EFAULT; - /* first time updating this reg, - * zero it out */ - if (pos == 0) - regs->gpr[rT] = 0; - regs->gpr[rT] |= val << shift; - break; - case INST_STSWI: - case INST_STSWX: - val = regs->gpr[rT] >> shift; - if (put_user(val, (u8 __user *)EA)) - return -EFAULT; - break; - } - /* move EA to next address */ - EA += 1; - num_bytes--; - - /* manage our position within the register */ - if (++pos == 4) { - pos = 0; - if (++rT == 32) - rT = 0; - } - } - - return 0; -} - -static int emulate_instruction(struct pt_regs *regs) -{ - u32 instword; - u32 rd; - - if (!user_mode(regs)) - return -EINVAL; - CHECK_FULL_REGS(regs); - - if (get_user(instword, (u32 __user *)(regs->nip))) - return -EFAULT; - - /* Emulate the mfspr rD, PVR. - */ - if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) { - rd = (instword >> 21) & 0x1f; - regs->gpr[rd] = mfspr(SPRN_PVR); - return 0; - } - - /* Emulating the dcba insn is just a no-op. */ - if ((instword & INST_DCBA_MASK) == INST_DCBA) - return 0; - - /* Emulate the mcrxr insn. */ - if ((instword & INST_MCRXR_MASK) == INST_MCRXR) { - int shift = (instword >> 21) & 0x1c; - unsigned long msk = 0xf0000000UL >> shift; - - regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); - regs->xer &= ~0xf0000000UL; - return 0; - } - - /* Emulate load/store string insn. */ - if ((instword & INST_STRING_GEN_MASK) == INST_STRING) - return emulate_string_inst(regs, instword); - - return -EINVAL; -} - -/* - * After we have successfully emulated an instruction, we have to - * check if the instruction was being single-stepped, and if so, - * pretend we got a single-step exception. This was pointed out - * by Kumar Gala. -- paulus - */ -static void emulate_single_step(struct pt_regs *regs) -{ - if (single_stepping(regs)) { - clear_single_step(regs); - _exception(SIGTRAP, regs, TRAP_TRACE, 0); - } -} - -/* - * Look through the list of trap instructions that are used for BUG(), - * BUG_ON() and WARN_ON() and see if we hit one. At this point we know - * that the exception was caused by a trap instruction of some kind. - * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0 - * otherwise. 
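The register packing in emulate_string_inst() above follows the architected lswi/stswi semantics: bytes fill each GPR from the most-significant end (hence shift = 8 * (3 - (pos & 0x3))) and the destination register number wraps from r31 back to r0. A host-side model of the load direction, with memory and the register file as plain arrays:

#include <stdio.h>
#include <stdint.h>

/* Model of the lswi/lswx load loop above: big-endian packing within
 * each GPR, register number wrapping at 32. */
static void emulate_lsw(uint32_t *gpr, unsigned rt, const uint8_t *mem,
			unsigned num_bytes)
{
	unsigned pos = 0;

	while (num_bytes--) {
		unsigned shift = 8 * (3 - (pos & 0x3));

		if (pos == 0)
			gpr[rt] = 0;	/* first byte going into this register */
		gpr[rt] |= (uint32_t)*mem++ << shift;
		if (++pos == 4) {
			pos = 0;
			if (++rt == 32)
				rt = 0;
		}
	}
}

int main(void)
{
	uint32_t gpr[32] = { 0 };
	const uint8_t mem[] = { 'A', 'B', 'C', 'D', 'E' };

	/* A five-byte lswi into r3 leaves r3 = 0x41424344, r4 = 0x45000000. */
	emulate_lsw(gpr, 3, mem, 5);
	printf("r3=%08x r4=%08x\n", gpr[3], gpr[4]);
	return 0;
}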
- */ -extern struct bug_entry __start___bug_table[], __stop___bug_table[]; - -#ifndef CONFIG_MODULES -#define module_find_bug(x) NULL -#endif - -struct bug_entry *find_bug(unsigned long bugaddr) -{ - struct bug_entry *bug; - - for (bug = __start___bug_table; bug < __stop___bug_table; ++bug) - if (bugaddr == bug->bug_addr) - return bug; - return module_find_bug(bugaddr); -} - -int check_bug_trap(struct pt_regs *regs) -{ - struct bug_entry *bug; - unsigned long addr; - - if (regs->msr & MSR_PR) - return 0; /* not in kernel */ - addr = regs->nip; /* address of trap instruction */ - if (addr < PAGE_OFFSET) - return 0; - bug = find_bug(regs->nip); - if (bug == NULL) - return 0; - if (bug->line & BUG_WARNING_TRAP) { - /* this is a WARN_ON rather than BUG/BUG_ON */ -#ifdef CONFIG_XMON - xmon_printf(KERN_ERR "Badness in %s at %s:%ld\n", - bug->function, bug->file, - bug->line & ~BUG_WARNING_TRAP); -#endif /* CONFIG_XMON */ - printk(KERN_ERR "Badness in %s at %s:%ld\n", - bug->function, bug->file, - bug->line & ~BUG_WARNING_TRAP); - dump_stack(); - return 1; - } -#ifdef CONFIG_XMON - xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%ld!\n", - bug->function, bug->file, bug->line); - xmon(regs); -#endif /* CONFIG_XMON */ - printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n", - bug->function, bug->file, bug->line); - - return 0; -} - -void program_check_exception(struct pt_regs *regs) -{ - unsigned int reason = get_reason(regs); - extern int do_mathemu(struct pt_regs *regs); - -#ifdef CONFIG_MATH_EMULATION - /* (reason & REASON_ILLEGAL) would be the obvious thing here, - * but there seems to be a hardware bug on the 405GP (RevD) - * that means ESR is sometimes set incorrectly - either to - * ESR_DST (!?) or 0. In the process of chasing this with the - * hardware people - not sure if it can happen on any illegal - * instruction or only on FP instructions, whether there is a - * pattern to occurences etc. -dgibson 31/Mar/2003 */ - if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) { - emulate_single_step(regs); - return; - } -#endif /* CONFIG_MATH_EMULATION */ - - if (reason & REASON_FP) { - /* IEEE FP exception */ - int code = 0; - u32 fpscr; - - /* We must make sure the FP state is consistent with - * our MSR_FP in regs - */ - preempt_disable(); - if (regs->msr & MSR_FP) - giveup_fpu(current); - preempt_enable(); - - fpscr = current->thread.fpscr.val; - fpscr &= fpscr << 22; /* mask summary bits with enables */ - if (fpscr & FPSCR_VX) - code = FPE_FLTINV; - else if (fpscr & FPSCR_OX) - code = FPE_FLTOVF; - else if (fpscr & FPSCR_UX) - code = FPE_FLTUND; - else if (fpscr & FPSCR_ZX) - code = FPE_FLTDIV; - else if (fpscr & FPSCR_XX) - code = FPE_FLTRES; - _exception(SIGFPE, regs, code, regs->nip); - return; - } - - if (reason & REASON_TRAP) { - /* trap exception */ - if (debugger_bpt(regs)) - return; - if (check_bug_trap(regs)) { - regs->nip += 4; - return; - } - _exception(SIGTRAP, regs, TRAP_BRKPT, 0); - return; - } - - /* Try to emulate it if we should. 
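The `fpscr &= fpscr << 22` line in program_check_exception() above relies on the FPSCR layout: each exception status bit sits exactly 22 bit positions above its enable bit, so shifting the enables up and ANDing keeps only exceptions that are both raised and enabled. A quick check, assuming the usual asm/reg.h values FPSCR_OX = 0x10000000, FPSCR_OE = 0x40 and FPSCR_VE = 0x80:

#include <stdio.h>
#include <stdint.h>

#define FPSCR_OX 0x10000000u	/* overflow status              */
#define FPSCR_OE 0x00000040u	/* overflow enable              */
#define FPSCR_VE 0x00000080u	/* invalid-operation enable     */

int main(void)
{
	/* Overflow raised but only invalid-op enabled: the mask keeps
	 * nothing, so no FPE_FLTOVF would be reported. */
	uint32_t fpscr = FPSCR_OX | FPSCR_VE;

	fpscr &= fpscr << 22;
	printf("masked fpscr = 0x%08x\n", fpscr);	/* 0x00000000 */

	/* With the matching enable set, the overflow bit survives. */
	fpscr = FPSCR_OX | FPSCR_OE;
	fpscr &= fpscr << 22;
	printf("masked fpscr = 0x%08x\n", fpscr);	/* 0x10000000 */
	return 0;
}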
*/ - if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { - switch (emulate_instruction(regs)) { - case 0: - regs->nip += 4; - emulate_single_step(regs); - return; - case -EFAULT: - _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); - return; - } - } - - if (reason & REASON_PRIVILEGED) - _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); - else - _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); -} - -void single_step_exception(struct pt_regs *regs) -{ - regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */ - if (debugger_sstep(regs)) - return; - _exception(SIGTRAP, regs, TRAP_TRACE, 0); -} - -void alignment_exception(struct pt_regs *regs) -{ - int fixed; - - fixed = fix_alignment(regs); - if (fixed == 1) { - regs->nip += 4; /* skip over emulated instruction */ - emulate_single_step(regs); - return; - } - if (fixed == -EFAULT) { - /* fixed == -EFAULT means the operand address was bad */ - if (user_mode(regs)) - _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar); - else - bad_page_fault(regs, regs->dar, SIGSEGV); - return; - } - _exception(SIGBUS, regs, BUS_ADRALN, regs->dar); -} - -void StackOverflow(struct pt_regs *regs) -{ - printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", - current, regs->gpr[1]); - debugger(regs); - show_regs(regs); - panic("kernel stack overflow"); -} - -void nonrecoverable_exception(struct pt_regs *regs) -{ - printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", - regs->nip, regs->msr); - debugger(regs); - die("nonrecoverable exception", regs, SIGKILL); -} - -void trace_syscall(struct pt_regs *regs) -{ - printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", - current, current->pid, regs->nip, regs->link, regs->gpr[0], - regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); -} - -#ifdef CONFIG_8xx -void SoftwareEmulation(struct pt_regs *regs) -{ - extern int do_mathemu(struct pt_regs *); - extern int Soft_emulate_8xx(struct pt_regs *); - int errcode; - - CHECK_FULL_REGS(regs); - - if (!user_mode(regs)) { - debugger(regs); - die("Kernel Mode Software FPU Emulation", regs, SIGFPE); - } - -#ifdef CONFIG_MATH_EMULATION - errcode = do_mathemu(regs); -#else - errcode = Soft_emulate_8xx(regs); -#endif - if (errcode) { - if (errcode > 0) - _exception(SIGFPE, regs, 0, 0); - else if (errcode == -EFAULT) - _exception(SIGSEGV, regs, 0, 0); - else - _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); - } else - emulate_single_step(regs); -} -#endif /* CONFIG_8xx */ - -#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) - -void DebugException(struct pt_regs *regs, unsigned long debug_status) -{ - if (debug_status & DBSR_IC) { /* instruction completion */ - regs->msr &= ~MSR_DE; - if (user_mode(regs)) { - current->thread.dbcr0 &= ~DBCR0_IC; - } else { - /* Disable instruction completion */ - mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); - /* Clear the instruction completion event */ - mtspr(SPRN_DBSR, DBSR_IC); - if (debugger_sstep(regs)) - return; - } - _exception(SIGTRAP, regs, TRAP_TRACE, 0); - } -} -#endif /* CONFIG_4xx || CONFIG_BOOKE */ - -#if !defined(CONFIG_TAU_INT) -void TAUException(struct pt_regs *regs) -{ - printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", - regs->nip, regs->msr, regs->trap, print_tainted()); -} -#endif /* CONFIG_INT_TAU */ - -/* - * FP unavailable trap from kernel - print a message, but let - * the task use FP in the kernel until it returns to user mode. 
- */ -void kernel_fp_unavailable_exception(struct pt_regs *regs) -{ - regs->msr |= MSR_FP; - printk(KERN_ERR "floating point used in kernel (task=%p, pc=%lx)\n", - current, regs->nip); -} - -void altivec_unavailable_exception(struct pt_regs *regs) -{ - static int kernel_altivec_count; - -#ifndef CONFIG_ALTIVEC - if (user_mode(regs)) { - /* A user program has executed an altivec instruction, - but this kernel doesn't support altivec. */ - _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); - return; - } -#endif - /* The kernel has executed an altivec instruction without - first enabling altivec. Whinge but let it do it. */ - if (++kernel_altivec_count < 10) - printk(KERN_ERR "AltiVec used in kernel (task=%p, pc=%lx)\n", - current, regs->nip); - regs->msr |= MSR_VEC; -} - -#ifdef CONFIG_ALTIVEC -void altivec_assist_exception(struct pt_regs *regs) -{ - int err; - - preempt_disable(); - if (regs->msr & MSR_VEC) - giveup_altivec(current); - preempt_enable(); - if (!user_mode(regs)) { - printk(KERN_ERR "altivec assist exception in kernel mode" - " at %lx\n", regs->nip); - debugger(regs); - die("altivec assist exception", regs, SIGFPE); - return; - } - - err = emulate_altivec(regs); - if (err == 0) { - regs->nip += 4; /* skip emulated instruction */ - emulate_single_step(regs); - return; - } - - if (err == -EFAULT) { - /* got an error reading the instruction */ - _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); - } else { - /* didn't recognize the instruction */ - /* XXX quick hack for now: set the non-Java bit in the VSCR */ - printk(KERN_ERR "unrecognized altivec instruction " - "in %s at %lx\n", current->comm, regs->nip); - current->thread.vscr.u[3] |= 0x10000; - } -} -#endif /* CONFIG_ALTIVEC */ - -#ifdef CONFIG_E500 -void performance_monitor_exception(struct pt_regs *regs) -{ - perf_irq(regs); -} -#endif - -#ifdef CONFIG_FSL_BOOKE -void CacheLockingException(struct pt_regs *regs, unsigned long address, - unsigned long error_code) -{ - /* We treat cache locking instructions from the user - * as priv ops, in the future we could try to do - * something smarter - */ - if (error_code & (ESR_DLK|ESR_ILK)) - _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); - return; -} -#endif /* CONFIG_FSL_BOOKE */ - -#ifdef CONFIG_SPE -void SPEFloatingPointException(struct pt_regs *regs) -{ - unsigned long spefscr; - int fpexc_mode; - int code = 0; - - spefscr = current->thread.spefscr; - fpexc_mode = current->thread.fpexc_mode; - - /* Hardware does not neccessarily set sticky - * underflow/overflow/invalid flags */ - if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { - code = FPE_FLTOVF; - spefscr |= SPEFSCR_FOVFS; - } - else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { - code = FPE_FLTUND; - spefscr |= SPEFSCR_FUNFS; - } - else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) - code = FPE_FLTDIV; - else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { - code = FPE_FLTINV; - spefscr |= SPEFSCR_FINVS; - } - else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) - code = FPE_FLTRES; - - current->thread.spefscr = spefscr; - - _exception(SIGFPE, regs, code, regs->nip); - return; -} -#endif - -#ifdef CONFIG_BOOKE_WDT -/* - * Default handler for a Watchdog exception, - * spins until a reboot occurs - */ -void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) -{ - /* Generic WatchdogHandler, implement your own */ - mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); - return; -} - -void WatchdogException(struct pt_regs *regs) -{ - 
printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); - WatchdogHandler(regs); -} -#endif - -void __init trap_init(void) -{ -} diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S deleted file mode 100644 index 09c6525cfa6..00000000000 --- a/arch/ppc/kernel/vmlinux.lds.S +++ /dev/null @@ -1,172 +0,0 @@ -#include <asm-generic/vmlinux.lds.h> - -OUTPUT_ARCH(powerpc:common) -jiffies = jiffies_64 + 4; -SECTIONS -{ - /* Read-only sections, merged into text segment: */ - . = + SIZEOF_HEADERS; - .interp : { *(.interp) } - .hash : { *(.hash) } - .dynsym : { *(.dynsym) } - .dynstr : { *(.dynstr) } - .rel.text : { *(.rel.text) } - .rela.text : { *(.rela.text) } - .rel.data : { *(.rel.data) } - .rela.data : { *(.rela.data) } - .rel.rodata : { *(.rel.rodata) } - .rela.rodata : { *(.rela.rodata) } - .rel.got : { *(.rel.got) } - .rela.got : { *(.rela.got) } - .rel.ctors : { *(.rel.ctors) } - .rela.ctors : { *(.rela.ctors) } - .rel.dtors : { *(.rel.dtors) } - .rela.dtors : { *(.rela.dtors) } - .rel.bss : { *(.rel.bss) } - .rela.bss : { *(.rela.bss) } - .rel.plt : { *(.rel.plt) } - .rela.plt : { *(.rela.plt) } -/* .init : { *(.init) } =0*/ - .plt : { *(.plt) } - .text : - { - *(.text) - SCHED_TEXT - LOCK_TEXT - *(.fixup) - *(.got1) - __got2_start = .; - *(.got2) - __got2_end = .; - } - _etext = .; - PROVIDE (etext = .); - - RODATA - .fini : { *(.fini) } =0 - .ctors : { *(.ctors) } - .dtors : { *(.dtors) } - - .fixup : { *(.fixup) } - - __ex_table : { - __start___ex_table = .; - *(__ex_table) - __stop___ex_table = .; - } - - __bug_table : { - __start___bug_table = .; - *(__bug_table) - __stop___bug_table = .; - } - - /* Read-write section, merged into data segment: */ - . = ALIGN(4096); - .data : - { - *(.data) - *(.data1) - *(.sdata) - *(.sdata2) - *(.got.plt) *(.got) - *(.dynamic) - CONSTRUCTORS - } - - . = ALIGN(4096); - __nosave_begin = .; - .data_nosave : { *(.data.nosave) } - . = ALIGN(4096); - __nosave_end = .; - - . = ALIGN(32); - .data.cacheline_aligned : { *(.data.cacheline_aligned) } - - _edata = .; - PROVIDE (edata = .); - - . = ALIGN(8192); - .data.init_task : { *(.data.init_task) } - - . = ALIGN(4096); - __init_begin = .; - .init.text : { - _sinittext = .; - *(.init.text) - _einittext = .; - } - /* .exit.text is discarded at runtime, not link time, - to deal with references from __bug_table */ - .exit.text : { *(.exit.text) } - .init.data : { - *(.init.data); - __vtop_table_begin = .; - *(.vtop_fixup); - __vtop_table_end = .; - __ptov_table_begin = .; - *(.ptov_fixup); - __ptov_table_end = .; - } - . = ALIGN(16); - __setup_start = .; - .init.setup : { *(.init.setup) } - __setup_end = .; - __initcall_start = .; - .initcall.init : { - *(.initcall1.init) - *(.initcall2.init) - *(.initcall3.init) - *(.initcall4.init) - *(.initcall5.init) - *(.initcall6.init) - *(.initcall7.init) - } - __initcall_end = .; - - __con_initcall_start = .; - .con_initcall.init : { *(.con_initcall.init) } - __con_initcall_end = .; - - SECURITY_INIT - - __start___ftr_fixup = .; - __ftr_fixup : { *(__ftr_fixup) } - __stop___ftr_fixup = .; - - . = ALIGN(32); - __per_cpu_start = .; - .data.percpu : { *(.data.percpu) } - __per_cpu_end = .; - - . = ALIGN(4096); - __initramfs_start = .; - .init.ramfs : { *(.init.ramfs) } - __initramfs_end = .; - - . = ALIGN(4096); - __init_end = .; - - . = ALIGN(4096); - _sextratext = .; - _eextratext = .; - - __bss_start = .; - .bss : - { - *(.sbss) *(.scommon) - *(.dynbss) - *(.bss) - *(COMMON) - } - __bss_stop = .; - - _end = . 
; - PROVIDE (end = .); - - /* Sections to be discarded. */ - /DISCARD/ : { - *(.exitcall.exit) - *(.exit.data) - } -} |
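The paired symbols this script defines (__initcall_start/__initcall_end, __start___bug_table/__stop___bug_table, __start___ftr_fixup/__stop___ftr_fixup, and so on) are consumed from C as nothing more than the bounds of an array the linker assembled, which is how __initcall(TAU_init) in temp.c and find_bug() in traps.c get found at run time. A self-contained illustration of that pattern, with an ordinary array standing in for the linker-provided section and bounds:

#include <stdio.h>

typedef int (*initcall_t)(void);

static int tau_init_example(void)  { printf("tau_init\n");  return 0; }
static int time_init_example(void) { printf("time_init\n"); return 0; }

/* In the kernel, __initcall() drops a function pointer into one of the
 * .initcallN.init sections and the script above supplies the
 * __initcall_start/__initcall_end bounds; here a plain array plays that
 * role so the walking loop can run anywhere. */
static initcall_t initcalls[] = { time_init_example, tau_init_example };
static initcall_t *initcall_start = initcalls;
static initcall_t *initcall_end =
	initcalls + sizeof(initcalls) / sizeof(initcalls[0]);

int main(void)
{
	initcall_t *fn;

	for (fn = initcall_start; fn < initcall_end; fn++)
		(*fn)();
	return 0;
}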
