author    | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-15 09:32:52 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-15 09:32:52 -0700
commit    | 0fa213310cd8fa7a51071cdcf130e26fa56e9549 (patch)
tree      | 2a7e5cc33c8938ec82604a99c3797a3132fd91ec /arch/powerpc/kernel
parent    | d3bf80bff13597004b5724ee4549cd68eb0badf0 (diff)
parent    | bc47ab0241c7c86da4f5e5f82fbca7d45387c18d (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (103 commits)
powerpc: Fix bug in move of altivec code to vector.S
powerpc: Add support for swiotlb on 32-bit
powerpc/spufs: Remove unused error path
powerpc: Fix warning when printing a resource_size_t
powerpc/xmon: Remove unused variable in xmon.c
powerpc/pseries: Fix warnings when printing resource_size_t
powerpc: Shield code specific to 64-bit server processors
powerpc: Separate PACA fields for server CPUs
powerpc: Split exception handling out of head_64.S
powerpc: Introduce CONFIG_PPC_BOOK3S
powerpc: Move VMX and VSX asm code to vector.S
powerpc: Set init_bootmem_done on NUMA platforms as well
powerpc/mm: Fix a AB->BA deadlock scenario with nohash MMU context lock
powerpc/mm: Fix some SMP issues with MMU context handling
powerpc: Add PTRACE_SINGLEBLOCK support
fbdev: Add PLB support and cleanup DCR in xilinxfb driver.
powerpc/virtex: Add ml510 reference design device tree
powerpc/virtex: Add Xilinx ML510 reference design support
powerpc/virtex: refactor intc driver and add support for i8259 cascading
powerpc/virtex: Add support for Xilinx PCI host bridge
...
Diffstat (limited to 'arch/powerpc/kernel')
28 files changed, 1731 insertions, 1460 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a2c683403c2..a7def5f90ca 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -36,7 +36,7 @@ obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   firmware.o nvram_64.o
 obj64-$(CONFIG_RELOCATABLE)	+= reloc_64.o
 obj-$(CONFIG_PPC64)		+= vdso64/
-obj-$(CONFIG_ALTIVEC)		+= vecemu.o vector.o
+obj-$(CONFIG_ALTIVEC)		+= vecemu.o
 obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
 obj-$(CONFIG_PPC_OF)		+= of_device.o of_platform.o prom_parse.o
 obj-$(CONFIG_PPC_CLOCK)		+= clock.o
@@ -82,6 +82,7 @@ obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_PPC_UDBG_16550)	+= legacy_serial.o udbg_16550.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
+obj-$(CONFIG_SWIOTLB)		+= dma-swiotlb.o

 pci64-$(CONFIG_PPC64)		+= pci_dn.o isa-bridge.o
 obj-$(CONFIG_PCI)		+= pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \
@@ -111,6 +112,7 @@ obj-y				+= ppc_save_regs.o
 endif

 extra-$(CONFIG_PPC_FPU)		+= fpu.o
+extra-$(CONFIG_ALTIVEC)		+= vector.o
 extra-$(CONFIG_PPC64)		+= entry_64.o
 extra-y				+= systbl_chk.i
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 5ffcfaa77d6..a5b632e52fa 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -24,6 +24,7 @@
 #include <asm/system.h>
 #include <asm/cache.h>
 #include <asm/cputable.h>
+#include <asm/emulated_ops.h>

 struct aligninfo {
 	unsigned char len;
@@ -730,8 +731,10 @@ int fix_alignment(struct pt_regs *regs)
 	areg = dsisr & 0x1f;		/* register to update */

 #ifdef CONFIG_SPE
-	if ((instr >> 26) == 0x4)
+	if ((instr >> 26) == 0x4) {
+		PPC_WARN_EMULATED(spe);
 		return emulate_spe(regs, reg, instr);
+	}
 #endif

 	instr = (dsisr >> 10) & 0x7f;
@@ -783,23 +786,28 @@ int fix_alignment(struct pt_regs *regs)
 			flags |= SPLT;
 			nb = 8;
 		}
+		PPC_WARN_EMULATED(vsx);
 		return emulate_vsx(addr, reg, areg, regs, flags, nb);
 	}
 #endif
 	/* A size of 0 indicates an instruction we don't support, with
 	 * the exception of DCBZ which is handled as a special case here
 	 */
-	if (instr == DCBZ)
+	if (instr == DCBZ) {
+		PPC_WARN_EMULATED(dcbz);
 		return emulate_dcbz(regs, addr);
+	}
 	if (unlikely(nb == 0))
 		return 0;

 	/* Load/Store Multiple instructions are handled in their own
 	 * function
 	 */
-	if (flags & M)
+	if (flags & M) {
+		PPC_WARN_EMULATED(multiple);
 		return emulate_multiple(regs, addr, reg, nb, flags, instr, swiz);
+	}

 	/* Verify the address of the operand */
 	if (unlikely(user_mode(regs) &&
@@ -816,8 +824,12 @@ int fix_alignment(struct pt_regs *regs)
 	}

 	/* Special case for 16-byte FP loads and stores */
-	if (nb == 16)
+	if (nb == 16) {
+		PPC_WARN_EMULATED(fp_pair);
 		return emulate_fp_pair(addr, reg, flags);
+	}
+
+	PPC_WARN_EMULATED(unaligned);

 	/* If we are loading, get the data from user space, else
 	 * get it from register values
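As an aside, the PPC_WARN_EMULATED() calls added above come from the new asm/emulated_ops.h header; the sketch below illustrates the accounting idea behind such a macro (per-class counter plus optional warning), not the kernel's actual implementation — all names here are hypothetical.

	/*
	 * Hypothetical sketch only: each class of emulated instruction gets
	 * a named atomic counter; the handler bumps it and may warn, so the
	 * kernel can report how often it emulates on behalf of userspace.
	 */
	#include <linux/kernel.h>
	#include <asm/atomic.h>

	struct emulated_counter {
		const char *name;
		atomic_t count;
	};

	static struct emulated_counter emulated_dcbz = { .name = "dcbz" };
	static int warn_on_emulation;	/* would be a sysctl in real life */

	#define EXAMPLE_WARN_EMULATED(ctr) do {				\
		atomic_inc(&(ctr).count);				\
		if (warn_on_emulation)					\
			printk(KERN_WARNING "emulated %s instruction\n",\
			       (ctr).name);				\
	} while (0)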
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e981d1ce191..561b6465231 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -122,8 +122,6 @@ int main(void)
 	DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
 	DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
 	DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
-	DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
-	DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
 	DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
 	DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
 	DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
@@ -132,35 +130,30 @@ int main(void)
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
 	DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
-	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
-	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
-	DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
 #ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
 					    context.low_slices_psize));
 	DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
 					    context.high_slices_psize));
 	DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
+#endif /* CONFIG_PPC_MM_SLICES */
+#ifdef CONFIG_PPC_STD_MMU_64
+	DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
+	DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
+	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
+	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
+	DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
+#ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
 #else
 	DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
-
 #endif /* CONFIG_PPC_MM_SLICES */
 	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
 	DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
 	DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
-	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
 	DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
-	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
-	DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
-	DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
-	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
-	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
 	DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
-	DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
-	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
-
 	DEFINE(SLBSHADOW_STACKVSID,
 	       offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));
 	DEFINE(SLBSHADOW_STACKESID,
@@ -170,6 +163,15 @@ int main(void)
 	DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
 	DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
 	DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
+#endif /* CONFIG_PPC_STD_MMU_64 */
+	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
+	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+	DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
+	DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
+	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
+	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
+	DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
+	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
 #endif /* CONFIG_PPC64 */

 	/* RTAS */
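For context, the DEFINE() entries shuffled above are how C structure offsets become assembly-visible constants (PACACONTEXTID etc. used by the exception code later in this merge). A simplified sketch of the standard kbuild trick follows; see include/linux/kbuild.h for the real macros.

	/*
	 * Sketch of the asm-offsets mechanism: the file is compiled with
	 * -S only, DEFINE() emits a "->" marker line into the generated
	 * assembly, and a sed script turns those markers into #define
	 * lines in asm-offsets.h, which .S files then include so they can
	 * write e.g. "ld r9,PACACONTEXTID(r13)".
	 */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	#define BLANK() asm volatile("\n->" : : )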
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 3e33fb933d9..4a24a2fc457 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -427,7 +427,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_name		= "POWER7 (architected)",
 		.cpu_features		= CPU_FTRS_POWER7,
 		.cpu_user_features	= COMMON_USER_POWER7,
-		.mmu_features		= MMU_FTR_HPTE_TABLE,
+		.mmu_features		= MMU_FTR_HPTE_TABLE |
+			MMU_FTR_TLBIE_206,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.machine_check		= machine_check_generic,
@@ -441,7 +442,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_name		= "POWER7 (raw)",
 		.cpu_features		= CPU_FTRS_POWER7,
 		.cpu_user_features	= COMMON_USER_POWER7,
-		.mmu_features		= MMU_FTR_HPTE_TABLE,
+		.mmu_features		= MMU_FTR_HPTE_TABLE |
+			MMU_FTR_TLBIE_206,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 6,
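An illustrative (not from this diff) consumer of the MMU_FTR_TLBIE_206 bit set above for POWER7; it assumes the mmu_has_feature() helper from asm/mmu.h.

	#include <asm/mmu.h>

	/* Sketch: true on CPUs whose cpu_spec ORs in MMU_FTR_TLBIE_206,
	 * i.e. those wanting the ISA 2.06 form of tlbie. */
	static inline int want_isa206_tlbie(void)
	{
		return mmu_has_feature(MMU_FTR_TLBIE_206);
	}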
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
new file mode 100644
index 00000000000..68ccf11e4f1
--- /dev/null
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -0,0 +1,163 @@
+/*
+ * Contains routines needed to support swiotlb for ppc.
+ *
+ * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/pfn.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+#include <asm/machdep.h>
+#include <asm/swiotlb.h>
+#include <asm/dma.h>
+#include <asm/abs_addr.h>
+
+int swiotlb __read_mostly;
+unsigned int ppc_swiotlb_enable;
+
+void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr)
+{
+	unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr));
+	void *pageaddr = page_address(pfn_to_page(pfn));
+
+	if (pageaddr != NULL)
+		return pageaddr + (addr % PAGE_SIZE);
+	return NULL;
+}
+
+dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
+{
+	return paddr + get_dma_direct_offset(hwdev);
+}
+
+phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
+{
+	return baddr - get_dma_direct_offset(hwdev);
+}
+
+/*
+ * Determine if an address needs bounce buffering via swiotlb.
+ * Going forward I expect the swiotlb code to generalize on using
+ * a dma_ops->addr_needs_map, and this function will move from here to the
+ * generic swiotlb code.
+ */
+int
+swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr,
+				   size_t size)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);
+
+	BUG_ON(!dma_ops);
+	return dma_ops->addr_needs_map(hwdev, addr, size);
+}
+
+/*
+ * Determine if an address is reachable by a pci device, or if we must bounce.
+ */
+static int
+swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
+{
+	u64 mask = dma_get_mask(hwdev);
+	dma_addr_t max;
+	struct pci_controller *hose;
+	struct pci_dev *pdev = to_pci_dev(hwdev);
+
+	hose = pci_bus_to_host(pdev->bus);
+	max = hose->dma_window_base_cur + hose->dma_window_size;
+
+	/* check that we're within mapped pci window space */
+	if ((addr + size > max) | (addr < hose->dma_window_base_cur))
+		return 1;
+
+	return !is_buffer_dma_capable(mask, addr, size);
+}
+
+static int
+swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
+{
+	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+}
+
+/*
+ * At the moment, all platforms that use this code only require
+ * swiotlb to be used if we're operating on HIGHMEM.  Since
+ * we don't ever call anything other than map_sg, unmap_sg,
+ * map_page, and unmap_page on highmem, use normal dma_ops
+ * for everything else.
+ */
+struct dma_mapping_ops swiotlb_dma_ops = {
+	.alloc_coherent = dma_direct_alloc_coherent,
+	.free_coherent = dma_direct_free_coherent,
+	.map_sg = swiotlb_map_sg_attrs,
+	.unmap_sg = swiotlb_unmap_sg_attrs,
+	.dma_supported = swiotlb_dma_supported,
+	.map_page = swiotlb_map_page,
+	.unmap_page = swiotlb_unmap_page,
+	.addr_needs_map = swiotlb_addr_needs_map,
+	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
+	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = swiotlb_sync_sg_for_device
+};
+
+struct dma_mapping_ops swiotlb_pci_dma_ops = {
+	.alloc_coherent = dma_direct_alloc_coherent,
+	.free_coherent = dma_direct_free_coherent,
+	.map_sg = swiotlb_map_sg_attrs,
+	.unmap_sg = swiotlb_unmap_sg_attrs,
+	.dma_supported = swiotlb_dma_supported,
+	.map_page = swiotlb_map_page,
+	.unmap_page = swiotlb_unmap_page,
+	.addr_needs_map = swiotlb_pci_addr_needs_map,
+	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
+	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = swiotlb_sync_sg_for_device
+};
+
+static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
+				  unsigned long action, void *data)
+{
+	struct device *dev = data;
+
+	/* We are only interested in device addition */
+	if (action != BUS_NOTIFY_ADD_DEVICE)
+		return 0;
+
+	/* May need to bounce if the device can't address all of DRAM */
+	if (dma_get_mask(dev) < lmb_end_of_DRAM())
+		set_dma_ops(dev, &swiotlb_dma_ops);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ppc_swiotlb_plat_bus_notifier = {
+	.notifier_call = ppc_swiotlb_bus_notify,
+	.priority = 0,
+};
+
+static struct notifier_block ppc_swiotlb_of_bus_notifier = {
+	.notifier_call = ppc_swiotlb_bus_notify,
+	.priority = 0,
+};
+
+int __init swiotlb_setup_bus_notifier(void)
+{
+	bus_register_notifier(&platform_bus_type,
+			      &ppc_swiotlb_plat_bus_notifier);
+	bus_register_notifier(&of_platform_bus_type,
+			      &ppc_swiotlb_of_bus_notifier);
+
+	return 0;
+}
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 6b02793dc75..20a60d661ba 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -19,7 +19,7 @@
  * default the offset is PCI_DRAM_OFFSET.
  */

-static unsigned long get_dma_direct_offset(struct device *dev)
+unsigned long get_dma_direct_offset(struct device *dev)
 {
 	if (dev)
 		return (unsigned long)dev->archdata.dma_data;
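The policy the bus notifier in dma-swiotlb.c above encodes, restated as a standalone sketch (the example_* name is hypothetical; dma_direct_ops is powerpc's normal non-bouncing ops structure): a device whose DMA mask cannot reach the top of RAM gets the swiotlb ops, so map_page()/map_sg() transparently bounce out-of-range buffers, while everyone else keeps the direct ops.

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/lmb.h>

	static void example_pick_dma_ops(struct device *dev)
	{
		/* lmb_end_of_DRAM() is the highest physical address + 1 */
		if (dma_get_mask(dev) < lmb_end_of_DRAM())
			set_dma_ops(dev, &swiotlb_dma_ops);	/* may bounce */
		else
			set_dma_ops(dev, &dma_direct_ops);	/* never bounces */
	}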
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
new file mode 100644
index 00000000000..eb898112e57
--- /dev/null
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -0,0 +1,978 @@
+/*
+ * This file contains the 64-bit "server" PowerPC variant
+ * of the low level exception handling including exception
+ * vectors, exception return, part of the slb and stab
+ * handling and other fixed offset specific things.
+ *
+ * This file is meant to be #included from head_64.S due to
+ * position dependent assembly.
+ *
+ * Most of this originates from head_64.S and thus has the same
+ * copyright history.
+ *
+ */
+
+/*
+ * We lay out physical memory as follows:
+ * 0x0000 - 0x00ff : Secondary processor spin code
+ * 0x0100 - 0x2fff : pSeries Interrupt prologs
+ * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
+ * 0x6000 - 0x6fff : Initial (CPU0) segment table
+ * 0x7000 - 0x7fff : FWNMI data area
+ * 0x8000 -        : Early init and support code
+ */
+
+/*
+ * SPRG Usage
+ *
+ * Register	Definition
+ *
+ * SPRG0	reserved for hypervisor
+ * SPRG1	temp - used to save gpr
+ * SPRG2	temp - used to save gpr
+ * SPRG3	virt addr of paca
+ */
+
+/*
+ * This is the start of the interrupt handlers for pSeries
+ * This code runs with relocation off.
+ * Code from here to __end_interrupts gets copied down to real
+ * address 0x100 when we are running a relocatable kernel.
+ * Therefore any relative branches in this section must only
+ * branch to labels in this section.
+ */
+	. = 0x100
+	.globl __start_interrupts
+__start_interrupts:
+
+	STD_EXCEPTION_PSERIES(0x100, system_reset)
+
+	. = 0x200
+_machine_check_pSeries:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13		/* save r13 */
+	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+	. = 0x300
+	.globl data_access_pSeries
+data_access_pSeries:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13
+BEGIN_FTR_SECTION
+	mtspr	SPRN_SPRG2,r12
+	mfspr	r13,SPRN_DAR
+	mfspr	r12,SPRN_DSISR
+	srdi	r13,r13,60
+	rlwimi	r13,r12,16,0x20
+	mfcr	r12
+	cmpwi	r13,0x2c
+	beq	do_stab_bolted_pSeries
+	mtcrf	0x80,r12
+	mfspr	r12,SPRN_SPRG2
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
+
+	. = 0x380
+	.globl data_access_slb_pSeries
+data_access_slb_pSeries:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13
+	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
+	std	r3,PACA_EXSLB+EX_R3(r13)
+	mfspr	r3,SPRN_DAR
+	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
+	mfcr	r9
+#ifdef __DISABLED__
+	/* Keep that around for when we re-implement dynamic VSIDs */
+	cmpdi	r3,0
+	bge	slb_miss_user_pseries
+#endif /* __DISABLED__ */
+	std	r10,PACA_EXSLB+EX_R10(r13)
+	std	r11,PACA_EXSLB+EX_R11(r13)
+	std	r12,PACA_EXSLB+EX_R12(r13)
+	mfspr	r10,SPRN_SPRG1
+	std	r10,PACA_EXSLB+EX_R13(r13)
+	mfspr	r12,SPRN_SRR1		/* and SRR1 */
+#ifndef CONFIG_RELOCATABLE
+	b	.slb_miss_realmode
+#else
+	/*
+	 * We can't just use a direct branch to .slb_miss_realmode
+	 * because the distance from here to there depends on where
+	 * the kernel ends up being put.
+	 */
+	mfctr	r11
+	ld	r10,PACAKBASE(r13)
+	LOAD_HANDLER(r10, .slb_miss_realmode)
+	mtctr	r10
+	bctr
+#endif
+
+	STD_EXCEPTION_PSERIES(0x400, instruction_access)
+
+	. = 0x480
+	.globl instruction_access_slb_pSeries
+instruction_access_slb_pSeries:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13
+	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
+	std	r3,PACA_EXSLB+EX_R3(r13)
+	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
+	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
+	mfcr	r9
+#ifdef __DISABLED__
+	/* Keep that around for when we re-implement dynamic VSIDs */
+	cmpdi	r3,0
+	bge	slb_miss_user_pseries
+#endif /* __DISABLED__ */
+	std	r10,PACA_EXSLB+EX_R10(r13)
+	std	r11,PACA_EXSLB+EX_R11(r13)
+	std	r12,PACA_EXSLB+EX_R12(r13)
+	mfspr	r10,SPRN_SPRG1
+	std	r10,PACA_EXSLB+EX_R13(r13)
+	mfspr	r12,SPRN_SRR1		/* and SRR1 */
+#ifndef CONFIG_RELOCATABLE
+	b	.slb_miss_realmode
+#else
+	mfctr	r11
+	ld	r10,PACAKBASE(r13)
+	LOAD_HANDLER(r10, .slb_miss_realmode)
+	mtctr	r10
+	bctr
+#endif
+
+	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
+	STD_EXCEPTION_PSERIES(0x600, alignment)
+	STD_EXCEPTION_PSERIES(0x700, program_check)
+	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
+	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
+	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
+	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
+
+	. = 0xc00
+	.globl	system_call_pSeries
+system_call_pSeries:
+	HMT_MEDIUM
+BEGIN_FTR_SECTION
+	cmpdi	r0,0x1ebe
+	beq-	1f
+END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
+	mr	r9,r13
+	mfspr	r13,SPRN_SPRG3
+	mfspr	r11,SPRN_SRR0
+	ld	r12,PACAKBASE(r13)
+	ld	r10,PACAKMSR(r13)
+	LOAD_HANDLER(r12, system_call_entry)
+	mtspr	SPRN_SRR0,r12
+	mfspr	r12,SPRN_SRR1
+	mtspr	SPRN_SRR1,r10
+	rfid
+	b	.	/* prevent speculative execution */
+
+/* Fast LE/BE switch system call */
+1:	mfspr	r12,SPRN_SRR1
+	xori	r12,r12,MSR_LE
+	mtspr	SPRN_SRR1,r12
+	rfid		/* return to userspace */
+	b	.
+
+	STD_EXCEPTION_PSERIES(0xd00, single_step)
+	STD_EXCEPTION_PSERIES(0xe00, trap_0e)
+
+	/* We need to deal with the Altivec unavailable exception
+	 * here which is at 0xf20, thus in the middle of the
+	 * prolog code of the PerformanceMonitor one. A little
+	 * trickery is thus necessary
+	 */
+	. = 0xf00
+	b	performance_monitor_pSeries
+
+	. = 0xf20
+	b	altivec_unavailable_pSeries
+
+	. = 0xf40
+	b	vsx_unavailable_pSeries
+
+#ifdef CONFIG_CBE_RAS
+	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
+#endif /* CONFIG_CBE_RAS */
+	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+#ifdef CONFIG_CBE_RAS
+	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
+#endif /* CONFIG_CBE_RAS */
+	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+#ifdef CONFIG_CBE_RAS
+	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
+#endif /* CONFIG_CBE_RAS */
+
+	. = 0x3000
+
+/*** pSeries interrupt support ***/
+
+	/* moved from 0xf00 */
+	STD_EXCEPTION_PSERIES(., performance_monitor)
+	STD_EXCEPTION_PSERIES(., altivec_unavailable)
+	STD_EXCEPTION_PSERIES(., vsx_unavailable)
+
+/*
+ * An interrupt came in while soft-disabled; clear EE in SRR1,
+ * clear paca->hard_enabled and return.
+ */
+masked_interrupt:
+	stb	r10,PACAHARDIRQEN(r13)
+	mtcrf	0x80,r9
+	ld	r9,PACA_EXGEN+EX_R9(r13)
+	mfspr	r10,SPRN_SRR1
+	rldicl	r10,r10,48,1		/* clear MSR_EE */
+	rotldi	r10,r10,16
+	mtspr	SPRN_SRR1,r10
+	ld	r10,PACA_EXGEN+EX_R10(r13)
+	mfspr	r13,SPRN_SPRG1
+	rfid
+	b	.
+
+	.align	7
+do_stab_bolted_pSeries:
+	mtcrf	0x80,r12
+	mfspr	r12,SPRN_SPRG2
+	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
+
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Vectors for the FWNMI option.  Share common code.
+ */
+	.globl system_reset_fwnmi
+	.align 7
+system_reset_fwnmi:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13		/* save r13 */
+	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
+
+	.globl machine_check_fwnmi
+	.align 7
+machine_check_fwnmi:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13		/* save r13 */
+	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+#endif /* CONFIG_PPC_PSERIES */
+
+#ifdef __DISABLED__
+/*
+ * This is used for when the SLB miss handler has to go virtual,
+ * which doesn't happen for now anymore but will once we re-implement
+ * dynamic VSIDs for shared page tables
+ */
+slb_miss_user_pseries:
+	std	r10,PACA_EXGEN+EX_R10(r13)
+	std	r11,PACA_EXGEN+EX_R11(r13)
+	std	r12,PACA_EXGEN+EX_R12(r13)
+	mfspr	r10,SPRG1
+	ld	r11,PACA_EXSLB+EX_R9(r13)
+	ld	r12,PACA_EXSLB+EX_R3(r13)
+	std	r10,PACA_EXGEN+EX_R13(r13)
+	std	r11,PACA_EXGEN+EX_R9(r13)
+	std	r12,PACA_EXGEN+EX_R3(r13)
+	clrrdi	r12,r13,32
+	mfmsr	r10
+	mfspr	r11,SRR0		/* save SRR0 */
+	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
+	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
+	mtspr	SRR0,r12
+	mfspr	r12,SRR1		/* and SRR1 */
+	mtspr	SRR1,r10
+	rfid
+	b	.			/* prevent spec. execution */
+#endif /* __DISABLED__ */
+
+	.align	7
+	.globl	__end_interrupts
+__end_interrupts:
+
+/*
+ * Code from here down to __end_handlers is invoked from the
+ * exception prologs above.  Because the prologs assemble the
+ * addresses of these handlers using the LOAD_HANDLER macro,
+ * which uses an addi instruction, these handlers must be in
+ * the first 32k of the kernel image.
+ */
+
+/*** Common interrupt handlers ***/
+
+	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
+
+	/*
+	 * Machine check is different because we use a different
+	 * save area: PACA_EXMC instead of PACA_EXGEN.
+	 */
+	.align	7
+	.globl machine_check_common
+machine_check_common:
+	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+	FINISH_NAP
+	DISABLE_INTS
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.machine_check_exception
+	b	.ret_from_except
+
+	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
+	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
+	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
+	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
+	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
+	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
+	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
+#ifdef CONFIG_ALTIVEC
+	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
+#else
+	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
+#endif
+#ifdef CONFIG_CBE_RAS
+	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
+	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
+	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
+#endif /* CONFIG_CBE_RAS */
+
+	.align	7
+system_call_entry:
+	b	system_call_common
+
+/*
+ * Here we have detected that the kernel stack pointer is bad.
+ * R9 contains the saved CR, r13 points to the paca,
+ * r10 contains the (bad) kernel stack pointer,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * We switch to using an emergency stack, save the registers there,
+ * and call kernel_bad_stack(), which panics.
+ */
+bad_stack:
+	ld	r1,PACAEMERGSP(r13)
+	subi	r1,r1,64+INT_FRAME_SIZE
+	std	r9,_CCR(r1)
+	std	r10,GPR1(r1)
+	std	r11,_NIP(r1)
+	std	r12,_MSR(r1)
+	mfspr	r11,SPRN_DAR
+	mfspr	r12,SPRN_DSISR
+	std	r11,_DAR(r1)
+	std	r12,_DSISR(r1)
+	mflr	r10
+	mfctr	r11
+	mfxer	r12
+	std	r10,_LINK(r1)
+	std	r11,_CTR(r1)
+	std	r12,_XER(r1)
+	SAVE_GPR(0,r1)
+	SAVE_GPR(2,r1)
+	SAVE_4GPRS(3,r1)
+	SAVE_2GPRS(7,r1)
+	SAVE_10GPRS(12,r1)
+	SAVE_10GPRS(22,r1)
+	lhz	r12,PACA_TRAP_SAVE(r13)
+	std	r12,_TRAP(r1)
+	addi	r11,r1,INT_FRAME_SIZE
+	std	r11,0(r1)
+	li	r12,0
+	std	r12,0(r11)
+	ld	r2,PACATOC(r13)
+1:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.kernel_bad_stack
+	b	1b
+
+/*
+ * Here r13 points to the paca, r9 contains the saved CR,
+ * SRR0 and SRR1 are saved in r11 and r12,
+ * r9 - r13 are saved in paca->exgen.
+ */
+	.align	7
+	.globl data_access_common
+data_access_common:
+	mfspr	r10,SPRN_DAR
+	std	r10,PACA_EXGEN+EX_DAR(r13)
+	mfspr	r10,SPRN_DSISR
+	stw	r10,PACA_EXGEN+EX_DSISR(r13)
+	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
+	ld	r3,PACA_EXGEN+EX_DAR(r13)
+	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
+	li	r5,0x300
+	b	.do_hash_page		/* Try to handle as hpte fault */
+
+	.align	7
+	.globl instruction_access_common
+instruction_access_common:
+	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
+	ld	r3,_NIP(r1)
+	andis.	r4,r12,0x5820
+	li	r5,0x400
+	b	.do_hash_page		/* Try to handle as hpte fault */
+
+/*
+ * Here is the common SLB miss user that is used when going to virtual
+ * mode for SLB misses, that is currently not used
+ */
+#ifdef __DISABLED__
+	.align	7
+	.globl	slb_miss_user_common
+slb_miss_user_common:
+	mflr	r10
+	std	r3,PACA_EXGEN+EX_DAR(r13)
+	stw	r9,PACA_EXGEN+EX_CCR(r13)
+	std	r10,PACA_EXGEN+EX_LR(r13)
+	std	r11,PACA_EXGEN+EX_SRR0(r13)
+	bl	.slb_allocate_user
+
+	ld	r10,PACA_EXGEN+EX_LR(r13)
+	ld	r3,PACA_EXGEN+EX_R3(r13)
+	lwz	r9,PACA_EXGEN+EX_CCR(r13)
+	ld	r11,PACA_EXGEN+EX_SRR0(r13)
+	mtlr	r10
+	beq-	slb_miss_fault
+
+	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
+	beq-	unrecov_user_slb
+	mfmsr	r10
+
+.machine push
+.machine "power4"
+	mtcrf	0x80,r9
+.machine pop
+
+	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
+	mtmsrd	r10,1
+
+	mtspr	SRR0,r11
+	mtspr	SRR1,r12
+
+	ld	r9,PACA_EXGEN+EX_R9(r13)
+	ld	r10,PACA_EXGEN+EX_R10(r13)
+	ld	r11,PACA_EXGEN+EX_R11(r13)
+	ld	r12,PACA_EXGEN+EX_R12(r13)
+	ld	r13,PACA_EXGEN+EX_R13(r13)
+	rfid
+	b	.
+
+slb_miss_fault:
+	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
+	ld	r4,PACA_EXGEN+EX_DAR(r13)
+	li	r5,0
+	std	r4,_DAR(r1)
+	std	r5,_DSISR(r1)