author     Linus Torvalds <torvalds@linux-foundation.org>   2012-10-06 03:16:12 +0900
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-10-06 03:16:12 +0900
commit     5f3d2f2e1a63679cf1c4a4210f2f1cc2f335bef6 (patch)
tree       9189bd6c81fe5f982a7ae45d2f3d900176658509 /arch/powerpc/kernel
parent     283dbd82055eb70ff3b469f812d9c695f18c9641 (diff)
parent     d900bd7366463fd96a907b2c212242e2b68b27d8 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Benjamin Herrenschmidt:
 "Some highlights in addition to the usual batch of fixes:

   - 64TB address space support for 64-bit processes by Aneesh Kumar

   - Gavin Shan did a major cleanup & re-organization of our EEH
     support code (IBM fancy PCI error handling & recovery
     infrastructure) which paves the way for supporting different
     platform backends, along with some rework of the PCIe code for
     the PowerNV platform in order to remove home made resource
     allocations and instead use the generic code (which is possible
     after some small improvements to it done by Gavin).

   - Uprobes support by Ananth N Mavinakayanahalli

   - A pile of embedded updates from Freescale folks, including new
     SoC and board supports, more KVM stuff including preparing for
     64-bit BookE KVM support, ePAPR 1.1 updates, etc..."

Fixup trivial conflicts in drivers/scsi/ipr.c

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (146 commits)
  powerpc/iommu: Fix multiple issues with IOMMU pools code
  powerpc: Fix VMX fix for memcpy case
  driver/mtd:IFC NAND:Initialise internal SRAM before any write
  powerpc/fsl-pci: use 'Header Type' to identify PCIE mode
  powerpc/eeh: Don't release eeh_mutex in eeh_phb_pe_get
  powerpc: Remove tlb batching hack for nighthawk
  powerpc: Set paca->data_offset = 0 for boot cpu
  powerpc/perf: Sample only if SIAR-Valid bit is set in P7+
  powerpc/fsl-pci: fix warning when CONFIG_SWIOTLB is disabled
  powerpc/mpc85xx: Update interrupt handling for IFC controller
  powerpc/85xx: Enable USB support in p1023rds_defconfig
  powerpc/smp: Do not disable IPI interrupts during suspend
  powerpc/eeh: Fix crash on converting OF node to edev
  powerpc/eeh: Lock module while handling EEH event
  powerpc/kprobe: Don't emulate store when kprobe stwu r1
  powerpc/kprobe: Complete kprobe and migrate exception frame
  powerpc/kprobe: Introduce a new thread flag
  powerpc: Remove unused __get_user64() and __put_user64()
  powerpc/eeh: Global mutex to protect PE tree
  powerpc/eeh: Remove EEH PE for normal PCI hotplug
  ...
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile               |   1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c          |   2
-rw-r--r--  arch/powerpc/kernel/cpu_setup_fsl_booke.S  |  74
-rw-r--r--  arch/powerpc/kernel/cputable.c             |   4
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c          |  22
-rw-r--r--  arch/powerpc/kernel/dma.c                  |   3
-rw-r--r--  arch/powerpc/kernel/entry_32.S             |  47
-rw-r--r--  arch/powerpc/kernel/entry_64.S             |  35
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S       | 212
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S       | 127
-rw-r--r--  arch/powerpc/kernel/fadump.c               |   3
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S       |  46
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c        |  25
-rw-r--r--  arch/powerpc/kernel/ibmebus.c              |   1
-rw-r--r--  arch/powerpc/kernel/iommu.c                |   5
-rw-r--r--  arch/powerpc/kernel/irq.c                  |   8
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c        |  14
-rw-r--r--  arch/powerpc/kernel/paca.c                 |   1
-rw-r--r--  arch/powerpc/kernel/pci-common.c           |  16
-rw-r--r--  arch/powerpc/kernel/process.c              |  16
-rw-r--r--  arch/powerpc/kernel/prom.c                 |   4
-rw-r--r--  arch/powerpc/kernel/prom_init.c            |   2
-rw-r--r--  arch/powerpc/kernel/ptrace.c               |   3
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c           |   7
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c             |   5
-rw-r--r--  arch/powerpc/kernel/setup_64.c             |   2
-rw-r--r--  arch/powerpc/kernel/signal.c               |   8
-rw-r--r--  arch/powerpc/kernel/smp.c                  |  14
-rw-r--r--  arch/powerpc/kernel/time.c                 |   8
-rw-r--r--  arch/powerpc/kernel/traps.c                |   1
-rw-r--r--  arch/powerpc/kernel/uprobes.c              | 184
-rw-r--r--  arch/powerpc/kernel/vdso.c                 |   4
-rw-r--r--  arch/powerpc/kernel/vio.c                  |   1
33 files changed, 734 insertions, 171 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index bb282dd8161..cde12f8a4eb 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_MODULES) += ppc_ksyms.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e8995727b1c..7523539cfe9 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -206,6 +206,7 @@ int main(void)
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
+ DEFINE(PACA_SPRG3, offsetof(struct paca_struct, sprg3));
#endif /* CONFIG_PPC64 */
/* RTAS */
@@ -534,7 +535,6 @@ int main(void)
HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
- HSTATE_FIELD(HSTATE_SPRG3, sprg3);
HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
HSTATE_FIELD(HSTATE_NAPPING, napping);
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 69fdd2322a6..dcd881937f7 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -16,6 +16,8 @@
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
+#include <asm/mmu-book3e.h>
+#include <asm/asm-offsets.h>
_GLOBAL(__e500_icache_setup)
mfspr r0, SPRN_L1CSR1
@@ -73,27 +75,81 @@ _GLOBAL(__setup_cpu_e500v2)
mtlr r4
blr
_GLOBAL(__setup_cpu_e500mc)
- mr r5, r4
- mflr r4
+_GLOBAL(__setup_cpu_e5500)
+ mflr r5
bl __e500_icache_setup
bl __e500_dcache_setup
bl __setup_e500mc_ivors
- mtlr r4
+ /*
+ * We only want to touch IVOR38-41 if we're running on hardware
+ * that supports category E.HV. The architectural way to determine
+ * this is MMUCFG[LPIDSIZE].
+ */
+ mfspr r3, SPRN_MMUCFG
+ rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE
+ beq 1f
+ bl __setup_ehv_ivors
+ b 2f
+1:
+ lwz r3, CPU_SPEC_FEATURES(r4)
+ /* We need this check as cpu_setup is also called for
+ * the secondary cores. So, if we have already cleared
+ * the feature on the primary core, avoid doing it on the
+ * secondary core.
+ */
+ andis. r6, r3, CPU_FTR_EMB_HV@h
+ beq 2f
+ rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
+ stw r3, CPU_SPEC_FEATURES(r4)
+2:
+ mtlr r5
blr
#endif
-/* Right now, restore and setup are the same thing */
+
+#ifdef CONFIG_PPC_BOOK3E_64
_GLOBAL(__restore_cpu_e5500)
-_GLOBAL(__setup_cpu_e5500)
mflr r4
bl __e500_icache_setup
bl __e500_dcache_setup
-#ifdef CONFIG_PPC_BOOK3E_64
bl .__setup_base_ivors
bl .setup_perfmon_ivor
bl .setup_doorbell_ivors
+ /*
+ * We only want to touch IVOR38-41 if we're running on hardware
+ * that supports category E.HV. The architectural way to determine
+ * this is MMUCFG[LPIDSIZE].
+ */
+ mfspr r10,SPRN_MMUCFG
+ rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+ beq 1f
bl .setup_ehv_ivors
-#else
- bl __setup_e500mc_ivors
-#endif
+1:
mtlr r4
blr
+
+_GLOBAL(__setup_cpu_e5500)
+ mflr r5
+ bl __e500_icache_setup
+ bl __e500_dcache_setup
+ bl .__setup_base_ivors
+ bl .setup_perfmon_ivor
+ bl .setup_doorbell_ivors
+ /*
+ * We only want to touch IVOR38-41 if we're running on hardware
+ * that supports category E.HV. The architectural way to determine
+ * this is MMUCFG[LPIDSIZE].
+ */
+ mfspr r10,SPRN_MMUCFG
+ rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+ beq 1f
+ bl .setup_ehv_ivors
+ b 2f
+1:
+ ld r10,CPU_SPEC_FEATURES(r4)
+ LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
+ andc r10,r10,r9
+ std r10,CPU_SPEC_FEATURES(r4)
+2:
+ mtlr r5
+ blr
+#endif
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 455faa38987..0514c21f138 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2016,7 +2016,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_cpu_type = "ppc/e500mc",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e5500,
+#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e5500,
+#endif
.machine_check = machine_check_e500mc,
.platform = "ppce5500",
},
@@ -2034,7 +2036,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_cpu_type = "ppc/e6500",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e5500,
+#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e5500,
+#endif
.machine_check = machine_check_e500mc,
.platform = "ppce6500",
},
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 46943651da2..bd1a2aba599 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -12,6 +12,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -20,7 +21,6 @@
#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
-#include <asm/abs_addr.h>
unsigned int ppc_swiotlb_enable;
@@ -105,3 +105,23 @@ int __init swiotlb_setup_bus_notifier(void)
&ppc_swiotlb_plat_bus_notifier);
return 0;
}
+
+void swiotlb_detect_4g(void)
+{
+ if ((memblock_end_of_DRAM() - 1) > 0xffffffff)
+ ppc_swiotlb_enable = 1;
+}
+
+static int __init swiotlb_late_init(void)
+{
+ if (ppc_swiotlb_enable) {
+ swiotlb_print_info();
+ set_pci_dma_ops(&swiotlb_dma_ops);
+ ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
+ } else {
+ swiotlb_free();
+ }
+
+ return 0;
+}
+subsys_initcall(swiotlb_late_init);
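
[Context note on the new swiotlb_detect_4g() above: memblock_end_of_DRAM() returns the address one past the last byte of RAM, so the "- 1" makes the comparison operate on the highest valid address. A minimal stand-alone model of that check, plain user-space C with made-up dram_end values rather than kernel code, shows why the off-by-one matters on an exactly-4GB machine:]

#include <stdint.h>
#include <stdio.h>

/* Illustrative model only: dram_end stands in for memblock_end_of_DRAM(),
 * i.e. the address one past the last byte of RAM. */
static int needs_swiotlb(uint64_t dram_end)
{
	/* Same shape as swiotlb_detect_4g(): compare the last *valid*
	 * address against the 32-bit DMA limit. */
	return (dram_end - 1) > 0xffffffffULL;
}

int main(void)
{
	/* Exactly 4GB: last byte is 0xffffffff, still 32-bit addressable,
	 * so no bounce buffering is needed. */
	printf("4GB -> %d\n", needs_swiotlb(0x100000000ULL));	/* 0 */
	/* Above 4GB there are pages a 32-bit DMA mask cannot reach. */
	printf("8GB -> %d\n", needs_swiotlb(0x200000000ULL));	/* 1 */
	return 0;
}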
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 355b9d84b0f..8032b97ccdc 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -14,7 +14,6 @@
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
-#include <asm/abs_addr.h>
#include <asm/machdep.h>
/*
@@ -50,7 +49,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
return NULL;
ret = page_address(page);
memset(ret, 0, size);
- *dma_handle = virt_to_abs(ret) + get_dma_offset(dev);
+ *dma_handle = __pa(ret) + get_dma_offset(dev);
return ret;
#endif
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ead5016b02d..af37528da49 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -831,19 +831,56 @@ restore_user:
bnel- load_dbcr0
#endif
-#ifdef CONFIG_PREEMPT
b restore
/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
- /* check current_thread_info->preempt_count */
+ /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
CURRENT_THREAD_INFO(r9, r1)
+ lwz r8,TI_FLAGS(r9)
+ andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
+ beq+ 1f
+
+ addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
+
+ lwz r3,GPR1(r1)
+ subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
+ mr r4,r1 /* src: current exception frame */
+ mr r1,r3 /* Reroute the trampoline frame to r1 */
+
+ /* Copy from the original to the trampoline. */
+ li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
+ li r6,0 /* start offset: 0 */
+ mtctr r5
+2: lwzx r0,r6,r4
+ stwx r0,r6,r3
+ addi r6,r6,4
+ bdnz 2b
+
+ /* Do real store operation to complete stwu */
+ lwz r5,GPR1(r1)
+ stw r8,0(r5)
+
+ /* Clear _TIF_EMULATE_STACK_STORE flag */
+ lis r11,_TIF_EMULATE_STACK_STORE@h
+ addi r5,r9,TI_FLAGS
+0: lwarx r8,0,r5
+ andc r8,r8,r11
+#ifdef CONFIG_IBM405_ERR77
+ dcbt 0,r5
+#endif
+ stwcx. r8,0,r5
+ bne- 0b
+1:
+
+#ifdef CONFIG_PREEMPT
+ /* check current_thread_info->preempt_count */
lwz r0,TI_PREEMPT(r9)
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
- lwz r0,TI_FLAGS(r9)
- andi. r0,r0,_TIF_NEED_RESCHED
+ andi. r8,r8,_TIF_NEED_RESCHED
beq+ restore
+ lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -864,8 +901,6 @@ resume_kernel:
*/
bl trace_hardirqs_on
#endif
-#else
-resume_kernel:
#endif /* CONFIG_PREEMPT */
/* interrupts are hard-disabled at this point */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index b40e0b4815b..0e931aaffca 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -593,6 +593,41 @@ _GLOBAL(ret_from_except_lite)
b .ret_from_except
resume_kernel:
+ /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
+ CURRENT_THREAD_INFO(r9, r1)
+ ld r8,TI_FLAGS(r9)
+ andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
+ beq+ 1f
+
+ addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
+
+ lwz r3,GPR1(r1)
+ subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
+ mr r4,r1 /* src: current exception frame */
+ mr r1,r3 /* Reroute the trampoline frame to r1 */
+
+ /* Copy from the original to the trampoline. */
+ li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
+ li r6,0 /* start offset: 0 */
+ mtctr r5
+2: ldx r0,r6,r4
+ stdx r0,r6,r3
+ addi r6,r6,8
+ bdnz 2b
+
+ /* Do real store operation to complete stwu */
+ lwz r5,GPR1(r1)
+ std r8,0(r5)
+
+ /* Clear _TIF_EMULATE_STACK_STORE flag */
+ lis r11,_TIF_EMULATE_STACK_STORE@h
+ addi r5,r9,TI_FLAGS
+ ldarx r4,0,r5
+ andc r4,r4,r11
+ stdcx. r4,0,r5
+ bne- 0b
+1:
+
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
andi. r0,r4,_TIF_NEED_RESCHED
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 98be7f0cd22..4684e33a26c 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -25,6 +25,8 @@
#include <asm/ppc-opcode.h>
#include <asm/mmu.h>
#include <asm/hw_irq.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_booke_hv_asm.h>
/* XXX This will ultimately add space for a special exception save
* structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
@@ -35,16 +37,18 @@
#define SPECIAL_EXC_FRAME_SIZE INT_FRAME_SIZE
/* Exception prolog code for all exceptions */
-#define EXCEPTION_PROLOG(n, type, addition) \
+#define EXCEPTION_PROLOG(n, intnum, type, addition) \
mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \
mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \
std r10,PACA_EX##type+EX_R10(r13); \
std r11,PACA_EX##type+EX_R11(r13); \
+ PROLOG_STORE_RESTORE_SCRATCH_##type; \
mfcr r10; /* save CR */ \
+ mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \
+ DO_KVM intnum,SPRN_##type##_SRR1; /* KVM hook */ \
+ stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
addition; /* additional code for that exc. */ \
std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \
- stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
- mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \
type##_SET_KSTACK; /* get special stack if necessary */\
andi. r10,r11,MSR_PR; /* save stack pointer */ \
beq 1f; /* branch around if supervisor */ \
@@ -59,6 +63,10 @@
#define SPRN_GEN_SRR0 SPRN_SRR0
#define SPRN_GEN_SRR1 SPRN_SRR1
+#define GDBELL_SET_KSTACK GEN_SET_KSTACK
+#define SPRN_GDBELL_SRR0 SPRN_GSRR0
+#define SPRN_GDBELL_SRR1 SPRN_GSRR1
+
#define CRIT_SET_KSTACK \
ld r1,PACA_CRIT_STACK(r13); \
subi r1,r1,SPECIAL_EXC_FRAME_SIZE;
@@ -77,29 +85,46 @@
#define SPRN_MC_SRR0 SPRN_MCSRR0
#define SPRN_MC_SRR1 SPRN_MCSRR1
-#define NORMAL_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, GEN, addition##_GEN(n))
+#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \
+ EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
+
+#define CRIT_EXCEPTION_PROLOG(n, intnum, addition) \
+ EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))
-#define CRIT_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, CRIT, addition##_CRIT(n))
+#define DBG_EXCEPTION_PROLOG(n, intnum, addition) \
+ EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))
-#define DBG_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, DBG, addition##_DBG(n))
+#define MC_EXCEPTION_PROLOG(n, intnum, addition) \
+ EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))
-#define MC_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, MC, addition##_MC(n))
+#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition) \
+ EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))
+/*
+ * Store user-visible scratch in PACA exception slots and restore proper value
+ */
+#define PROLOG_STORE_RESTORE_SCRATCH_GEN
+#define PROLOG_STORE_RESTORE_SCRATCH_GDBELL
+#define PROLOG_STORE_RESTORE_SCRATCH_DBG
+#define PROLOG_STORE_RESTORE_SCRATCH_MC
+
+#define PROLOG_STORE_RESTORE_SCRATCH_CRIT \
+ mfspr r10,SPRN_SPRG_CRIT_SCRATCH; /* get r13 */ \
+ std r10,PACA_EXCRIT+EX_R13(r13); \
+ ld r11,PACA_SPRG3(r13); \
+ mtspr SPRN_SPRG_CRIT_SCRATCH,r11;
/* Variants of the "addition" argument for the prolog
*/
#define PROLOG_ADDITION_NONE_GEN(n)
+#define PROLOG_ADDITION_NONE_GDBELL(n)
#define PROLOG_ADDITION_NONE_CRIT(n)
#define PROLOG_ADDITION_NONE_DBG(n)
#define PROLOG_ADDITION_NONE_MC(n)
#define PROLOG_ADDITION_MASKABLE_GEN(n) \
- lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
- cmpwi cr0,r11,0; /* yes -> go out of line */ \
+ lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
+ cmpwi cr0,r10,0; /* yes -> go out of line */ \
beq masked_interrupt_book3e_##n
#define PROLOG_ADDITION_2REGS_GEN(n) \
@@ -233,9 +258,9 @@ exc_##n##_bad_stack: \
1:
-#define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \
+#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack) \
START_EXCEPTION(label); \
- NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \
+ NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE) \
ack(r8); \
CHECK_NAPPING(); \
@@ -286,7 +311,8 @@ interrupt_end_book3e:
/* Critical Input Interrupt */
START_EXCEPTION(critical_input);
- CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE)
+ CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
+ PROLOG_ADDITION_NONE)
// EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE)
// bl special_reg_save_crit
// CHECK_NAPPING();
@@ -297,7 +323,8 @@ interrupt_end_book3e:
/* Machine Check Interrupt */
START_EXCEPTION(machine_check);
- CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE)
+ MC_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_MACHINE_CHECK,
+ PROLOG_ADDITION_NONE)
// EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE)
// bl special_reg_save_mc
// addi r3,r1,STACK_FRAME_OVERHEAD
@@ -308,7 +335,8 @@ interrupt_end_book3e:
/* Data Storage Interrupt */
START_EXCEPTION(data_storage)
- NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS)
+ NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
+ PROLOG_ADDITION_2REGS)
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE)
@@ -316,18 +344,21 @@ interrupt_end_book3e:
/* Instruction Storage Interrupt */
START_EXCEPTION(instruction_storage);
- NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS)
+ NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
+ PROLOG_ADDITION_2REGS)
li r15,0
mr r14,r10
EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE)
b storage_fault_common
/* External Input Interrupt */
- MASKABLE_EXCEPTION(0x500, external_input, .do_IRQ, ACK_NONE)
+ MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
+ external_input, .do_IRQ, ACK_NONE)
/* Alignment */
START_EXCEPTION(alignment);
- NORMAL_EXCEPTION_PROLOG(0x600, PROLOG_ADDITION_2REGS)
+ NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
+ PROLOG_ADDITION_2REGS)
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP)
@@ -335,7 +366,8 @@ interrupt_end_book3e:
/* Program Interrupt */
START_EXCEPTION(program);
- NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG)
+ NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
+ PROLOG_ADDITION_1REG)
mfspr r14,SPRN_ESR
EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE)
std r14,_DSISR(r1)
@@ -347,7 +379,8 @@ interrupt_end_book3e:
/* Floating Point Unavailable Interrupt */
START_EXCEPTION(fp_unavailable);
- NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE)
+ NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
+ PROLOG_ADDITION_NONE)
/* we can probably do a shorter exception entry for that one... */
EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP)
ld r12,_MSR(r1)
@@ -362,14 +395,17 @@ interrupt_end_book3e:
b .ret_from_except
/* Decrementer Interrupt */
- MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC)
+ MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
+ decrementer, .timer_interrupt, ACK_DEC)
/* Fixed Interval Timer Interrupt */
- MASKABLE_EXCEPTION(0x980, fixed_interval, .unknown_exception, ACK_FIT)
+ MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
+ fixed_interval, .unknown_exception, ACK_FIT)
/* Watchdog Timer Interrupt */
START_EXCEPTION(watchdog);
- CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE)
+ CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
+ PROLOG_ADDITION_NONE)
// EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE)
// bl special_reg_save_crit
// CHECK_NAPPING();
@@ -388,7 +424,8 @@ interrupt_end_book3e:
/* Auxiliary Processor Unavailable Interrupt */
START_EXCEPTION(ap_unavailable);
- NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE)
+ NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
+ PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE)
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -397,7 +434,8 @@ interrupt_end_book3e:
/* Debug exception as a critical interrupt*/
START_EXCEPTION(debug_crit);
- CRIT_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS)
+ CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
+ PROLOG_ADDITION_2REGS)
/*
* If there is a single step or branch-taken exception in an
@@ -431,7 +469,7 @@ interrupt_end_book3e:
mtcr r10
ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */
ld r11,PACA_EXCRIT+EX_R11(r13)
- mfspr r13,SPRN_SPRG_CRIT_SCRATCH
+ ld r13,PACA_EXCRIT+EX_R13(r13)
rfci
/* Normal debug exception */
@@ -444,7 +482,7 @@ interrupt_end_book3e:
/* Now we mash up things to make it look like we are coming on a
* normal exception
*/
- mfspr r15,SPRN_SPRG_CRIT_SCRATCH
+ ld r15,PACA_EXCRIT+EX_R13(r13)
mtspr SPRN_SPRG_GEN_SCRATCH,r15
mfspr r14,SPRN_DBSR
EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE)
@@ -462,7 +500,8 @@ kernel_dbg_exc:
/* Debug exception as a debug interrupt*/
START_EXCEPTION(debug_debug);
- DBG_EXCEPTION_PROLOG(0xd08, PROLOG_ADDITION_2REGS)
+ DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
+ PROLOG_ADDITION_2REGS)
/*
* If there is a single step or branch-taken exception in an
@@ -523,18 +562,21 @@ kernel_dbg_exc:
b .ret_from_except
START_EXCEPTION(perfmon);
- NORMAL_EXCEPTION_PROLOG(0x260, PROLOG_ADDITION_NONE)
+ NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
+ PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .performance_monitor_exception
b .ret_from_except_lite
/* Doorbell interrupt */
- MASKABLE_EXCEPTION(0x280, doorbell, .doorbell_exception, ACK_NONE)
+ MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
+ doorbell, .doorbell_exception, ACK_NONE)
/* Doorbell critical Interrupt */
START_EXCEPTION(doorbell_crit);
- CRIT_EXCEPTION_PROLOG(0x2a0, PROLOG_ADDITION_NONE)
+ CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
+ PROLOG_ADDITION_NONE)
// EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE)
// bl special_reg_save_crit
// CHECK_NAPPING();
@@ -543,12 +585,24 @@ kernel_dbg_exc:
// b ret_from_crit_except
b .
-/* Guest Doorbell */
- MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE)
+/*
+ * Guest doorbell interrupt
+ * This general exception use GSRRx save/restore registers
+ */
+ START_EXCEPTION(guest_doorbell);
+ GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
+ PROLOG_ADDITION_NONE)
+ EXCEPTION_COMMON(0x2c0, PACA_EXGEN, INTS_KEEP)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .save_nvgprs
+ INTS_RESTORE_HARD
+ bl .unknown_exception
+ b .ret_from_except
/* Guest Doorbell critical Interrupt */
START_EXCEPTION(guest_doorbell_crit);
- CRIT_EXCEPTION_PROLOG(0x2e0, PROLOG_ADDITION_NONE)
+ CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
+ PROLOG_ADDITION_NONE)
// EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE)
// bl special_reg_save_crit
// CHECK_NAPPING();
@@ -559,7 +613,8 @@ kernel_dbg_exc:
/* Hypervisor call */
START_EXCEPTION(hypercall);
- NORMAL_EXCEPTION_PROLOG(0x310, PROLOG_ADDITION_NONE)
+ NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
+ PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .save_nvgprs
@@ -569,7 +624,8 @@ kernel_dbg_exc:
/* Embedded Hypervisor priviledged */
START_EXCEPTION(ehpriv);
- NORMAL_EXCEPTION_PROLOG(0x320, PROLOG_ADDITION_NONE)
+ NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
+ PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .save_nvgprs
@@ -582,44 +638,42 @@ kernel_dbg_exc:
* accordingly and if the interrupt is level sensitive, we hard disable
*/
+.macro masked_interrupt_book3e paca_irq full_mask
+ lbz r10,PACAIRQHAPPENED(r13)
+ ori r10,r10,\paca_irq
+ stb r10,PACAIRQHAPPENED(r13)
+
+ .if \full_mask == 1
+ rldicl r10,r11,48,1 /* clear MSR_EE */
+ rotldi r11,r10,16
+ mtspr SPRN_SRR1,r11
+ .endif
+
+ lwz r11,PACA_EXGEN+EX_CR(r13)
+ mtcr r11
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ ld r11,PACA_EXGEN+EX_R11(r13)
+ mfspr r13,SPRN_SPRG_GEN_SCRATCH
+ rfi
+ b .
+.endm
+
masked_interrupt_book3e_0x500:
- /* XXX When adding support for EPR, use PACA_IRQ_EE_EDGE */
- li r11,PACA_IRQ_EE
- b masked_interrupt_book3e_full_mask
+ // XXX When adding support for EPR, use PACA_IRQ_EE_EDGE
+ masked_interrupt_book3e PACA_IRQ_EE 1
masked_interrupt_book3e_0x900:
- ACK_DEC(r11);
- li r11,PACA_IRQ_DEC
- b masked_interrupt_book3e_no_mask
+ ACK_DEC(r10);
+ masked_interrupt_book3e PACA_IRQ_DEC 0
+
masked_interrupt_book3e_0x980:
- ACK_FIT(r11);
- li r11,PACA_IRQ_DEC
- b masked_interrupt_book3e_no_mask
+ ACK_FIT(r10);
+ masked_interrupt_book3e PACA_IRQ_DEC 0
+
masked_interrupt_book3e_0x280:
masked_interrupt_book3e_0x2c0:
- li r11,PACA_IRQ_DBELL
- b masked_interrupt_book3e_no_mask
+ masked_interrupt_book3e PACA_IRQ_DBELL 0
-masked_interrupt_book3e_no_mask:
- mtcr r10
- lbz r10,PACAIRQHAPPENED(r13)
- or r10,r10,r11
- stb r10,PACAIRQHAPPENED(r13)
- b 1f
-masked_interrupt_book3e_full_mask:
- mtcr r10
- lbz r10,PACAIRQHAPPENED(r13)
- or r10,r10,r11
- stb r10,PACAIRQHAPPENED(r13)
- mfspr r10,SPRN_SRR1
- rldicl r11,r10,48,1 /* clear MSR_EE */
- rotldi r10,r11,16
- mtspr SPRN_SRR1,r10
-1: ld r10,PACA_EXGEN+EX_R10(r13);
- ld r11,PACA_EXGEN+EX_R11(r13);
- mfspr r13,SPRN_SPRG_GEN_SCRATCH;
- rfi
- b .
/*
* Called from arch_local_irq_enable when an interrupt needs
* to be resent. r3 contains either 0x500,0x900,0x260 or 0x280
@@ -1302,25 +1356,11 @@ _GLOBAL(setup_perfmon_ivor)
_GLOBAL(setup_doorbell_ivors)
SET_IVOR(36, 0x280) /* Processor Doorbell */
SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
-
- /* Check MMUCFG[LPIDSIZE] to determine if we have category E.HV */
- mfspr r10,SPRN_MMUCFG
- rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
- beqlr
-
- SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
- SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
blr
_GLOBAL(setup_ehv_ivors)
- /*
- * We may be running as a guest and lack E.HV even on a chip
- * that normally has it.
- */
- mfspr r10,SPRN_MMUCFG
- rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
- beqlr
-
SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
+ SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
+ SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
blr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 39aa97d3ff8..10b658ad65e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -275,6 +275,31 @@ vsx_unavailable_pSeries_1:
STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+ . = 0x1500
+ .global denorm_Hypervisor
+denorm_exception_hv:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG_HSCRATCH0,r13
+ mfspr r13,SPRN_SPRG_HPACA
+ std r9,PACA_EXGEN+EX_R9(r13)
+ std r10,PACA_EXGEN+EX_R10(r13)
+ std r11,PACA_EXGEN+EX_R11(r13)
+ std r12,PACA_EXGEN+EX_R12(r13)
+ mfspr r9,SPRN_SPRG_HSCRATCH0
+ std r9,PACA_EXGEN+EX_R13(r13)
+ mfcr r9
+
+#ifdef CONFIG_PPC_DENORMALISATION
+ mfspr r10,SPRN_HSRR1
+ mfspr r11,SPRN_HSRR0 /* save HSRR0 */
+ andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
+ addi r11,r11,-4 /* HSRR0 is next instruction */
+ bne+ denorm_assist
+#endif
+
+ EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
+ KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
+
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
@@ -336,6 +361,103 @@ do_stab_bolted_pSeries:
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
+#ifdef CONFIG_PPC_DENORMALISATION
+denorm_assist:
+BEGIN_FTR_SECTION
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER6 do that here for all FP regs.
+ */
+ mfmsr r10
+ ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
+ xori r10,r10,(MSR_FE0|MSR_FE1)
+ mtmsrd r10
+ sync
+ fmr 0,0
+ fmr 1,1
+ fmr 2,2
+ fmr 3,3
+ fmr 4,4
+ fmr 5,5
+ fmr 6,6
+ fmr 7,7
+ fmr 8,8
+ fmr 9,9
+ fmr 10,10
+ fmr 11,11
+ fmr 12,12
+ fmr 13,13
+ fmr 14,14
+ fmr 15,15
+ fmr 16,16
+ fmr 17,17
+ fmr 18,18
+ fmr 19,19
+ fmr 20,20
+ fmr 21,21
+ fmr 22,22
+ fmr 23,23
+ fmr 24,24
+ fmr 25,25
+ fmr 26,26
+ fmr 27,27
+ fmr 28,28
+ fmr 29,29
+ fmr 30,30
+ fmr 31,31
+FTR_SECTION_ELSE
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER7 do that here for the first 32 VSX registers only.
+ */
+ mfmsr r10
+ oris r10,r10,MSR_VSX@h
+ mtmsrd r10
+ sync
+ XVCPSGNDP(0,0,0)
+ XVCPSGNDP(1,1,1)
+ XVCPSGNDP(2,2,2)
+ XVCPSGNDP(3,3,3)
+ XVCPSGNDP(4,4,4)
+ XVCPSGNDP(5,5,5)
+ XVCPSGNDP(6,6,6)
+ XVCPSGNDP(7,7,7)
+ XVCPSGNDP(8,8,8)
+ XVCPSGNDP(9,9,9)
+ XVCPSGNDP(10,10,10)
+ XVCPSGNDP(11,11,11)
+ XVCPSGNDP(12,12,12)
+ XVCPSGNDP(13,13,13)
+ XVCPSGNDP(14,14,14)
+ XVCPSGNDP(15,15,15)
+ XVCPSGNDP(16,16,16)
+ XVCPSGNDP(17,17,17)
+ XVCPSGNDP(18,18,18)
+ XVCPSGNDP(19,19,19)
+ XVCPSGNDP(20,20,20)
+ XVCPSGNDP(21,21,21)
+ XVCPSGNDP(22,22,22)
+ XVCPSGNDP(23,23,23)
+ XVCPSGNDP(24,24,24)
+ XVCPSGNDP(25,25,25)
+ XVCPSGNDP(26,26,26)
+ XVCPSGNDP(27,27,27)
+ XVCPSGNDP(28,28,28)
+ XVCPSGNDP(29,29,29)
+ XVCPSGNDP(30,30,30)
+ XVCPSGNDP(31,31,31)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
+ mtspr SPRN_HSRR0,r11
+ mtcrf 0x80,r9
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r12,PACA_EXGEN+EX_R12(r13)
+ ld r13,PACA_EXGEN+EX_R13(r13)
+ HRFID
+ b .
+#endif
+
.align 7
/* moved from 0xe00 */
STD_EXCEPTION_HV(., 0xe02, h_data_storage)
@@ -495,6 +617,7 @@ machine_check_common:
STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
+ STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
#ifdef CONFIG_ALTIVEC
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
@@ -960,7 +1083,9 @@ _GLOBAL(do_stab_bolted)
rldimi r10,r11,7,52 /* r10 = first ste of the group */
/* Calculate VSID */
- /* This is a kernel address, so protovsid = ESID */
+ /* This is a kernel address, so protovsid = ESID | 1 << 37 */
+ li r9,0x1
+ rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
ASM_VSID_SCRAMBLE(r11, r9, 256M)
rldic r9,r11,12,16 /* r9 = vsid << 12 */
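
[The "1 << 37" above ties into the 64TB user address space work mentioned in the merge description. A rough worked check, with the constant split 19 + 18 being an assumption based on this series rather than something spelled out in this hunk:]

  user address space = 2^USER_ESID_BITS segments * 2^SID_SHIFT bytes/segment
                     = 2^18 * 2^28
                     = 2^46 bytes
                     = 64 TB

  kernel proto-VSID bit = CONTEXT_BITS + USER_ESID_BITS = 19 + 18 = 37

[Setting bit 37 keeps kernel proto-VSIDs disjoint from every user proto-VSID before the scramble.]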
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 18bdf74fa16..06c8202a69c 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -289,8 +289,7 @@ int __init fadump_reserve_mem(void)
else
memory_limit = memblock_end_of_DRAM();
printk(KERN_INFO "Adjusted memory_limit for firmware-assisted"
- " dump, now %#016llx\n",
- (unsigned long long)memory_limit);
+ " dump, now %#016llx\n", memory_limit);
}
if (memory_limit)
memory_boundary = memory_limit;
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 0f59863c3ad..6f62a737f60 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -895,15 +895,11 @@ _GLOBAL(__setup_e500mc_ivors)
mtspr SPRN_IVOR36,r3
li r3,CriticalDoorbell@l
mtspr SPRN_IVOR37,r3
+ sync
+ blr
- /*
- * We only want to touch IVOR38-41 if we're running on hardware
- * that supports category E.HV. The architectural way to determine
- * this is MMUCFG[LPIDSIZE].
- */
- mfspr r3, SPRN_MMUCFG
- andis. r3, r3, MMUCFG_LPIDSIZE@h
- beq no_hv
+/* setup ehv ivors for */
+_GLOBAL(__setup_ehv_ivors)
li r3,GuestDoorbell@l
mtspr SPRN_IVOR38,r3
li r3,CriticalGuestDoorbell@l
@@ -912,14 +908,8 @@ _GLOBAL(__setup_e500mc_ivors)
mtspr SPRN_IVOR40,r3
li r3,Ehvpriv@l
mtspr SPRN_IVOR41,r3
-skip_hv_ivors:
sync
blr
-no_hv:
- lwz r3, CPU_SPEC_FEATURES(r5)
- rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
- stw r3, CPU_SPEC_FEATURES(r5)
- b skip_hv_ivors
#ifdef CONFIG_SPE
/*
@@ -1043,6 +1033,34 @@ _GLOBAL(flush_dcache_L1)
blr
+/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
+_GLOBAL(__flush_disable_L1)
+ mflr r10
+ bl flush_dcache_L1 /* Flush L1 d-cache */
+ mtlr r10
+
+ mfspr r4, SPRN_L1CSR0 /* Invalidate and disable d-cache */
+ li r5, 2
+ rlwimi r4, r5, 0, 3
+
+ msync
+ isync
+ mtspr SPRN_L1CSR0, r4
+ isync
+
+1: mfspr r4, SPRN_L1CSR0 /* Wait for the invalidate to finish */
+ andi. r4, r4, 2
+ bne 1b
+
+ mfspr r4, SPRN_L1CSR1 /* Invalidate and disable i-cache */
+ li r5, 2
+ rlwimi r4, r5, 0, 3
+
+ mtspr SPRN_L1CSR1, r4
+ isync
+
+ blr
+
#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
.globl __secondary_start
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 956a4c496de..a89cae481b0 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -73,7 +73,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
* If so, DABR will be populated in single_step_dabr_instruction().
*/
if (current->thread.last_hit_ubp != bp)
- set_dabr(info->address | info->type | DABR_TRANSLATION);
+ set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
return 0;
}
@@ -97,7 +97,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
}
*slot = NULL;
- set_dabr(0);
+ set_dabr(0, 0);
}
/*
@@ -170,6 +170,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
info->address = bp->attr.bp_addr;
info->len = bp->attr.bp_len;
+ info->dabrx = DABRX_ALL;
+ if (bp->attr.exclude_user)
+ info->dabrx &= ~DABRX_USER;
+ if (bp->attr.exclude_kernel)
+ info->dabrx &= ~DABRX_KERNEL;
+ if (bp->attr.exclude_hv)
+ info->dabrx &= ~DABRX_HYP;
/*
* Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
@@ -197,7 +204,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
info = counter_arch_bp(tsk->thread.last_hit_ubp);
regs->msr &= ~MSR_SE;
- set_dabr(info->address | info->type | DABR_TRANSLATION);
+ set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
tsk->thread.last_hit_ubp = NULL;
}
@@ -215,7 +222,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
unsigned long dar = regs->dar;
/* Disable breakpoints during exception handling */
- set_dabr(0);
+ set_dabr(0, 0);
/*
* The counter may be concurrently released but that can only
@@ -281,7 +288,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
if (!info->extraneous_interrupt)
perf_bp_event(bp, regs);
- set_dabr(info->address | info->type | DABR_TRANSLATION);
+ set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
out:
rcu_read_unlock();
return rc;
@@ -294,7 +301,7 @@ int __kprobes single_step_dabr_instruction(struct die_args *args)
{
struct pt_regs *regs = args->regs;
struct perf_event *bp = NULL;
- struct arch_hw_breakpoint *bp_info;
+ struct arch_hw_breakpoint *info;
bp = current->thread.last_hit_ubp;
/*
@@ -304,16 +311,16 @@ int __kprobes single_step_dabr_instruction(struct die_args *args)
if (!bp)
return NOTIFY_DONE;
- bp_info = counter_arch_bp(bp);
+ info = counter_arch_bp(bp);
/*
* We shall invoke the user-defined callback function in the single
* stepping handler to confirm to 'trigger-after-execute' semantics
*/
- if (!bp_info->extraneous_interrupt)
+ if (!info->extraneous_interrupt)
perf_bp_event(bp, regs);
- set_dabr(bp_info->address | bp_info->type | DABR_TRANSLATION);
+ set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
current->thread.last_hit_ubp = NULL;
/*
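
[The dabrx computation added above means the exclude_user/exclude_kernel/exclude_hv bits of a perf hardware-breakpoint event now select the DABRX match bits. Purely as an illustration of how such an event is typically requested from user space via the standard perf_event_open() interface; the watched variable and length here are arbitrary examples:]

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static long watched;	/* example variable to watch for writes */

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.size = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_W;		/* write-only watchpoint */
	attr.bp_addr = (unsigned long)&watched;
	attr.bp_len = HW_BREAKPOINT_LEN_8;
	attr.exclude_kernel = 1;		/* -> DABRX_KERNEL cleared */
	attr.exclude_hv = 1;			/* -> DABRX_HYP cleared */

	fd = syscall(__NR_perf_event_open, &attr, 0 /* self */,
		     -1 /* any cpu */, -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	watched = 42;	/* hits the watchpoint; read the count via read(fd, ...) */
	close(fd);
	return 0;
}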
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index b01d14eeca8..8220baa46fa 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -47,7 +47,6 @@
#include <linux/stat.h>
#include <linux/of_platform.h>
#include <asm/ibmebus.h>
-#include <asm/abs_addr.h>
static struct device ibmebus_bus_device = { /* fake "parent" device */
.init_name = "ibmebus",
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ff5a6ce027b..8226c6cb348 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -215,7 +215,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
spin_lock_irqsave(&(pool->lock), flags);
again:
- if ((pass == 0) && handle && *handle)
+ if ((pass == 0) && handle && *handle &&
+ (*handle >= pool->start) && (*handle < pool->end))
start = *handle;
else
start = pool->hint;
@@ -236,7 +237,9 @@ again:
* but on second pass, start at 0 in pool 0.
*/
if ((start & mask) >= limit || pass > 0) {
+ spin_unlock(&(pool->lock));
pool = &(tbl->pools[0]);
+ spin_lock(&(pool->lock));
start = pool->start;
} else {
start &= mask;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 1f017bb7a7c..71413f41278 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -489,10 +489,10 @@ void do_IRQ(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
unsigned int irq;
- trace_irq_entry(regs);
-
irq_enter();
+ trace_irq_entry(regs);
+
check_stack_overflow();
/*
@@ -511,10 +511,10 @@ void do_IRQ(struct pt_regs *regs)
else
__get_cpu_var(irq_stat).spurious_irqs++;
+ trace_irq_exit(regs);
+
irq_exit();
set_irq_regs(old_regs);
-
- trace_irq_exit(regs);
}
void __init init_IRQ(void)
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 5df77779440..fa9f6c72f55 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -165,7 +165,7 @@ void __init reserve_crashkernel(void)
if (memory_limit && memory_limit <= crashk_res.end) {
memory_limit = crashk_res.end + 1;
printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
- (unsigned long long)memory_limit);
+ memory_limit);
}
printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
@@ -204,6 +204,12 @@ static struct property crashk_size_prop = {
.value = &crashk_size,
};
+static struct property memory_limit_prop = {
+ .name = "linux,memory-limit",
+ .length = sizeof(unsigned long long),
+ .value = &memory_limit,
+};
+
static void __init export_crashk_values(struct device_node *node)
{
struct property *prop;
@@ -223,6 +229,12 @@ static void __init export_crashk_values(struct device_node *node)
crashk_size = resource_size(&crashk_res);
prom_add_property(node, &crashk_size_prop);
}
+
+ /*
+ * memory_limit is required by the kexec-tools to limit the
+ * crash regions to the actual memory used.
+ */
+ prom_update_property(node, &memory_limit_prop);
}
static int __init kexec_setup(void)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index fbe1a12dc7f..cd6da855090 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -142,6 +142,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
new_paca->hw_cpu_id = 0xffff;
new_paca->kexec_state = KEXEC_STATE_NONE;
new_paca->__current = &init_task;
+ new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_STD_MMU_64
new_paca->slb_shadow_ptr = &slb_shadow[cpu];
#endif /* CONFIG_PPC_STD_MMU_64 */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 43fea543d68..7f94f760dd0 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -980,13 +980,14 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
if (i >= 3 && bus->self->transparent)
continue;
- /* If we are going to re-assign everything, mark the resource
- * as unset and move it down to 0
+ /* If we're going to reassign everything, we can
+ * shrink the P2P resource to have size as being
+ * of 0 in order to save space.
*/
if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
res->flags |= IORESOURCE_UNSET;
- res->end -= res->start;
res->start = 0;
+ res->end = -1;
continue;
}
@@ -1248,7 +1249,14 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
pr_warning("PCI: Cannot allocate resource region "
"%d of PCI bridge %d, will remap\n", i, bus->number);
clear_resource:
- res->start = res->end = 0;
+ /* The resource might be figured out when doing
+ * reassignment based on the resources required
+ * by the downstream PCI devices. Here we set
+ * the size of the resource to be 0 in order to
+ * save more space.
+ */
+ res->start = 0;
+ res->end = -1;
res->flags = 0;
}
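
[A small aside on the start = 0 / end = -1 convention used in the two hunks above: the kernel computes a resource's size as end - start + 1, so this encoding collapses the bridge window to zero length while keeping it distinguishable from a real resource that starts at 0. A minimal model in plain C, mirroring only the resource_size() arithmetic rather than the kernel structs:]

#include <stdio.h>

/* Mirrors the kernel's resource_size() arithmetic: end - start + 1. */
struct res { unsigned long start, end; };

static unsigned long res_size(const struct res *r)
{
	return r->end - r->start + 1;
}

int main(void)
{
	/* The "unset" encoding from the hunks above: start = 0, end = -1. */
	struct res unset = { .start = 0, .end = (unsigned long)-1 };

	printf("size = %lu\n", res_size(&unset));	/* 0: window reserves nothing */
	return 0;
}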
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e9cb51f5f80..d5ad666efd8 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -258,6 +258,7 @@ void do_send_trap(struct pt_regs *regs, unsigned long address,
{
siginfo_t info;
+ current->thread.trap_nr = signal_code;
if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
11, SIGSEGV) == NOTIFY_STOP)
return;
@@ -275,6 +276,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
{
siginfo_t info;
+ current->thread.trap_nr = TRAP_HWBKPT;
if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
11, SIGSEGV) == NOTIFY_STOP)
return;
@@ -283,7 +285,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
return;
/* Clear the DABR */
- set_dabr(0);
+ set_dabr(0, 0);
/* Deliver the signal to userspace */
info.si_signo = SIGTRAP;
@@ -364,18 +366,19 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
{
if (thread->dabr) {
thread->dabr = 0;
- set_dabr(0);
+ thread->dabrx = 0;
+ set_dabr(0, 0);
}
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
-int set_dabr(unsigned long dabr)
+int set_dabr(unsigned long dabr, unsigned long dabrx)
{
__get_cpu_var(current_dabr) = dabr;
if (ppc_md.set_dabr)
- return ppc_md.set_dabr(dabr);
+ return ppc_md.set_dabr(dabr, dabrx);
/* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -385,9 +388,8 @@ int set_dabr(unsigned long dabr)
#endif
#elif defined(CONFIG_PPC_BOOK3S)
mtspr(SPRN_DABR, dabr);
+ mtspr(SPRN_DABRX, dabrx);
#endif
-
-
return 0;
}
@@ -480,7 +482,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
*/
#ifndef CONFIG_HAVE_HW_BREAKPOINT
if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
- set_dabr(new->thread.dabr);
+ set_dabr(new->thread.dabr, new->thread.dabrx);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index f191bf02943..37725e86651 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -78,7 +78,7 @@ static int __init early_parse_mem(char *p)
return 1;
memory_limit = PAGE_ALIGN(memparse(p, &p));
- DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit);
+ DBG("memory limit = 0x%llx\n", memory_limit);
return 0;
}
@@ -661,7 +661,7 @@ void __init early_init_devtree(void *params)
/* make sure we've parsed cmdline for mem= before this */
if (memory_limit)
- first_memblock_size = min(first_memblock_size, memory_limit);
+ first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
setup_initial_memory_limit(memstart_addr, first_memblock_size);
/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 47834a3f493..cb6c123722a 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1748,7 +1748,7 @@ static void __init prom_initialize_tce_table(void)
* else will impact performance, so we always allocate 8MB.
* Anton
*/
- if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
+ if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
minsize = 8UL << 20;
else
minsize = 4UL << 20;
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index c10fc28b909..79d8e56470d 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -960,6 +960,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
thread->ptrace_bps[0] = bp;
ptrace_put_breakpoints(task);
thread->dabr = data;
+ thread->dabrx = DABRX_ALL;
return 0;
}
@@ -983,6 +984,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
/* Move contents to the DABR register */
task->thread.dabr = data;
+ task->thread.dabrx = DABRX_ALL;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
/* As described above, it was assumed 3 bits were passed with the data
* address, but we will assume only the mode bits will be passed
@@ -1397,6 +1399,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
dabr |= DABR_DATA_WRITE;
child->thread.dabr = dabr;
+ child->thread.dabrx = DABRX_ALL;
return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 2c0ee640563..20b0120db0c 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -21,7 +21,6 @@
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>
-#include <asm/abs_addr.h>
#define MODULE_VERS "1.0"
#define MODULE_NAME "rtas_flash"
@@ -582,7 +581,7 @@ static void rtas_flash_firmware(int reboot_type)
flist = (struct flash_block_list *)&rtas_data_buf[0];
flist->num_blocks = 0;
flist->next = rtas_firmware_flash_list;
- rtas_block_list = virt_to_abs(flist);
+ rtas_block_list = __pa(flist);
if (rtas_block_list >= 4UL*1024*1024*1024) {
printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
spin_unlock(&rtas_data_buf_lock);
@@ -596,13 +595,13 @@ static void rtas_flash_firmware(int reboot_type)
for (f = flist; f; f = next) {
/* Translate data addrs to absolute */
for (i = 0; i < f->num_blocks; i++) {
- f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data);
+ f->blocks[i].data = (char *)__pa(f->blocks[i].data);
image_size += f->blocks[i].length;
}
next = f->next;
/* Don't translate NULL pointer for last entry */
if (f->next)
- f->next = (struct flash_block_list *)virt_to_abs(f->next);
+ f->next = (struct flash_block_list *)__pa(f->next);
else
f->next = NULL;
/* make num_blocks into the version/length field */
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 179af906dcd..6de63e3250b 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -81,7 +81,7 @@ int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
return PCIBIOS_DEVICE_NOT_FOUND;
if (returnval == EEH_IO_ERROR_VALUE(size) &&
- eeh_dn_check_failure (pdn->node, NULL))
+ eeh_dev_check_failure(of_node_to_eeh_dev(pdn->node)))
return PCIBIOS_DEVICE_NOT_FOUND;
return PCIBIOS_SUCCESSFUL;
@@ -275,9 +275,6 @@ void __init find_and_init_phbs(void)
of_node_put(root);
pci_devs_phb_init();
- /* Create EEH devices for all PHBs */
- eeh_dev_phb_init();
-
/*
* PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
* in chosen.
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 389bd4f0cdb..efb6a41b313 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -208,6 +208,8 @@ void __init early_setup(unsigned long dt_ptr)
/* Fix up paca fields required for the boot cpu */
get_paca()->cpu_start = 1;
+ /* Allow percpu accesses to "work" until we setup percpu data */
+ get_paca()->data_offset = 0;
/* Probe the machine type */
probe_machine();
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 5c023c9cf16..a2dc75793bd 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -11,6 +11,7 @@
#include <linux/tracehook.h>
#include <linux/signal.h>
+#include <linux/uprobes.h>
#include <linux/key.h>
#include <asm/hw_breakpoint.h>
#include <asm/uaccess.h>
@@ -130,7 +131,7 @@ static int do_signal(struct pt_regs *regs)
* triggered inside the kernel.
*/
if (current->thread.dabr)
- set_dabr(current->thread.dabr);
+ set_dabr(current->thread.dabr, current->thread.dabrx);
#endif
/* Re-enable the breakpoints for the signal stack */
thread_change_pc(current, regs);
@@ -157,6 +158,11 @@ static int do_signal(struct pt_regs *regs)
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
+ if (thread_info_flags & _TIF_UPROBE) {
+ clear_thread_flag(TIF_UPROBE);
+ uprobe_notify_resume(regs);
+ }
+
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8d4214afc21..2b952b5386f 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -102,7 +102,7 @@ int __devinit smp_generic_kick_cpu(int nr)
* Ok it's not there, so it might be soft-unplugged, let's
* try to bring it back
*/
- per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
+ generic_set_cpu_up(nr);
smp_wmb();
smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */
@@ -171,7 +171,7 @@ int smp_request_message_ipi(int virq, int msg)
}
#endif
err = request_irq(virq, smp_ipi_action[msg],
- IRQF_PERCPU | IRQF_NO_THREAD,
+ IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
smp_ipi_name[msg], 0);
WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
virq, smp_ipi_name[msg], err);
@@ -413,6 +413,16 @@ void generic_set_cpu_dead(unsigned int cpu)
per_cpu(cpu_state, cpu) = CPU_DEAD;
}
+/*
+ * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
+ * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
+ * which makes the delay in generic_cpu_die() not happen.
+ */
+void generic_set_cpu_up(unsigned int cpu)
+{
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+}
+
int generic_check_cpu_restart(unsigned int cpu)
{
return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index eaa9d0e6abc..c9986fd400d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -508,8 +508,6 @@ void timer_interrupt(struct pt_regs * regs)
*/
may_hard_irq_enable();
- trace_timer_interrupt_entry(regs);
-
__get_cpu_var(irq_stat).timer_irqs++;
#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
@@ -520,6 +518,8 @@ void timer_interrupt(struct pt_regs * regs)
old_regs = set_irq_regs(regs);
irq_enter();
+ trace_timer_interrupt_entry(regs);
+
if (test_irq_work_pending()) {
clear_irq_work_pending();
irq_work_run();
@@ -544,10 +544,10 @@ void timer_interrupt(struct pt_regs * regs)
}
#endif
+ trace_timer_interrupt_exit(regs);
+
irq_exit();
set_irq_regs(old_regs);
-
- trace_timer_interrupt_exit(regs);
}
/*
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index ae0843fa7a6..32518401af6 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -251,6 +251,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
local_irq_enable();
+ current->thread.trap_nr = code;
memset(&info, 0, sizeof(info));
info.si_signo = signr;
info.si_code = code;
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
new file mode 100644
index 00000000000..d2d46d1014f
--- /dev/null
+++ b/arch/powerpc/kernel/uprobes.c
@@ -0,0 +1,184 @@
+/*
+ * User-space Probes (UProbes) for powerpc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2007-2012
+ *
+ * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/uprobes.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
+
+#include <asm/sstep.h>
+
+#define UPROBE_TRAP_NR UINT_MAX
+
+/**
+ * arch_uprobe_analyze_insn
+ * @mm: the probed address space.
+ * @arch_uprobe: the probepoint information.
+ * @addr: vaddr to probe.
+ * Return 0 on success or a -ve number on error.
+ */
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
+ struct mm_struct *mm, unsigned long addr)
+{
+ if (addr & 0x03)
+ return -EINVAL;
+
+ /*
+ * We currently don't support a uprobe on an already
+ * existing breakpoint instruction underneath
+ */
+ if (is_trap(auprobe->ainsn))
+ return -ENOTSUPP;
+ return 0;
+}
+
+/*
+ * arch_uprobe_pre_xol - prepare to execute out of line.
+ * @auprobe: the probepoint information.
+ * @regs: reflects the saved user state of current task.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct arch_uprobe_task *autask = &current->utask->autask;
+
+ autask->saved_trap_nr = current->thread.trap_nr;
+ current->thread.trap_nr = UPROBE_TRAP_NR;
+ regs->nip = current->utask->xol_vaddr;
+ return 0;
+}
+
+/**
+ * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
+ * @regs: Reflects the saved state of the task after it has hit a breakpoint
+ * instruction.
+ * Return the address of the breakpoint instruction.
+ */
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+/*
+ * If xol insn itself traps and generates a signal (SIGILL/SIGSEGV/etc),
+ * then detect the case where a singlestepped instruction jumps back to its
+ * own address. It is assumed that anything like do_page_fault/do_trap/etc
+ * sets thread.trap_nr != UINT_MAX.
+ *
+ * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
+ * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
+ * UPROBE_TRAP_NR == UINT_MAX set by arch_uprobe_pre_xol().
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+ if (t->thread.trap_nr != UPROBE_TRAP_NR)
+ return true;
+
+ return false;
+}
+
+/*
+ * Called after single-stepping. To avoid the SMP problems that can
+ * occur when we temporarily put back the original opcode to
+ * single-step, we single-stepped a copy of the instruction.
+ *
+ * This function prepares to resume execution after the single-step.
+ */
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
+
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
+
+ /*
+ * On powerpc, except for loads and stores, most instructions
+ * including ones that alter code flow (branches, calls, returns)
+ * are emulated in the kernel. We get here only if the emulation
+ * support doesn't exist and have to fix-up the next instruction
+ * to be executed.
+ */
+ regs->nip = utask->vaddr + MAX_UINSN_BYTES;
+ return 0;
+}
+
+/* callback routine for handling exceptions. */
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+ struct pt_regs *regs = args->regs;
+
+ /* regs == NULL is a kernel bug */
+ if (WARN_ON(!regs))
+ return NOTIFY_DONE;
+
+ /* We are only interested in userspace traps */
+ if (!user_mode(regs))
+ return NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_BPT:
+ if (uprobe_pre_sstep_notifier(regs))
+ return NOTIFY_STOP;
+ break;
+ case DIE_SSTEP:
+ if (uprobe_post_sstep_notifier(regs))
+ return NOTIFY_STOP;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/*
+ * This function gets called when XOL instruction either gets trapped or
+ * the thread has a fatal signal, so reset the instruction pointer to its
+ * probed address.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
+ instruction_pointer_set(regs, utask->vaddr);
+}
+
+/*
+ * See if the instruction can be emulated.
+ * Returns true if instruction was emulated, false otherwise.
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ int ret;
+
+ /*
+ * emulate_step() returns 1 if the insn was successfully emulated.
+ * For all other cases, we need to single-step in hardware.
+ */
+ ret = emulate_step(regs, auprobe->ainsn);
+ if (ret > 0)
+ return true;
+
+ return false;
+}
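
[The new file above only provides the powerpc arch hooks (instruction analysis, XOL fix-ups, trap-number bookkeeping); they are driven by the generic uprobes layer. As a hedged sketch only, not part of this merge, here is how an in-kernel user might register a consumer with that layer, assuming the uprobe_register()/uprobe_unregister() API of this kernel generation; "/bin/true" and the 0x1234 file offset are made-up placeholders:]

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>

static loff_t probe_offset = 0x1234;		/* placeholder instruction offset */
static struct inode *probe_inode;

static int sample_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	pr_info("uprobe hit, nip=%lx\n", instruction_pointer(regs));
	return 0;
}

static struct uprobe_consumer sample_consumer = {
	.handler = sample_handler,
};

static int __init sample_init(void)
{
	struct path path;
	int ret;

	ret = kern_path("/bin/true", LOOKUP_FOLLOW, &path);	/* placeholder binary */
	if (ret)
		return ret;
	probe_inode = igrab(path.dentry->d_inode);
	path_put(&path);

	return uprobe_register(probe_inode, probe_offset, &sample_consumer);
}

static void __exit sample_exit(void)
{
	uprobe_unregister(probe_inode, probe_offset, &sample_consumer);
	iput(probe_inode);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");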
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index b67db22e102..1b2076f049c 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -723,9 +723,7 @@ int __cpuinit vdso_getcpu_init(void)
val = (cpu & 0xfff) | ((node & 0xffff) << 16);
mtspr(SPRN_SPRG3, val);
-#ifdef CONFIG_KVM_BOOK3S_HANDLER
- get_paca()->kvm_hstate.sprg3 = val;
-#endif
+ get_paca()->sprg3 = val;
put_cpu();
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 02b32216bbc..201ba59738b 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -33,7 +33,6 @@
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
-#include <asm/abs_addr.h>
#include <asm/page.h>
#include <asm/hvcall.h>