Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile              1
-rw-r--r--  arch/powerpc/kernel/cpu_setup_a2.S      120
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power.S     2
-rw-r--r--  arch/powerpc/kernel/cputable.c           61
-rw-r--r--  arch/powerpc/kernel/eeh.c                38
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c         24
-rw-r--r--  arch/powerpc/kernel/eeh_event.c          21
-rw-r--r--  arch/powerpc/kernel/eeh_pe.c             60
-rw-r--r--  arch/powerpc/kernel/entry_64.S            6
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S     16
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S     64
-rw-r--r--  arch/powerpc/kernel/ftrace.c             52
-rw-r--r--  arch/powerpc/kernel/head_40x.S           19
-rw-r--r--  arch/powerpc/kernel/idle_power7.S         2
-rw-r--r--  arch/powerpc/kernel/iomap.c              20
-rw-r--r--  arch/powerpc/kernel/kprobes.c             9
-rw-r--r--  arch/powerpc/kernel/module_64.c          11
-rw-r--r--  arch/powerpc/kernel/process.c             8
-rw-r--r--  arch/powerpc/kernel/prom.c                7
-rw-r--r--  arch/powerpc/kernel/prom_init.c         211
-rw-r--r--  arch/powerpc/kernel/prom_init_check.sh    4
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c          6
-rw-r--r--  arch/powerpc/kernel/setup-common.c       10
-rw-r--r--  arch/powerpc/kernel/signal_32.c           9
-rw-r--r--  arch/powerpc/kernel/signal_64.c           9
-rw-r--r--  arch/powerpc/kernel/smp.c                 4
-rw-r--r--  arch/powerpc/kernel/time.c                2
-rw-r--r--  arch/powerpc/kernel/traps.c               2
-rw-r--r--  arch/powerpc/kernel/udbg.c                2
-rw-r--r--  arch/powerpc/kernel/udbg_16550.c         11
30 files changed, 280 insertions, 531 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index fab19ec2559..670c312d914 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -43,7 +43,6 @@ obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
-obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S
deleted file mode 100644
index 61f079e05b6..00000000000
--- a/arch/powerpc/kernel/cpu_setup_a2.S
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * A2 specific assembly support code
- *
- * Copyright 2009 Ben Herrenschmidt, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/asm-offsets.h>
-#include <asm/ppc_asm.h>
-#include <asm/ppc-opcode.h>
-#include <asm/processor.h>
-#include <asm/reg_a2.h>
-#include <asm/reg.h>
-#include <asm/thread_info.h>
-
-/*
- * Disable thdid and class fields in ERATs to bump PID to full 14 bits capacity.
- * This also prevents external LPID accesses but that isn't a problem when not a
- * guest. Under PV, this setting will be ignored and MMUCR will return the right
- * number of PID bits we can use.
- */
-#define MMUCR1_EXTEND_PID \
- (MMUCR1_ICTID | MMUCR1_ITTID | MMUCR1_DCTID | \
- MMUCR1_DTTID | MMUCR1_DCCD)
-
-/*
- * Use extended PIDs if enabled.
- * Don't clear the ERATs on context sync events and enable I & D LRU.
- * Enable ERAT back invalidate when tlbwe overwrites an entry.
- */
-#define INITIAL_MMUCR1 \
- (MMUCR1_EXTEND_PID | MMUCR1_CSINV_NEVER | MMUCR1_IRRE | \
- MMUCR1_DRRE | MMUCR1_TLBWE_BINV)
-
-_GLOBAL(__setup_cpu_a2)
- /* Some of these are actually thread local and some are
- * core local but doing it always won't hurt
- */
-
-#ifdef CONFIG_PPC_ICSWX
- /* Make sure ACOP starts out as zero */
- li r3,0
- mtspr SPRN_ACOP,r3
-
- /* Skip the following if we are in Guest mode */
- mfmsr r3
- andis. r0,r3,MSR_GS@h
- bne _icswx_skip_guest
-
- /* Enable icswx instruction */
- mfspr r3,SPRN_A2_CCR2
- ori r3,r3,A2_CCR2_ENABLE_ICSWX
- mtspr SPRN_A2_CCR2,r3
-
- /* Unmask all CTs in HACOP */
- li r3,-1
- mtspr SPRN_HACOP,r3
-_icswx_skip_guest:
-#endif /* CONFIG_PPC_ICSWX */
-
- /* Enable doorbell */
- mfspr r3,SPRN_A2_CCR2
- oris r3,r3,A2_CCR2_ENABLE_PC@h
- mtspr SPRN_A2_CCR2,r3
- isync
-
- /* Setup CCR0 to disable power saving for now as it's busted
- * in the current implementations. Setup CCR1 to wake on
- * interrupts normally (we write the default value but who
- * knows what FW may have clobbered...)
- */
- li r3,0
- mtspr SPRN_A2_CCR0, r3
- LOAD_REG_IMMEDIATE(r3,0x0f0f0f0f)
- mtspr SPRN_A2_CCR1, r3
-
- /* Initialise MMUCR1 */
- lis r3,INITIAL_MMUCR1@h
- ori r3,r3,INITIAL_MMUCR1@l
- mtspr SPRN_MMUCR1,r3
-
- /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
- LOAD_REG_IMMEDIATE(r3, 0x000a7531)
- mtspr SPRN_MMUCR2,r3
-
- /* Set MMUCR3 to write all thids bit to the TLB */
- LOAD_REG_IMMEDIATE(r3, 0x0000000f)
- mtspr SPRN_MMUCR3,r3
-
- /* Don't do ERAT stuff if running guest mode */
- mfmsr r3
- andis. r0,r3,MSR_GS@h
- bne 1f
-
- /* Now set the I-ERAT watermark to 15 */
- lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
- mtspr SPRN_MMUCR0, r4
- li r4,A2_IERAT_SIZE-1
- PPC_ERATWE(R4,R4,3)
-
- /* Now set the D-ERAT watermark to 31 */
- lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
- mtspr SPRN_MMUCR0, r4
- li r4,A2_DERAT_SIZE-1
- PPC_ERATWE(R4,R4,3)
-
- /* And invalidate the beast just in case. That won't get rid of
- * a bolted entry though it will be in LRU and so will go away eventually
- * but let's not bother for now
- */
- PPC_ERATILX(0,0,R0)
-1:
- blr
-
-_GLOBAL(__restore_cpu_a2)
- b __setup_cpu_a2
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 1557e7c2c7e..46733535cc0 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -56,6 +56,7 @@ _GLOBAL(__setup_cpu_power8)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
+ ori r3, r3, LPCR_PECEDH
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power8
@@ -74,6 +75,7 @@ _GLOBAL(__restore_cpu_power8)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
+ ori r3, r3, LPCR_PECEDH
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power8
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c1faade6506..0c157642c2a 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -109,7 +109,8 @@ extern void __restore_cpu_e6500(void);
PPC_FEATURE_PSERIES_PERFMON_COMPAT)
#define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \
PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \
- PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR)
+ PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
+ PPC_FEATURE2_VEC_CRYPTO)
#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
PPC_FEATURE_TRUE_LE | \
PPC_FEATURE_HAS_ALTIVEC_COMP)
@@ -526,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
+ { /* Power8 DD1: Does not support doorbell IPIs */
+ .pvr_mask = 0xffffff00,
+ .pvr_value = 0x004d0100,
+ .cpu_name = "POWER8 (raw)",
+ .cpu_features = CPU_FTRS_POWER8_DD1,
+ .cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
+ .mmu_features = MMU_FTRS_POWER8,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power8",
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .cpu_setup = __setup_cpu_power8,
+ .cpu_restore = __restore_cpu_power8,
+ .flush_tlb = __flush_tlb_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
+ .platform = "power8",
+ },
{ /* Power8 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004d0000,
@@ -2148,44 +2169,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
}
#endif /* CONFIG_PPC32 */
#endif /* CONFIG_E500 */
-
-#ifdef CONFIG_PPC_A2
- { /* Standard A2 (>= DD2) + FPU core */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00480000,
- .cpu_name = "A2 (>= DD2)",
- .cpu_features = CPU_FTRS_A2,
- .cpu_user_features = COMMON_USER_PPC64,
- .mmu_features = MMU_FTRS_A2,
- .icache_bsize = 64,
- .dcache_bsize = 64,
- .num_pmcs = 0,
- .cpu_setup = __setup_cpu_a2,
- .cpu_restore = __restore_cpu_a2,
- .machine_check = machine_check_generic,
- .platform = "ppca2",
- },
- { /* This is a default entry to get going, to be replaced by
- * a real one at some stage
- */
-#define CPU_FTRS_BASE_BOOK3E (CPU_FTR_USE_TB | \
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_SMT | \
- CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
- .pvr_mask = 0x00000000,
- .pvr_value = 0x00000000,
- .cpu_name = "Book3E",
- .cpu_features = CPU_FTRS_BASE_BOOK3E,
- .cpu_user_features = COMMON_USER_PPC64,
- .mmu_features = MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX |
- MMU_FTR_USE_TLBIVAX_BCAST |
- MMU_FTR_LOCK_BCAST_INVAL,
- .icache_bsize = 64,
- .dcache_bsize = 64,
- .num_pmcs = 0,
- .machine_check = machine_check_generic,
- .platform = "power6",
- },
-#endif /* CONFIG_PPC_A2 */
};
static struct cpu_spec the_cpu_spec;
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 7051ea3101b..86e25702aac 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -330,8 +330,8 @@ static int eeh_phb_check_failure(struct eeh_pe *pe)
eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
eeh_serialize_unlock(flags);
- pr_err("EEH: PHB#%x failure detected\n",
- phb_pe->phb->global_number);
+ pr_err("EEH: PHB#%x failure detected, location: %s\n",
+ phb_pe->phb->global_number, eeh_pe_loc_get(phb_pe));
dump_stack();
eeh_send_failure_event(phb_pe);
@@ -358,10 +358,11 @@ out:
int eeh_dev_check_failure(struct eeh_dev *edev)
{
int ret;
+ int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
unsigned long flags;
struct device_node *dn;
struct pci_dev *dev;
- struct eeh_pe *pe;
+ struct eeh_pe *pe, *parent_pe, *phb_pe;
int rc = 0;
const char *location;
@@ -439,14 +440,34 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
*/
if ((ret < 0) ||
(ret == EEH_STATE_NOT_SUPPORT) ||
- (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
- (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
+ ((ret & active_flags) == active_flags)) {
eeh_stats.false_positives++;
pe->false_positives++;
rc = 0;
goto dn_unlock;
}
+ /*
+ * It should be corner case that the parent PE has been
+ * put into frozen state as well. We should take care
+ * that at first.
+ */
+ parent_pe = pe->parent;
+ while (parent_pe) {
+ /* Hit the ceiling ? */
+ if (parent_pe->type & EEH_PE_PHB)
+ break;
+
+ /* Frozen parent PE ? */
+ ret = eeh_ops->get_state(parent_pe, NULL);
+ if (ret > 0 &&
+ (ret & active_flags) != active_flags)
+ pe = parent_pe;
+
+ /* Next parent level */
+ parent_pe = parent_pe->parent;
+ }
+
eeh_stats.slot_resets++;
/* Avoid repeated reports of this failure, including problems
@@ -460,8 +481,11 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
* a stack trace will help the device-driver authors figure
* out what happened. So print that out.
*/
- pr_err("EEH: Frozen PE#%x detected on PHB#%x\n",
- pe->addr, pe->phb->global_number);
+ phb_pe = eeh_phb_pe_get(pe->phb);
+ pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
+ pe->phb->global_number, pe->addr);
+ pr_err("EEH: PE location: %s, PHB location: %s\n",
+ eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
dump_stack();
eeh_send_failure_event(pe);
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 7100a5b96e7..420da61d4ce 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -447,8 +447,9 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
* PE reset (for 3 times), we try to clear the frozen state
* for 3 times as well.
*/
-static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
+static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
+ struct eeh_pe *pe = (struct eeh_pe *)data;
int i, rc;
for (i = 0; i < 3; i++) {
@@ -461,13 +462,24 @@ static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
}
/* The PE has been isolated, clear it */
- if (rc)
+ if (rc) {
pr_warn("%s: Can't clear frozen PHB#%x-PE#%x (%d)\n",
__func__, pe->phb->global_number, pe->addr, rc);
- else
+ return (void *)pe;
+ }
+
+ return NULL;
+}
+
+static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
+{
+ void *rc;
+
+ rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, NULL);
+ if (!rc)
eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
- return rc;
+ return rc ? -EIO : 0;
}
/**
@@ -758,7 +770,7 @@ static void eeh_handle_special_event(void)
eeh_serialize_lock(&flags);
/* Purge all events */
- eeh_remove_event(NULL);
+ eeh_remove_event(NULL, true);
list_for_each_entry(hose, &hose_list, list_node) {
phb_pe = eeh_phb_pe_get(hose);
@@ -777,7 +789,7 @@ static void eeh_handle_special_event(void)
eeh_serialize_lock(&flags);
/* Purge all events of the PHB */
- eeh_remove_event(pe);
+ eeh_remove_event(pe, true);
if (rc == EEH_NEXT_ERR_DEAD_PHB)
eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
index 72d748b56c8..4eefb6e34db 100644
--- a/arch/powerpc/kernel/eeh_event.c
+++ b/arch/powerpc/kernel/eeh_event.c
@@ -152,24 +152,33 @@ int eeh_send_failure_event(struct eeh_pe *pe)
/**
* eeh_remove_event - Remove EEH event from the queue
* @pe: Event binding to the PE
+ * @force: Event will be removed unconditionally
*
* On PowerNV platform, we might have subsequent coming events
* is part of the former one. For that case, those subsequent
* coming events are totally duplicated and unnecessary, thus
* they should be removed.
*/
-void eeh_remove_event(struct eeh_pe *pe)
+void eeh_remove_event(struct eeh_pe *pe, bool force)
{
unsigned long flags;
struct eeh_event *event, *tmp;
+ /*
+ * If we have NULL PE passed in, we have dead IOC
+ * or we're sure we can report all existing errors
+ * by the caller.
+ *
+ * With "force", the event with associated PE that
+ * have been isolated, the event won't be removed
+ * to avoid event lost.
+ */
spin_lock_irqsave(&eeh_eventlist_lock, flags);
list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
- /*
- * If we don't have valid PE passed in, that means
- * we already have event corresponding to dead IOC
- * and all events should be purged.
- */
+ if (!force && event->pe &&
+ (event->pe->state & EEH_PE_ISOLATED))
+ continue;
+
if (!pe) {
list_del(&event->list);
kfree(event);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 995c2a28463..fbd01eba447 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -792,6 +792,66 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
}
/**
+ * eeh_pe_loc_get - Retrieve location code binding to the given PE
+ * @pe: EEH PE
+ *
+ * Retrieve the location code of the given PE. If the primary PE bus
+ * is root bus, we will grab location code from PHB device tree node
+ * or root port. Otherwise, the upstream bridge's device tree node
+ * of the primary PE bus will be checked for the location code.
+ */
+const char *eeh_pe_loc_get(struct eeh_pe *pe)
+{
+ struct pci_controller *hose;
+ struct pci_bus *bus = eeh_pe_bus_get(pe);
+ struct pci_dev *pdev;
+ struct device_node *dn;
+ const char *loc;
+
+ if (!bus)
+ return "N/A";
+
+ /* PHB PE or root PE ? */
+ if (pci_is_root_bus(bus)) {
+ hose = pci_bus_to_host(bus);
+ loc = of_get_property(hose->dn,
+ "ibm,loc-code", NULL);
+ if (loc)
+ return loc;
+ loc = of_get_property(hose->dn,
+ "ibm,io-base-loc-code", NULL);
+ if (loc)
+ return loc;
+
+ pdev = pci_get_slot(bus, 0x0);
+ } else {
+ pdev = bus->self;
+ }
+
+ if (!pdev) {
+ loc = "N/A";
+ goto out;
+ }
+
+ dn = pci_device_to_OF_node(pdev);
+ if (!dn) {
+ loc = "N/A";
+ goto out;
+ }
+
+ loc = of_get_property(dn, "ibm,loc-code", NULL);
+ if (!loc)
+ loc = of_get_property(dn, "ibm,slot-location-code", NULL);
+ if (!loc)
+ loc = "N/A";
+
+out:
+ if (pci_is_root_bus(bus) && pdev)
+ pci_dev_put(pdev);
+ return loc;
+}
+
+/**
* eeh_pe_bus_get - Retrieve PCI bus according to the given PE
* @pe: EEH PE
*
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 911d45366f5..6528c5e2cc4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -428,12 +428,6 @@ BEGIN_FTR_SECTION
std r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_PPC64
-BEGIN_FTR_SECTION
- mfspr r25,SPRN_DSCR
- std r25,THREAD_DSCR(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
-#endif
and. r0,r0,r22
beq+ 1f
andc r22,r22,r0
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 771b4e92e5d..bb9cac6c805 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1467,22 +1467,6 @@ a2_tlbinit_after_linear_map:
.globl a2_tlbinit_after_iprot_flush
a2_tlbinit_after_iprot_flush:
-#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
- /* Now establish early debug mappings if applicable */
- /* Restore the MAS0 we used for linear mapping load */
- mtspr SPRN_MAS0,r11
-
- lis r3,(MAS1_VALID | MAS1_IPROT)@h
- ori r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT)
- mtspr SPRN_MAS1,r3
- LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G)
- mtspr SPRN_MAS2,r3
- LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW)
- mtspr SPRN_MAS7_MAS3,r3
- /* re-use the MAS8 value from the linear mapping */
- tlbwe
-#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
-
PPC_TLBILX(0,0,R0)
sync
isync
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 20f11eb4dff..a7d36b19221 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -439,9 +439,9 @@ BEGIN_FTR_SECTION
* R9 = CR
* Original R9 to R13 is saved on PACA_EXMC
*
- * Switch to mc_emergency stack and handle re-entrancy (though we
- * currently don't test for overflow). Save MCE registers srr1,
- * srr0, dar and dsisr and then set ME=1
+ * Switch to mc_emergency stack and handle re-entrancy (we limit
+ * the nested MCE upto level 4 to avoid stack overflow).
+ * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
*
* We use paca->in_mce to check whether this is the first entry or
* nested machine check. We increment paca->in_mce to track nested
@@ -464,6 +464,9 @@ BEGIN_FTR_SECTION
0: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
addi r10,r10,1 /* increment paca->in_mce */
sth r10,PACA_IN_MCE(r13)
+ /* Limit nested MCE to level 4 to avoid stack overflow */
+ cmpwi r10,4
+ bgt 2f /* Check if we hit limit of 4 */
std r11,GPR1(r1) /* Save r1 on the stack. */
std r11,0(r1) /* make stack chain pointer */
mfspr r11,SPRN_SRR0 /* Save SRR0 */
@@ -482,10 +485,23 @@ BEGIN_FTR_SECTION
ori r11,r11,MSR_RI /* turn on RI bit */
ld r12,PACAKBASE(r13) /* get high part of &label */
LOAD_HANDLER(r12, machine_check_handle_early)
- mtspr SPRN_SRR0,r12
+1: mtspr SPRN_SRR0,r12
mtspr SPRN_SRR1,r11
rfid
b . /* prevent speculative execution */
+2:
+ /* Stack overflow. Stay on emergency stack and panic.
+ * Keep the ME bit off while panic-ing, so that if we hit
+ * another machine check we checkstop.
+ */
+ addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */
+ ld r11,PACAKMSR(r13)
+ ld r12,PACAKBASE(r13)
+ LOAD_HANDLER(r12, unrecover_mce)
+ li r10,MSR_ME
+ andc r11,r11,r10 /* Turn off MSR_ME */
+ b 1b
+ b . /* prevent speculative execution */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
machine_check_pSeries:
@@ -1389,6 +1405,7 @@ machine_check_handle_early:
bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_early
+ std r3,RESULT(r1) /* Save result */
ld r12,_MSR(r1)
#ifdef CONFIG_PPC_P7_NAP
/*
@@ -1443,11 +1460,33 @@ machine_check_handle_early:
*/
andi. r11,r12,MSR_RI
bne 2f
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl unrecoverable_exception
- b 1b
+1: mfspr r11,SPRN_SRR0
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10,unrecover_mce)
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
+ /*
+ * We are going down. But there are chances that we might get hit by
+ * another MCE during panic path and we may run into unstable state
+ * with no way out. Hence, turn ME bit off while going down, so that
+ * when another MCE is hit during panic path, system will checkstop
+ * and hypervisor will get restarted cleanly by SP.
+ */
+ li r3,MSR_ME
+ andc r10,r10,r3 /* Turn off MSR_ME */
+ mtspr SPRN_SRR1,r10
+ rfid
+ b .
2:
/*
+ * Check if we have successfully handled/recovered from error, if not
+ * then stay on emergency stack and panic.
+ */
+ ld r3,RESULT(r1) /* Load result */
+ cmpdi r3,0 /* see if we handled MCE successfully */
+
+ beq 1b /* if !handled then panic */
+ /*
* Return from MC interrupt.
* Queue up the MCE event so that we can log it later, while
* returning from kernel or opal call.
@@ -1460,6 +1499,17 @@ machine_check_handle_early:
MACHINE_CHECK_HANDLER_WINDUP
b machine_check_pSeries
+unrecover_mce:
+ /* Invoke machine_check_exception to print MCE event and panic. */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl machine_check_exception
+ /*
+ * We will not reach here. Even if we did, there is no way out. Call
+ * unrecoverable_exception and die.
+ */
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl unrecoverable_exception
+ b 1b
/*
* r13 points to the PACA, r9 contains the saved CR,
* r12 contain the saved SRR1, SRR0 is still ready for return
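
The nesting guard added to the machine-check entry path above is easier to read in C. A toy model, under the assumption that a plain counter stands in for paca->in_mce (the limit of 4 comes from the cmpwi in the hunk; returning false corresponds to branching to the stack-overflow path that clears MSR_ME and panics):

#include <stdbool.h>

/* Toy stand-in for paca->in_mce; the real code is the assembly above. */
static unsigned int in_mce;

static bool mce_enter(void)
{
	/* Limit nested machine checks to 4 to avoid emergency-stack overflow. */
	if (++in_mce > 4)
		return false;	/* caller stays on the emergency stack and panics */
	return true;
}

static void mce_exit(void)
{
	in_mce--;
}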
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index f202d0731b0..d178834fe50 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) "ftrace-powerpc: " fmt
+
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
@@ -105,7 +107,7 @@ __ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int op;
- unsigned long ptr;
+ unsigned long entry, ptr;
unsigned long ip = rec->ip;
void *tramp;
@@ -115,7 +117,7 @@ __ftrace_make_nop(struct module *mod,
/* Make sure that that this is still a 24bit jump */
if (!is_bl_op(op)) {
- printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
+ pr_err("Not expected bl: opcode is %x\n", op);
return -EINVAL;
}
@@ -125,21 +127,21 @@ __ftrace_make_nop(struct module *mod,
pr_devel("ip:%lx jumps to %p", ip, tramp);
if (!is_module_trampoline(tramp)) {
- printk(KERN_ERR "Not a trampoline\n");
+ pr_err("Not a trampoline\n");
return -EINVAL;
}
if (module_trampoline_target(mod, tramp, &ptr)) {
- printk(KERN_ERR "Failed to get trampoline target\n");
+ pr_err("Failed to get trampoline target\n");
return -EFAULT;
}
pr_devel("trampoline target %lx", ptr);
+ entry = ppc_global_function_entry((void *)addr);
/* This should match what was called */
- if (ptr != ppc_function_entry((void *)addr)) {
- printk(KERN_ERR "addr %lx does not match expected %lx\n",
- ptr, ppc_function_entry((void *)addr));
+ if (ptr != entry) {
+ pr_err("addr %lx does not match expected %lx\n", ptr, entry);
return -EINVAL;
}
@@ -179,7 +181,7 @@ __ftrace_make_nop(struct module *mod,
/* Make sure that that this is still a 24bit jump */
if (!is_bl_op(op)) {
- printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
+ pr_err("Not expected bl: opcode is %x\n", op);
return -EINVAL;
}
@@ -198,7 +200,7 @@ __ftrace_make_nop(struct module *mod,
/* Find where the trampoline jumps to */
if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
- printk(KERN_ERR "Failed to read %lx\n", tramp);
+ pr_err("Failed to read %lx\n", tramp);
return -EFAULT;
}
@@ -209,7 +211,7 @@ __ftrace_make_nop(struct module *mod,
((jmp[1] & 0xffff0000) != 0x398c0000) ||
(jmp[2] != 0x7d8903a6) ||
(jmp[3] != 0x4e800420)) {
- printk(KERN_ERR "Not a trampoline\n");
+ pr_err("Not a trampoline\n");
return -EINVAL;
}
@@ -221,8 +223,7 @@ __ftrace_make_nop(struct module *mod,
pr_devel(" %lx ", tramp);
if (tramp != addr) {
- printk(KERN_ERR
- "Trampoline location %08lx does not match addr\n",
+ pr_err("Trampoline location %08lx does not match addr\n",
tramp);
return -EINVAL;
}
@@ -263,15 +264,13 @@ int ftrace_make_nop(struct module *mod,
*/
if (!rec->arch.mod) {
if (!mod) {
- printk(KERN_ERR "No module loaded addr=%lx\n",
- addr);
+ pr_err("No module loaded addr=%lx\n", addr);
return -EFAULT;
}
rec->arch.mod = mod;
} else if (mod) {
if (mod != rec->arch.mod) {
- printk(KERN_ERR
- "Record mod %p not equal to passed in mod %p\n",
+ pr_err("Record mod %p not equal to passed in mod %p\n",
rec->arch.mod, mod);
return -EINVAL;
}
@@ -307,26 +306,25 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* The load offset is different depending on the ABI. For simplicity
* just mask it out when doing the compare.
*/
- if ((op[0] != 0x48000008) || ((op[1] & 0xffff00000) != 0xe8410000)) {
- printk(KERN_ERR "Unexpected call sequence: %x %x\n",
- op[0], op[1]);
+ if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
+ pr_err("Unexpected call sequence: %x %x\n", op[0], op[1]);
return -EINVAL;
}
/* If we never set up a trampoline to ftrace_caller, then bail */
if (!rec->arch.mod->arch.tramp) {
- printk(KERN_ERR "No ftrace trampoline\n");
+ pr_err("No ftrace trampoline\n");
return -EINVAL;
}
/* Ensure branch is within 24 bits */
- if (create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
- printk(KERN_ERR "Branch out of range");
+ if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
+ pr_err("Branch out of range\n");
return -EINVAL;
}
if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
- printk(KERN_ERR "REL24 out of range!\n");
+ pr_err("REL24 out of range!\n");
return -EINVAL;
}
@@ -345,13 +343,13 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
/* It should be pointing to a nop */
if (op != PPC_INST_NOP) {
- printk(KERN_ERR "Expected NOP but have %x\n", op);
+ pr_err("Expected NOP but have %x\n", op);
return -EINVAL;
}
/* If we never set up a trampoline to ftrace_caller, then bail */
if (!rec->arch.mod->arch.tramp) {
- printk(KERN_ERR "No ftrace trampoline\n");
+ pr_err("No ftrace trampoline\n");
return -EINVAL;
}
@@ -359,7 +357,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
op = create_branch((unsigned int *)ip,
rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
if (!op) {
- printk(KERN_ERR "REL24 out of range!\n");
+ pr_err("REL24 out of range!\n");
return -EINVAL;
}
@@ -397,7 +395,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* already have a module defined.
*/
if (!rec->arch.mod) {
- printk(KERN_ERR "No module loaded\n");
+ pr_err("No module loaded\n");
return -EINVAL;
}
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 67ee0d6c107..7d7d8635227 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -930,25 +930,6 @@ initial_mmu:
tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */
tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */
-#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)
-
- /* Load a TLB entry for the UART, so that ppc4xx_progress() can use
- * the UARTs nice and early. We use a 4k real==virtual mapping. */
-
- lis r3,SERIAL_DEBUG_IO_BASE@h
- ori r3,r3,SERIAL_DEBUG_IO_BASE@l
- mr r4,r3
- clrrwi r4,r4,12
- ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
-
- clrrwi r3,r3,12
- ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
-
- li r0,0 /* TLB slot 0 */
- tlbwe r4,r0,TLB_DATA
- tlbwe r3,r0,TLB_TAG
-#endif /* CONFIG_SERIAL_DEBUG_TEXT && SERIAL_DEBUG_IO_BASE */
-
isync
/* Establish the exception vector base
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 2480256272d..5cf3d367190 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -131,7 +131,7 @@ _GLOBAL(power7_nap)
_GLOBAL(power7_sleep)
li r3,1
- li r4,0
+ li r4,1
b power7_powersave_common
/* No return */
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index b82227e7e21..12e48d56f77 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -23,7 +23,7 @@ unsigned int ioread16(void __iomem *addr)
}
unsigned int ioread16be(void __iomem *addr)
{
- return in_be16(addr);
+ return readw_be(addr);
}
unsigned int ioread32(void __iomem *addr)
{
@@ -31,7 +31,7 @@ unsigned int ioread32(void __iomem *addr)
}
unsigned int ioread32be(void __iomem *addr)
{
- return in_be32(addr);
+ return readl_be(addr);
}
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
@@ -49,7 +49,7 @@ void iowrite16(u16 val, void __iomem *addr)
}
void iowrite16be(u16 val, void __iomem *addr)
{
- out_be16(addr, val);
+ writew_be(val, addr);
}
void iowrite32(u32 val, void __iomem *addr)
{
@@ -57,7 +57,7 @@ void iowrite32(u32 val, void __iomem *addr)
}
void iowrite32be(u32 val, void __iomem *addr)
{
- out_be32(addr, val);
+ writel_be(val, addr);
}
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
@@ -75,15 +75,15 @@ EXPORT_SYMBOL(iowrite32be);
*/
void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
- _insb((u8 __iomem *) addr, dst, count);
+ readsb(addr, dst, count);
}
void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
- _insw_ns((u16 __iomem *) addr, dst, count);
+ readsw(addr, dst, count);
}
void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
- _insl_ns((u32 __iomem *) addr, dst, count);
+ readsl(addr, dst, count);
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
@@ -91,15 +91,15 @@ EXPORT_SYMBOL(ioread32_rep);
void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
- _outsb((u8 __iomem *) addr, src, count);
+ writesb(addr, src, count);
}
void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
- _outsw_ns((u16 __iomem *) addr, src, count);
+ writesw(addr, src, count);
}
void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
- _outsl_ns((u32 __iomem *) addr, src, count);
+ writesl(addr, src, count);
}
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 90fab64d911..2f72af82513 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
+#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>
@@ -491,12 +492,10 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
return ret;
}
-#ifdef CONFIG_PPC64
unsigned long arch_deref_entry_point(void *entry)
{
- return ((func_descr_t *)entry)->entry;
+ return ppc_global_function_entry(entry);
}
-#endif
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
@@ -508,8 +507,12 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
/* setup return addr to the jprobe handler routine */
regs->nip = arch_deref_entry_point(jp->entry);
#ifdef CONFIG_PPC64
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+ regs->gpr[12] = (unsigned long)jp->entry;
+#else
regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif
+#endif
return 1;
}
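
Both kprobes changes above come down to the difference between the ELFv1 and ELFv2 PowerPC ABIs: under ELFv1 a function symbol names a descriptor (entry address plus TOC pointer), while under ELFv2 the symbol is the global entry point itself and the callee derives its TOC from r12. A standalone sketch of that distinction, with struct func_descr as a stand-in for the kernel's func_descr_t:

#include <stdint.h>

/* ELFv1 function descriptor layout (stand-in for the kernel's func_descr_t). */
struct func_descr {
	uint64_t entry;	/* address of the code */
	uint64_t toc;	/* callee's TOC pointer */
	uint64_t env;
};

static uintptr_t deref_entry_point(void *sym)
{
#if defined(_CALL_ELF) && _CALL_ELF == 2
	return (uintptr_t)sym;				/* ELFv2: symbol is the entry point */
#else
	return ((struct func_descr *)sym)->entry;	/* ELFv1: follow the descriptor */
#endif
}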
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 077d2ce6c5a..d807ee626af 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -315,8 +315,17 @@ static void dedotify_versions(struct modversion_info *vers,
struct modversion_info *end;
for (end = (void *)vers + size; vers < end; vers++)
- if (vers->name[0] == '.')
+ if (vers->name[0] == '.') {
memmove(vers->name, vers->name+1, strlen(vers->name));
+#ifdef ARCH_RELOCATES_KCRCTAB
+ /* The TOC symbol has no CRC computed. To avoid CRC
+ * check failing, we must force it to the expected
+ * value (see CRC check in module.c).
+ */
+ if (!strcmp(vers->name, "TOC."))
+ vers->crc = -(unsigned long)reloc_start;
+#endif
+ }
}
/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8a1edbe26b8..be99774d3f4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -755,15 +755,15 @@ struct task_struct *__switch_to(struct task_struct *prev,
WARN_ON(!irqs_disabled());
- /* Back up the TAR across context switches.
+ /* Back up the TAR and DSCR across context switches.
* Note that the TAR is not available for use in the kernel. (To
* provide this, the TAR should be backed up/restored on exception
* entry/exit instead, and be in pt_regs. FIXME, this should be in
* pt_regs anyway (for debug).)
- * Save the TAR here before we do treclaim/trecheckpoint as these
- * will change the TAR.
+ * Save the TAR and DSCR here before we do treclaim/trecheckpoint as
+ * these will change them.
*/
- save_tar(&prev->thread);
+ save_early_sprs(&prev->thread);
__switch_to_tm(prev);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 613a860a203..b694b073097 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -662,13 +662,6 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
#endif
- /* Pre-initialize the cmd_line with the content of boot_commmand_line,
- * which will be empty except when the content of the variable has
- * been overriden by a bootloading mechanism. This happens typically
- * with HAL takeover
- */
- strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
-
/* Retrieve various informations from the /chosen node of the
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 078145acf7f..1a85d8f9673 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1268,201 +1268,6 @@ static u64 __initdata prom_opal_base;
static u64 __initdata prom_opal_entry;
#endif
-#ifdef __BIG_ENDIAN__
-/* XXX Don't change this structure without updating opal-takeover.S */
-static struct opal_secondary_data {
- s64 ack; /* 0 */
- u64 go; /* 8 */
- struct opal_takeover_args args; /* 16 */
-} opal_secondary_data;
-
-static u64 __initdata prom_opal_align;
-static u64 __initdata prom_opal_size;
-static int __initdata prom_rtas_start_cpu;
-static u64 __initdata prom_rtas_data;
-static u64 __initdata prom_rtas_entry;
-
-extern char opal_secondary_entry;
-
-static void __init prom_query_opal(void)
-{
- long rc;
-
- /* We must not query for OPAL presence on a machine that
- * supports TNK takeover (970 blades), as this uses the same
- * h-call with different arguments and will crash
- */
- if (PHANDLE_VALID(call_prom("finddevice", 1, 1,
- ADDR("/tnk-memory-map")))) {
- prom_printf("TNK takeover detected, skipping OPAL check\n");
- return;
- }
-
- prom_printf("Querying for OPAL presence... ");
-
- rc = opal_query_takeover(&prom_opal_size,
- &prom_opal_align);
- prom_debug("(rc = %ld) ", rc);
- if (rc != 0) {
- prom_printf("not there.\n");
- return;
- }
- of_platform = PLATFORM_OPAL;
- prom_printf(" there !\n");
- prom_debug(" opal_size = 0x%lx\n", prom_opal_size);
- prom_debug(" opal_align = 0x%lx\n", prom_opal_align);
- if (prom_opal_align < 0x10000)
- prom_opal_align = 0x10000;
-}
-
-static int __init prom_rtas_call(int token, int nargs, int nret,
- int *outputs, ...)
-{
- struct rtas_args rtas_args;
- va_list list;
- int i;
-
- rtas_args.token = token;
- rtas_args.nargs = nargs;
- rtas_args.nret = nret;
- rtas_args.rets = (rtas_arg_t *)&(rtas_args.args[nargs]);
- va_start(list, outputs);
- for (i = 0; i < nargs; ++i)
- rtas_args.args[i] = va_arg(list, rtas_arg_t);
- va_end(list);
-
- for (i = 0; i < nret; ++i)
- rtas_args.rets[i] = 0;
-
- opal_enter_rtas(&rtas_args, prom_rtas_data,
- prom_rtas_entry);
-
- if (nret > 1 && outputs != NULL)
- for (i = 0; i < nret-1; ++i)
- outputs[i] = rtas_args.rets[i+1];
- return (nret > 0)? rtas_args.rets[0]: 0;
-}
-
-static void __init prom_opal_hold_cpus(void)
-{
- int i, cnt, cpu, rc;
- long j;
- phandle node;
- char type[64];
- u32 servers[8];
- void *entry = (unsigned long *)&opal_secondary_entry;
- struct opal_secondary_data *data = &opal_secondary_data;
-
- prom_debug("prom_opal_hold_cpus: start...\n");
- prom_debug(" - entry = 0x%x\n", entry);
- prom_debug(" - data = 0x%x\n", data);
-
- data->ack = -1;
- data->go = 0;
-
- /* look for cpus */
- for (node = 0; prom_next_node(&node); ) {
- type[0] = 0;
- prom_getprop(node, "device_type", type, sizeof(type));
- if (strcmp(type, "cpu") != 0)
- continue;
-
- /* Skip non-configured cpus. */
- if (prom_getprop(node, "status", type, sizeof(type)) > 0)
- if (strcmp(type, "okay") != 0)
- continue;
-
- cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers,
- sizeof(servers));
- if (cnt == PROM_ERROR)
- break;
- cnt >>= 2;
- for (i = 0; i < cnt; i++) {
- cpu = servers[i];
- prom_debug("CPU %d ... ", cpu);
- if (cpu == prom.cpu) {
- prom_debug("booted !\n");
- continue;
- }
- prom_debug("starting ... ");
-
- /* Init the acknowledge var which will be reset by
- * the secondary cpu when it awakens from its OF
- * spinloop.
- */
- data->ack = -1;
- rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1,
- NULL, cpu, entry, data);
- prom_debug("rtas rc=%d ...", rc);
-
- for (j = 0; j < 100000000 && data->ack == -1; j++) {
- HMT_low();
- mb();
- }
- HMT_medium();
- if (data->ack != -1)
- prom_debug("done, PIR=0x%x\n", data->ack);
- else
- prom_debug("timeout !\n");
- }
- }
- prom_debug("prom_opal_hold_cpus: end...\n");
-}
-
-static void __init prom_opal_takeover(void)
-{
- struct opal_secondary_data *data = &opal_secondary_data;
- struct opal_takeover_args *args = &data->args;
- u64 align = prom_opal_align;
- u64 top_addr, opal_addr;
-
- args->k_image = (u64)_stext;
- args->k_size = _end - _stext;
- args->k_entry = 0;
- args->k_entry2 = 0x60;
-
- top_addr = _ALIGN_UP(args->k_size, align);
-
- if (prom_initrd_start != 0) {
- args->rd_image = prom_initrd_start;
- args->rd_size = prom_initrd_end - args->rd_image;
- args->rd_loc = top_addr;
- top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align);
- }
-
- /* Pickup an address for the HAL. We want to go really high
- * up to avoid problem with future kexecs. On the other hand
- * we don't want to be all over the TCEs on P5IOC2 machines
- * which are going to be up there too. We assume the machine
- * has plenty of memory, and we ask for the HAL for now to
- * be just below the 1G point, or above the initrd
- */
- opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align);
- if (opal_addr < top_addr)
- opal_addr = top_addr;
- args->hal_addr = opal_addr;
-
- /* Copy the command line to the kernel image */
- strlcpy(boot_command_line, prom_cmd_line,
- COMMAND_LINE_SIZE);
-
- prom_debug(" k_image = 0x%lx\n", args->k_image);
- prom_debug(" k_size = 0x%lx\n", args->k_size);
- prom_debug(" k_entry = 0x%lx\n", args->k_entry);
- prom_debug(" k_entry2 = 0x%lx\n", args->k_entry2);
- prom_debug(" hal_addr = 0x%lx\n", args->hal_addr);
- prom_debug(" rd_image = 0x%lx\n", args->rd_image);
- prom_debug(" rd_size = 0x%lx\n", args->rd_size);
- prom_debug(" rd_loc = 0x%lx\n", args->rd_loc);
- prom_printf("Performing OPAL takeover,this can take a few minutes..\n");
- prom_close_stdin();
- mb();
- data->go = 1;
- for (;;)
- opal_do_takeover(args);
-}
-#endif /* __BIG_ENDIAN__ */
-
/*
* Allocate room for and instantiate OPAL
*/
@@ -1597,12 +1402,6 @@ static void __init prom_instantiate_rtas(void)
&val, sizeof(val)) != PROM_ERROR)
rtas_has_query_cpu_stopped = true;
-#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
- /* PowerVN takeover hack */
- prom_rtas_data = base;
- prom_rtas_entry = entry;
- prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4);
-#endif
prom_debug("rtas base = 0x%x\n", base);
prom_debug("rtas entry = 0x%x\n", entry);
prom_debug("rtas size = 0x%x\n", (long)size);
@@ -3027,16 +2826,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
prom_instantiate_rtas();
#ifdef CONFIG_PPC_POWERNV
-#ifdef __BIG_ENDIAN__
- /* Detect HAL and try instanciating it & doing takeover */
- if (of_platform == PLATFORM_PSERIES_LPAR) {
- prom_query_opal();
- if (of_platform == PLATFORM_OPAL) {
- prom_opal_hold_cpus();
- prom_opal_takeover();
- }
- } else
-#endif /* __BIG_ENDIAN__ */
if (of_platform == PLATFORM_OPAL)
prom_instantiate_opal();
#endif /* CONFIG_PPC_POWERNV */
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 77aa1e95e90..fe8e54b9ef7 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -21,9 +21,7 @@ _end enter_prom memcpy memset reloc_offset __secondary_hold
__secondary_hold_acknowledge __secondary_hold_spinloop __start
strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
reloc_got2 kernstart_addr memstart_addr linux_banner _stext
-opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry
-boot_command_line __prom_init_toc_start __prom_init_toc_end
-btext_setup_display TOC."
+__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
NM="$1"
OBJ="$2"
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 658e89d2025..db2b482af65 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -611,17 +611,19 @@ static void rtas_flash_firmware(int reboot_type)
for (f = flist; f; f = next) {
/* Translate data addrs to absolute */
for (i = 0; i < f->num_blocks; i++) {
- f->blocks[i].data = (char *)__pa(f->blocks[i].data);
+ f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
image_size += f->blocks[i].length;
+ f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
}
next = f->next;
/* Don't translate NULL pointer for last entry */
if (f->next)
- f->next = (struct flash_block_list *)__pa(f->next);
+ f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
else
f->next = NULL;
/* make num_blocks into the version/length field */
f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
+ f->num_blocks = cpu_to_be64(f->num_blocks);
}
printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
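
The cpu_to_be64() conversions above are needed because the flash block list is consumed by firmware, which expects big-endian values, while the kernel may now be running little-endian. A minimal userspace-style sketch of the same pattern, using htobe64() from <endian.h> in place of the kernel's cpu_to_be64() and a toy_block struct standing in for the flash block entries:

#include <endian.h>
#include <stdint.h>

/* Stand-in for one flash block descriptor handed to firmware. */
struct toy_block {
	uint64_t data;		/* physical address of the block's data */
	uint64_t length;	/* length in bytes */
};

static void fixup_block(struct toy_block *b, uint64_t phys, uint64_t len)
{
	/* Firmware reads these fields big-endian, so byte-swap on LE hosts. */
	b->data   = htobe64(phys);
	b->length = htobe64(len);
}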
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index d4d418376f9..e5b022c55cc 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -469,9 +469,17 @@ void __init smp_setup_cpu_maps(void)
}
for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
+ bool avail;
+
DBG(" thread %d -> cpu %d (hard id %d)\n",
j, cpu, be32_to_cpu(intserv[j]));
- set_cpu_present(cpu, true);
+
+ avail = of_device_is_available(dn);
+ if (!avail)
+ avail = !of_property_match_string(dn,
+ "enable-method", "spin-table");
+
+ set_cpu_present(cpu, avail);
set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
set_cpu_possible(cpu, true);
cpu++;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 4e47db686b5..1bc5a1755ed 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -54,7 +54,6 @@
#include "signal.h"
-#undef DEBUG_SIG
#ifdef CONFIG_PPC64
#define sys_rt_sigreturn compat_sys_rt_sigreturn
@@ -1063,10 +1062,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
return 1;
badframe:
-#ifdef DEBUG_SIG
- printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
- regs, frame, newsp);
-#endif
if (show_unhandled_signals)
printk_ratelimited(KERN_INFO
"%s[%d]: bad frame in handle_rt_signal32: "
@@ -1484,10 +1479,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
return 1;
badframe:
-#ifdef DEBUG_SIG
- printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
- regs, frame, newsp);
-#endif
if (show_unhandled_signals)
printk_ratelimited(KERN_INFO
"%s[%d]: bad frame in handle_signal32: "
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index d501dc4dc3e..97c1e4b683f 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -38,7 +38,6 @@
#include "signal.h"
-#define DEBUG_SIG 0
#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
#define FP_REGS_SIZE sizeof(elf_fpregset_t)
@@ -700,10 +699,6 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
return 0;
badframe:
-#if DEBUG_SIG
- printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
- regs, uc, &uc->uc_mcontext);
-#endif
if (show_unhandled_signals)
printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
current->comm, current->pid, "rt_sigreturn",
@@ -809,10 +804,6 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
return 1;
badframe:
-#if DEBUG_SIG
- printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
- regs, frame, newsp);
-#endif
if (show_unhandled_signals)
printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
current->comm, current->pid, "setup_rt_frame",
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 7753af2d261..1007fb802e6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -747,9 +747,9 @@ int setup_profiling_timer(unsigned int multiplier)
#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymetric SMT dependancy */
-static const int powerpc_smt_flags(void)
+static int powerpc_smt_flags(void)
{
- int flags = SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
+ int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 7e711bdcc6d..9fff9cdcc51 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -551,7 +551,7 @@ void timer_interrupt(struct pt_regs * regs)
may_hard_irq_enable();
-#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
+#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1bd7ca298fa..239f1cde3ff 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -295,6 +295,8 @@ long machine_check_early(struct pt_regs *regs)
{
long handled = 0;
+ __get_cpu_var(irq_stat).mce_exceptions++;
+
if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
handled = cur_cpu_spec->machine_check_early(regs);
return handled;
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index a15837519dc..b7aa07279a6 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -62,8 +62,6 @@ void __init udbg_early_init(void)
udbg_init_cpm();
#elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO)
udbg_init_usbgecko();
-#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
- udbg_init_wsp();
#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
/* In memory console */
udbg_init_memcons();
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 75702e207b2..6e7c4923b5e 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -296,14 +296,3 @@ void __init udbg_init_40x_realmode(void)
}
#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
-
-
-#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
-
-void __init udbg_init_wsp(void)
-{
- udbg_uart_init_mmio((void *)WSP_UART_VIRT, 1);
- udbg_uart_setup(57600, 50000000);
-}
-
-#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */