Diffstat (limited to 'arch/powerpc/platforms/pseries')
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig  33
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile  10
-rw-r--r--  arch/powerpc/platforms/pseries/cmm.c  4
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c  103
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c  12
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c  920
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_cache.c  319
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_dev.c  112
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c  552
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_event.c  142
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pe.c  652
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c  149
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_sysfs.c  75
-rw-r--r--  arch/powerpc/platforms/pseries/event_sources.c  8
-rw-r--r--  arch/powerpc/platforms/pseries/firmware.c  69
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c  40
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c  114
-rw-r--r--  arch/powerpc/platforms/pseries/hvCall.S  10
-rw-r--r--  arch/powerpc/platforms/pseries/hvCall_inst.c  2
-rw-r--r--  arch/powerpc/platforms/pseries/hvconsole.c  19
-rw-r--r--  arch/powerpc/platforms/pseries/hvcserver.c  5
-rw-r--r--  arch/powerpc/platforms/pseries/io_event_irq.c  8
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c  235
-rw-r--r--  arch/powerpc/platforms/pseries/kexec.c  2
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c  194
-rw-r--r--  arch/powerpc/platforms/pseries/lparcfg.c  710
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c  84
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c  98
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c  499
-rw-r--r--  arch/powerpc/platforms/pseries/pci.c  64
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c  91
-rw-r--r--  arch/powerpc/platforms/pseries/plpar_wrappers.h  307
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c  368
-rw-r--r--  arch/powerpc/platforms/pseries/pseries.h  14
-rw-r--r--  arch/powerpc/platforms/pseries/pseries_energy.c  41
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c  28
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c  5
-rw-r--r--  arch/powerpc/platforms/pseries/rng.c  45
-rw-r--r--  arch/powerpc/platforms/pseries/scanlog.c  36
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c  177
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c  74
-rw-r--r--  arch/powerpc/platforms/pseries/suspend.c  68
42 files changed, 2237 insertions, 4261 deletions
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 837cf49357e..756b482f819 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -17,6 +17,11 @@ config PPC_PSERIES
select PPC_NATIVE
select PPC_PCI_CHOICE if EXPERT
select ZLIB_DEFLATE
+ select PPC_DOORBELL
+ select HAVE_CONTEXT_TRACKING
+ select HOTPLUG_CPU if SMP
+ select ARCH_RANDOM
+ select PPC_DOORBELL
default y
config PPC_SPLPAR
@@ -29,14 +34,9 @@ config PPC_SPLPAR
processors, that is, which share physical processors between
two or more partitions.
-config EEH
- bool
- depends on PPC_PSERIES && PCI
- default y
-
config PSERIES_MSI
bool
- depends on PCI_MSI && EEH
+ depends on PCI_MSI && PPC_PSERIES && EEH
default y
config PSERIES_ENERGY
@@ -112,6 +112,18 @@ config CMM
will be reused for other LPARs. The interface allows firmware to
balance memory across many LPARs.
+config HV_PERF_CTRS
+ bool "Hypervisor supplied PMU events (24x7 & GPCI)"
+ default y
+ depends on PERF_EVENTS && PPC_PSERIES
+ help
+ Enable access to hypervisor supplied counters in perf. Currently,
+ this enables code that uses the hcall GetPerfCounterInfo and 24x7
+ interfaces to retrieve counters. GPCI exists on Power 6 and later
+ systems. 24x7 is available on Power 8 systems.
+
+ If unsure, select Y.
+
config DTL
bool "Dispatch Trace Log"
depends on PPC_SPLPAR && DEBUG_FS
@@ -121,12 +133,3 @@ config DTL
which are accessible through a debugfs file.
Say N if you are unsure.
-
-config PSERIES_IDLE
- bool "Cpuidle driver for pSeries platforms"
- depends on CPU_IDLE
- depends on PPC_PSERIES
- default y
- help
- Select this option to enable processor idle state management
- through cpuidle subsystem.
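The HV_PERF_CTRS option added above exposes the GPCI and 24x7 hypervisor counters through perf. As a rough user-space sketch of how such a dynamically registered PMU is consumed: the PMU's type is read from sysfs and passed to perf_event_open(). The "hv_gpci" sysfs name matches the mainline PMU, but the attr.config value below is a made-up placeholder, not a real GPCI event encoding.

/*
 * Sketch only: open one event on a dynamically registered PMU and
 * read its count. The config value 0x10 is hypothetical; real event
 * encodings come from the PMU's sysfs "events" directory.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int type, fd;
	FILE *f;

	/* Dynamic PMUs advertise their perf type id in sysfs. */
	f = fopen("/sys/bus/event_source/devices/hv_gpci/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x10;	/* hypothetical event encoding */

	/* pid = -1, cpu = 0: count on CPU 0 regardless of task. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("count=%llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}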
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 890622b87c8..03480796af9 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -1,14 +1,12 @@
-ccflags-$(CONFIG_PPC64) := -mno-minimal-toc
+ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG
obj-y := lpar.o hvCall.o nvram.o reconfig.o \
setup.o iommu.o event_sources.o ras.o \
- firmware.o power.o dlpar.o mobility.o
+ firmware.o power.o dlpar.o mobility.o rng.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCANLOG) += scanlog.o
-obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
- eeh_driver.o eeh_event.o eeh_sysfs.o \
- eeh_pseries.o
+obj-$(CONFIG_EEH) += eeh_pseries.o
obj-$(CONFIG_KEXEC) += kexec.o
obj-$(CONFIG_PCI) += pci.o pci_dlpar.o
obj-$(CONFIG_PSERIES_MSI) += msi.o
@@ -23,7 +21,7 @@ obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_DTL) += dtl.o
obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
-obj-$(CONFIG_PSERIES_IDLE) += processor_idle.o
+obj-$(CONFIG_LPARCFG) += lparcfg.o
ifeq ($(CONFIG_PPC_PSERIES),y)
obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index c638535753d..2d8bf15879f 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -25,7 +25,6 @@
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gfp.h>
-#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/oom.h>
@@ -40,8 +39,7 @@
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <linux/memory.h>
-
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
#define CMM_DRIVER_VERSION "1.0.0"
#define CMM_DEFAULT_DELAY 1
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index a1a7b9a67ff..2d0b4d68a40 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -11,7 +11,6 @@
*/
#include <linux/kernel.h>
-#include <linux/kref.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
@@ -63,26 +62,32 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
return prop;
}
-static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
+static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
+ const char *path)
{
struct device_node *dn;
char *name;
+ /* If the parent node path is "/", advance path to the NULL terminator
+ * to prevent a double leading slash in full_name.
+ */
+ if (!path[1])
+ path++;
+
dn = kzalloc(sizeof(*dn), GFP_KERNEL);
if (!dn)
return NULL;
- /* The configure connector reported name does not contain a
- * preceding '/', so we allocate a buffer large enough to
- * prepend this to the full_name.
- */
name = (char *)ccwa + ccwa->name_offset;
- dn->full_name = kasprintf(GFP_KERNEL, "/%s", name);
+ dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
if (!dn->full_name) {
kfree(dn);
return NULL;
}
+ of_node_set_flag(dn, OF_DYNAMIC);
+ of_node_init(dn);
+
return dn;
}
@@ -120,7 +125,8 @@ void dlpar_free_cc_nodes(struct device_node *dn)
#define CALL_AGAIN -2
#define ERR_CFG_USE -9003
-struct device_node *dlpar_configure_connector(u32 drc_index)
+struct device_node *dlpar_configure_connector(u32 drc_index,
+ struct device_node *parent)
{
struct device_node *dn;
struct device_node *first_dn = NULL;
@@ -129,6 +135,7 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
struct property *last_property = NULL;
struct cc_workarea *ccwa;
char *data_buf;
+ const char *parent_path = parent->full_name;
int cc_token;
int rc = -1;
@@ -162,7 +169,7 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
break;
case NEXT_SIBLING:
- dn = dlpar_parse_cc_node(ccwa);
+ dn = dlpar_parse_cc_node(ccwa, parent_path);
if (!dn)
goto cc_error;
@@ -172,13 +179,17 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
break;
case NEXT_CHILD:
- dn = dlpar_parse_cc_node(ccwa);
+ if (first_dn)
+ parent_path = last_dn->full_name;
+
+ dn = dlpar_parse_cc_node(ccwa, parent_path);
if (!dn)
goto cc_error;
- if (!first_dn)
+ if (!first_dn) {
+ dn->parent = parent;
first_dn = dn;
- else {
+ } else {
dn->parent = last_dn;
if (last_dn)
last_dn->child = dn;
@@ -202,6 +213,7 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
case PREV_PARENT:
last_dn = last_dn->parent;
+ parent_path = last_dn->parent->full_name;
break;
case CALL_AGAIN:
@@ -256,8 +268,6 @@ int dlpar_attach_node(struct device_node *dn)
{
int rc;
- of_node_set_flag(dn, OF_DYNAMIC);
- kref_init(&dn->kref);
dn->parent = derive_parent(dn->full_name);
if (!dn->parent)
return -ENOMEM;
@@ -275,8 +285,15 @@ int dlpar_attach_node(struct device_node *dn)
int dlpar_detach_node(struct device_node *dn)
{
+ struct device_node *child;
int rc;
+ child = of_get_next_child(dn, NULL);
+ while (child) {
+ dlpar_detach_node(child);
+ child = of_get_next_child(dn, child);
+ }
+
rc = of_detach_node(dn);
if (rc)
return rc;
@@ -382,57 +399,42 @@ out:
static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
- struct device_node *dn;
+ struct device_node *dn, *parent;
unsigned long drc_index;
- char *cpu_name;
int rc;
- cpu_hotplug_driver_lock();
rc = strict_strtoul(buf, 0, &drc_index);
- if (rc) {
- rc = -EINVAL;
- goto out;
- }
+ if (rc)
+ return -EINVAL;
- dn = dlpar_configure_connector(drc_index);
- if (!dn) {
- rc = -EINVAL;
- goto out;
- }
+ parent = of_find_node_by_path("/cpus");
+ if (!parent)
+ return -ENODEV;
- /* configure-connector reports cpus as living in the base
- * directory of the device tree. CPUs actually live in the
- * cpus directory so we need to fixup the full_name.
- */
- cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name);
- if (!cpu_name) {
- dlpar_free_cc_nodes(dn);
- rc = -ENOMEM;
- goto out;
- }
+ dn = dlpar_configure_connector(drc_index, parent);
+ if (!dn)
+ return -EINVAL;
- kfree(dn->full_name);
- dn->full_name = cpu_name;
+ of_node_put(parent);
rc = dlpar_acquire_drc(drc_index);
if (rc) {
dlpar_free_cc_nodes(dn);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
rc = dlpar_attach_node(dn);
if (rc) {
dlpar_release_drc(drc_index);
dlpar_free_cc_nodes(dn);
- goto out;
+ return rc;
}
rc = dlpar_online_cpu(dn);
-out:
- cpu_hotplug_driver_unlock();
+ if (rc)
+ return rc;
- return rc ? rc : count;
+ return count;
}
static int dlpar_offline_cpu(struct device_node *dn)
@@ -505,30 +507,27 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count)
return -EINVAL;
}
- cpu_hotplug_driver_lock();
rc = dlpar_offline_cpu(dn);
if (rc) {
of_node_put(dn);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
rc = dlpar_release_drc(*drc_index);
if (rc) {
of_node_put(dn);
- goto out;
+ return rc;
}
rc = dlpar_detach_node(dn);
if (rc) {
dlpar_acquire_drc(*drc_index);
- goto out;
+ return rc;
}
of_node_put(dn);
-out:
- cpu_hotplug_driver_unlock();
- return rc ? rc : count;
+
+ return count;
}
static int __init pseries_dlpar_init(void)
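The full_name construction that dlpar_parse_cc_node() now performs is easy to check in isolation. Below is a stand-alone sketch (not kernel code) of the same concatenation, with kasprintf() swapped for plain malloc()/sprintf(); the root-path guard is the behavior added by this patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *build_full_name(const char *path, const char *name)
{
	char *full;

	/* A root parent path "/" has no second character; advancing
	 * past the slash avoids producing "//cpus". */
	if (!path[1])
		path++;

	full = malloc(strlen(path) + strlen(name) + 2);
	if (!full)
		return NULL;
	sprintf(full, "%s/%s", path, name);
	return full;
}

int main(void)
{
	printf("%s\n", build_full_name("/", "cpus"));
	printf("%s\n", build_full_name("/cpus", "PowerPC,POWER7@10"));
	return 0;
}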
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index a7648543c59..7d61498e45c 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -20,7 +20,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/spinlock.h>
@@ -29,8 +28,7 @@
#include <asm/firmware.h>
#include <asm/lppaca.h>
#include <asm/debug.h>
-
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
struct dtl {
struct dtl_entry *buf;
@@ -57,7 +55,7 @@ static u8 dtl_event_mask = 0x7;
*/
static int dtl_buf_entries = N_DISPATCH_LOG;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
u64 write_index;
struct dtl_entry *write_ptr;
@@ -87,7 +85,7 @@ static void consume_dtle(struct dtl_entry *dtle, u64 index)
barrier();
/* check for hypervisor ring buffer overflow, ignore this entry if so */
- if (index + N_DISPATCH_LOG < vpa->dtl_idx)
+ if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
return;
++wp;
@@ -142,7 +140,7 @@ static u64 dtl_current_index(struct dtl *dtl)
return per_cpu(dtl_rings, dtl->cpu).write_index;
}
-#else /* CONFIG_VIRT_CPU_ACCOUNTING */
+#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static int dtl_start(struct dtl *dtl)
{
@@ -188,7 +186,7 @@ static u64 dtl_current_index(struct dtl *dtl)
{
return lppaca_of(dtl->cpu).dtl_idx;
}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static int dtl_enable(struct dtl *dtl)
{
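The dtl_idx change above wraps the lppaca field in be64_to_cpu() because that area is shared with the hypervisor in big-endian byte order, so a little-endian kernel must byte-swap before comparing indexes. A small host-side sketch of what the conversion does (GCC/clang builtins assumed):

#include <stdint.h>
#include <stdio.h>

static uint64_t be64_to_cpu_sketch(uint64_t raw)
{
	/* Byte-swap on a little-endian host; identity on big-endian. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return __builtin_bswap64(raw);
#else
	return raw;
#endif
}

int main(void)
{
	/* What a little-endian CPU loads from the big-endian bytes
	 * 00 00 00 00 00 00 00 01 (i.e. the value 1). */
	uint64_t raw = 0x0100000000000000ULL;

	/* On a little-endian host this prints decoded=1. */
	printf("raw=%llu decoded=%llu\n",
	       (unsigned long long)raw,
	       (unsigned long long)be64_to_cpu_sketch(raw));
	return 0;
}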
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
deleted file mode 100644
index 9a04322b173..00000000000
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ /dev/null
@@ -1,920 +0,0 @@
-/*
- * Copyright IBM Corporation 2001, 2005, 2006
- * Copyright Dave Engebretsen & Todd Inglett 2001
- * Copyright Linas Vepstas 2005, 2006
- * Copyright 2001-2012 IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
- */
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/rbtree.h>
-#include <linux/seq_file.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/of.h>
-
-#include <linux/atomic.h>
-#include <asm/eeh.h>
-#include <asm/eeh_event.h>
-#include <asm/io.h>
-#include <asm/machdep.h>
-#include <asm/ppc-pci.h>
-#include <asm/rtas.h>
-
-
-/** Overview:
- * EEH, or "Extended Error Handling" is a PCI bridge technology for
- * dealing with PCI bus errors that can't be dealt with within the
- * usual PCI framework, except by check-stopping the CPU. Systems
- * that are designed for high-availability/reliability cannot afford
- * to crash due to a "mere" PCI error, thus the need for EEH.
- * An EEH-capable bridge operates by converting a detected error
- * into a "slot freeze", taking the PCI adapter off-line, making
- * the slot behave, from the OS'es point of view, as if the slot
- * were "empty": all reads return 0xff's and all writes are silently
- * ignored. EEH slot isolation events can be triggered by parity
- * errors on the address or data busses (e.g. during posted writes),
- * which in turn might be caused by low voltage on the bus, dust,
- * vibration, humidity, radioactivity or plain-old failed hardware.
- *
- * Note, however, that one of the leading causes of EEH slot
- * freeze events are buggy device drivers, buggy device microcode,
- * or buggy device hardware. This is because any attempt by the
- * device to bus-master data to a memory address that is not
- * assigned to the device will trigger a slot freeze. (The idea
- * is to prevent devices-gone-wild from corrupting system memory).
- * Buggy hardware/drivers will have a miserable time co-existing
- * with EEH.
- *
- * Ideally, a PCI device driver, when suspecting that an isolation
- * event has occurred (e.g. by reading 0xff's), will then ask EEH
- * whether this is the case, and then take appropriate steps to
- * reset the PCI slot, the PCI device, and then resume operations.
- * However, until that day, the checking is done here, with the
- * eeh_check_failure() routine embedded in the MMIO macros. If
- * the slot is found to be isolated, an "EEH Event" is synthesized
- * and sent out for processing.
- */
-
-/* If a device driver keeps reading an MMIO register in an interrupt
- * handler after a slot isolation event, it might be broken.
- * This sets the threshold for how many read attempts we allow
- * before printing an error message.
- */
-#define EEH_MAX_FAILS 2100000
-
-/* Time to wait for a PCI slot to report status, in milliseconds */
-#define PCI_BUS_RESET_WAIT_MSEC (60*1000)
-
-/* Platform dependent EEH operations */
-struct eeh_ops *eeh_ops = NULL;
-
-int eeh_subsystem_enabled;
-EXPORT_SYMBOL(eeh_subsystem_enabled);
-
-/*
- * EEH probe mode support. The intention is to support multiple
- * platforms for EEH. Some platforms like pSeries do PCI emunation
- * based on device tree. However, other platforms like powernv probe
- * PCI devices from hardware. The flag is used to distinguish that.
- * In addition, struct eeh_ops::probe would be invoked for particular
- * OF node or PCI device so that the corresponding PE would be created
- * there.
- */
-int eeh_probe_mode;
-
-/* Global EEH mutex */
-DEFINE_MUTEX(eeh_mutex);
-
-/* Lock to avoid races due to multiple reports of an error */
-static DEFINE_RAW_SPINLOCK(confirm_error_lock);
-
-/* Buffer for reporting pci register dumps. Its here in BSS, and
- * not dynamically alloced, so that it ends up in RMO where RTAS
- * can access it.
- */
-#define EEH_PCI_REGS_LOG_LEN 4096
-static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];
-
-/*
- * The struct is used to maintain the EEH global statistic
- * information. Besides, the EEH global statistics will be
- * exported to user space through procfs
- */
-struct eeh_stats {
- u64 no_device; /* PCI device not found */
- u64 no_dn; /* OF node not found */
- u64 no_cfg_addr; /* Config address not found */
- u64 ignored_check; /* EEH check skipped */
- u64 total_mmio_ffs; /* Total EEH checks */
- u64 false_positives; /* Unnecessary EEH checks */
- u64 slot_resets; /* PE reset */
-};
-
-static struct eeh_stats eeh_stats;
-
-#define IS_BRIDGE(class_code) (((class_code)<<16) == PCI_BASE_CLASS_BRIDGE)
-
-/**
- * eeh_gather_pci_data - Copy assorted PCI config space registers to buff
- * @edev: device to report data for
- * @buf: point to buffer in which to log
- * @len: amount of room in buffer
- *
- * This routine captures assorted PCI configuration space data,
- * and puts them into a buffer for RTAS error logging.
- */
-static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
-{
- struct device_node *dn = eeh_dev_to_of_node(edev);
- struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
- u32 cfg;
- int cap, i;
- int n = 0;
-
- n += scnprintf(buf+n, len-n, "%s\n", dn->full_name);
- printk(KERN_WARNING "EEH: of node=%s\n", dn->full_name);
-
- eeh_ops->read_config(dn, PCI_VENDOR_ID, 4, &cfg);
- n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
- printk(KERN_WARNING "EEH: PCI device/vendor: %08x\n", cfg);
-
- eeh_ops->read_config(dn, PCI_COMMAND, 4, &cfg);
- n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
- printk(KERN_WARNING "EEH: PCI cmd/status register: %08x\n", cfg);
-
- if (!dev) {
- printk(KERN_WARNING "EEH: no PCI device for this of node\n");
- return n;
- }
-
- /* Gather bridge-specific registers */
- if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
- eeh_ops->read_config(dn, PCI_SEC_STATUS, 2, &cfg);
- n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
- printk(KERN_WARNING "EEH: Bridge secondary status: %04x\n", cfg);
-
- eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &cfg);
- n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
- printk(KERN_WARNING "EEH: Bridge control: %04x\n", cfg);
- }
-
- /* Dump out the PCI-X command and status regs */
- cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
- if (cap) {
- eeh_ops->read_config(dn, cap, 4, &cfg);
- n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
- printk(KERN_WARNING "EEH: PCI-X cmd: %08x\n", cfg);
-
- eeh_ops->read_config(dn, cap+4, 4, &cfg);
- n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
- printk(KERN_WARNING "EEH: PCI-X status: %08x\n", cfg);
- }
-
- /* If PCI-E capable, dump PCI-E cap 10, and the AER */
- cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (cap) {
- n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
- printk(KERN_WARNING
- "EEH: PCI-E capabilities and status follow:\n");
-
- for (i=0; i<=8; i++) {
- eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
- n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
- printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg);
- }
-
- cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
- if (cap) {
- n += scnprintf(buf+n, len-n, "pci-e AER:\n");
- printk(KERN_WARNING
- "EEH: PCI-E AER capability register set follows:\n");
-
- for (i=0; i<14; i++) {
- eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
- n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
- printk(KERN_WARNING "EEH: PCI-E AER %02x: %08x\n", i, cfg);
- }
- }
- }
-
- return n;
-}
-
-/**
- * eeh_slot_error_detail - Generate combined log including driver log and error log
- * @pe: EEH PE
- * @severity: temporary or permanent error log
- *
- * This routine should be called to generate the combined log, which
- * is comprised of driver log and error log. The driver log is figured
- * out from the config space of the corresponding PCI device, while
- * the error log is fetched through platform dependent function call.
- */
-void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
-{
- size_t loglen = 0;
- struct eeh_dev *edev;
-
- eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
- eeh_ops->configure_bridge(pe);
- eeh_pe_restore_bars(pe);
-
- pci_regs_buf[0] = 0;
- eeh_pe_for_each_dev(pe, edev) {
- loglen += eeh_gather_pci_data(edev, pci_regs_buf,
- EEH_PCI_REGS_LOG_LEN);
- }
-
- eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
-}
-
-/**
- * eeh_token_to_phys - Convert EEH address token to phys address
- * @token: I/O token, should be address in the form 0xA....
- *
- * This routine should be called to convert virtual I/O address
- * to physical one.
- */
-static inline unsigned long eeh_token_to_phys(unsigned long token)
-{
- pte_t *ptep;
- unsigned long pa;
-
- ptep = find_linux_pte(init_mm.pgd, token);
- if (!ptep)
- return token;
- pa = pte_pfn(*ptep) << PAGE_SHIFT;
-
- return pa | (token & (PAGE_SIZE-1));
-}
-
-/**
- * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
- * @edev: eeh device
- *
- * Check for an EEH failure for the given device node. Call this
- * routine if the result of a read was all 0xff's and you want to
- * find out if this is due to an EEH slot freeze. This routine
- * will query firmware for the EEH status.
- *
- * Returns 0 if there has not been an EEH error; otherwise returns
- * a non-zero value and queues up a slot isolation event notification.
- *
- * It is safe to call this routine in an interrupt context.
- */
-int eeh_dev_check_failure(struct eeh_dev *edev)
-{
- int ret;
- unsigned long flags;
- struct device_node *dn;
- struct pci_dev *dev;
- struct eeh_pe *pe;
- int rc = 0;
- const char *location;
-
- eeh_stats.total_mmio_ffs++;
-
- if (!eeh_subsystem_enabled)
- return 0;
-
- if (!edev) {
- eeh_stats.no_dn++;
- return 0;
- }
- dn = eeh_dev_to_of_node(edev);
- dev = eeh_dev_to_pci_dev(edev);
- pe = edev->pe;
-
- /* Access to IO BARs might get this far and still not want checking. */
- if (!pe) {
- eeh_stats.ignored_check++;
- pr_debug("EEH: Ignored check for %s %s\n",
- eeh_pci_name(dev), dn->full_name);
- return 0;
- }
-
- if (!pe->addr && !pe->config_addr) {
- eeh_stats.no_cfg_addr++;
- return 0;
- }
-
- /* If we already have a pending isolation event for this
- * slot, we know it's bad already, we don't need to check.
- * Do this checking under a lock; as multiple PCI devices
- * in one slot might report errors simultaneously, and we
- * only want one error recovery routine running.
- */
- raw_spin_lock_irqsave(&confirm_error_lock, flags);
- rc = 1;
- if (pe->state & EEH_PE_ISOLATED) {
- pe->check_count++;
- if (pe->check_count % EEH_MAX_FAILS == 0) {
- location = of_get_property(dn, "ibm,loc-code", NULL);
- printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
- "location=%s driver=%s pci addr=%s\n",
- pe->check_count, location,
- eeh_driver_name(dev), eeh_pci_name(dev));
- printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
- eeh_driver_name(dev));
- dump_stack();
- }
- goto dn_unlock;
- }
-
- /*
- * Now test for an EEH failure. This is VERY expensive.
- * Note that the eeh_config_addr may be a parent device
- * in the case of a device behind a bridge, or it may be
- * function zero of a multi-function device.
- * In any case they must share a common PHB.
- */
- ret = eeh_ops->get_state(pe, NULL);
-
- /* Note that config-io to empty slots may fail;
- * they are empty when they don't have children.
- * We will punt with the following conditions: Failure to get
- * PE's state, EEH not support and Permanently unavailable
- * state, PE is in good state.
- */
- if ((ret < 0) ||
- (ret == EEH_STATE_NOT_SUPPORT) ||
- (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
- (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
- eeh_stats.false_positives++;
- pe->false_positives++;
- rc = 0;
- goto dn_unlock;
- }
-
- eeh_stats.slot_resets++;
-
- /* Avoid repeated reports of this failure, including problems
- * with other functions on this device, and functions under
- * bridges.
- */
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
- raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
-
- eeh_send_failure_event(pe);
-
- /* Most EEH events are due to device driver bugs. Having
- * a stack trace will help the device-driver authors figure
- * out what happened. So print that out.
- */
- WARN(1, "EEH: failure detected\n");
- return 1;
-
-dn_unlock:
- raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
- return rc;
-}
-
-EXPORT_SYMBOL_GPL(eeh_dev_check_failure);
-
-/**
- * eeh_check_failure - Check if all 1's data is due to EEH slot freeze
- * @token: I/O token, should be address in the form 0xA....
- * @val: value, should be all 1's (XXX why do we need this arg??)
- *
- * Check for an EEH failure at the given token address. Call this
- * routine if the result of a read was all 0xff's and you want to
- * find out if this is due to an EEH slot freeze event. This routine
- * will query firmware for the EEH status.
- *
- * Note this routine is safe to call in an interrupt context.
- */
-unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
-{
- unsigned long addr;
- struct eeh_dev *edev;
-
- /* Finding the phys addr + pci device; this is pretty quick. */
- addr = eeh_token_to_phys((unsigned long __force) token);
- edev = eeh_addr_cache_get_dev(addr);
- if (!edev) {
- eeh_stats.no_device++;
- return val;
- }
-
- eeh_dev_check_failure(edev);
-
- pci_dev_put(eeh_dev_to_pci_dev(edev));
- return val;
-}
-
-EXPORT_SYMBOL(eeh_check_failure);
-
-
-/**
- * eeh_pci_enable - Enable MMIO or DMA transfers for this slot
- * @pe: EEH PE
- *
- * This routine should be called to reenable frozen MMIO or DMA
- * so that it would work correctly again. It's useful while doing
- * recovery or log collection on the indicated device.
- */
-int eeh_pci_enable(struct eeh_pe *pe, int function)
-{
- int rc;
-
- rc = eeh_ops->set_option(pe, function);
- if (rc)
- pr_warning("%s: Unexpected state change %d on PHB#%d-PE#%x, err=%d\n",
- __func__, function, pe->phb->global_number, pe->addr, rc);
-
- rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
- if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) &&
- (function == EEH_OPT_THAW_MMIO))
- return 0;
-
- return rc;
-}
-
-/**
- * pcibios_set_pcie_slot_reset - Set PCI-E reset state
- * @dev: pci device struct
- * @state: reset state to enter
- *
- * Return value:
- * 0 if success
- */
-int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
-{
- struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
- struct eeh_pe *pe = edev->pe;
-
- if (!pe) {
- pr_err("%s: No PE found on PCI device %s\n",
- __func__, pci_name(dev));
- return -EINVAL;
- }
-
- switch (state) {
- case pcie_deassert_reset:
- eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
- break;
- case pcie_hot_reset:
- eeh_ops->reset(pe, EEH_RESET_HOT);
- break;
- case pcie_warm_reset:
- eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
- break;
- default:
- return -EINVAL;
- };
-
- return 0;
-}
-
-/**
- * eeh_set_pe_freset - Check the required reset for the indicated device
- * @data: EEH device
- * @flag: return value
- *
- * Each device might have its preferred reset type: fundamental or
- * hot reset. The routine is used to collected the information for
- * the indicated device and its children so that the bunch of the
- * devices could be reset properly.
- */
-static void *eeh_set_dev_freset(void *data, void *flag)
-{
- struct pci_dev *dev;
- unsigned int *freset = (unsigned int *)flag;
- struct eeh_dev *edev = (struct eeh_dev *)data;
-
- dev = eeh_dev_to_pci_dev(edev);
- if (dev)
- *freset |= dev->needs_freset;
-
- return NULL;
-}
-
-/**
- * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second
- * @pe: EEH PE
- *
- * Assert the PCI #RST line for 1/4 second.
- */
-static void eeh_reset_pe_once(struct eeh_pe *pe)
-{
- unsigned int freset = 0;
-
- /* Determine type of EEH reset required for
- * Partitionable Endpoint, a hot-reset (1)
- * or a fundamental reset (3).
- * A fundamental reset required by any device under
- * Partitionable Endpoint trumps hot-reset.
- */
- eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset);
-
- if (freset)
- eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
- else
- eeh_ops->reset(pe, EEH_RESET_HOT);
-
- /* The PCI bus requires that the reset be held high for at least
- * a 100 milliseconds. We wait a bit longer 'just in case'.
- */
-#define PCI_BUS_RST_HOLD_TIME_MSEC 250
- msleep(PCI_BUS_RST_HOLD_TIME_MSEC);
-
- /* We might get hit with another EEH freeze as soon as the
- * pci slot reset line is dropped. Make sure we don't miss
- * these, and clear the flag now.
- */
- eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
-
- eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
-
- /* After a PCI slot has been reset, the PCI Express spec requires
- * a 1.5 second idle time for the bus to stabilize, before starting
- * up traffic.
- */
-#define PCI_BUS_SETTLE_TIME_MSEC 1800
- msleep(PCI_BUS_SETTLE_TIME_MSEC);
-}
-
-/**
- * eeh_reset_pe - Reset the indicated PE
- * @pe: EEH PE
- *
- * This routine should be called to reset indicated device, including
- * PE. A PE might include multiple PCI devices and sometimes PCI bridges
- * might be involved as well.
- */
-int eeh_reset_pe(struct eeh_pe *pe)
-{
- int i, rc;
-
- /* Take three shots at resetting the bus */
- for (i=0; i<3; i++) {
- eeh_reset_pe_once(pe);
-
- rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
- if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE))
- return 0;
-
- if (rc < 0) {
- pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
- __func__, pe->phb->global_number, pe->addr);
- return -1;
- }
- pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n",
- i+1, pe->phb->global_number, pe->addr, rc);
- }
-
- return -1;
-}
-
-/**
- * eeh_save_bars - Save device bars
- * @edev: PCI device associated EEH device
- *
- * Save the values of the device bars. Unlike the restore
- * routine, this routine is *not* recursive. This is because
- * PCI devices are added individually; but, for the restore,
- * an entire slot is reset at a time.
- */
-void eeh_save_bars(struct eeh_dev *edev)
-{
- int i;
- struct device_node *dn;
-
- if (!edev)
- return;
- dn = eeh_dev_to_of_node(edev);
-
- for (i = 0; i < 16; i++)
- eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]);
-}
-
-/**
- * eeh_ops_register - Register platform dependent EEH operations
- * @ops: platform dependent EEH operations
- *
- * Register the platform dependent EEH operation callback
- * functions. The platform should call this function before
- * any other EEH operations.
- */
-int __init eeh_ops_register(struct eeh_ops *ops)
-{
- if (!ops->name) {
- pr_warning("%s: Invalid EEH ops name for %p\n",
- __func__, ops);
- return -EINVAL;
- }
-
- if (eeh_ops && eeh_ops != ops) {
- pr_warning("%s: EEH ops of platform %s already existing (%s)\n",
- __func__, eeh_ops->name, ops->name);
- return -EEXIST;
- }
-
- eeh_ops = ops;
-
- return 0;
-}
-
-/**
- * eeh_ops_unregister - Unreigster platform dependent EEH operations
- * @name: name of EEH platform operations
- *
- * Unregister the platform dependent EEH operation callback
- * functions.
- */
-int __exit eeh_ops_unregister(const char *name)
-{
- if (!name || !strlen(name)) {
- pr_warning("%s: Invalid EEH ops name\n",
- __func__);
- return -EINVAL;
- }
-
- if (eeh_ops && !strcmp(eeh_ops->name, name)) {
- eeh_ops = NULL;
- return 0;
- }
-
- return -EEXIST;
-}
-
-/**
- * eeh_init - EEH initialization
- *
- * Initialize EEH by trying to enable it for all of the adapters in the system.
- * As a side effect we can determine here if eeh is supported at all.
- * Note that we leave EEH on so failed config cycles won't cause a machine
- * check. If a user turns off EEH for a particular adapter they are really
- * telling Linux to ignore errors. Some hardware (e.g. POWER5) won't
- * grant access to a slot if EEH isn't enabled, and so we always enable
- * EEH for all slots/all devices.
- *
- * The eeh-force-off option disables EEH checking globally, for all slots.
- * Even if force-off is set, the EEH hardware is still enabled, so that
- * newer systems can boot.
- */
-static int __init eeh_init(void)
-{
- struct pci_controller *hose, *tmp;
- struct device_node *phb;
- int ret;
-
- /* call platform initialization function */
- if (!eeh_ops) {
- pr_warning("%s: Platform EEH operation not found\n",
- __func__);
- return -EEXIST;
- } else if ((ret = eeh_ops->init())) {
- pr_warning("%s: Failed to call platform init function (%d)\n",
- __func__, ret);
- return ret;
- }
-
- raw_spin_lock_init(&confirm_error_lock);
-
- /* Enable EEH for all adapters */
- if (eeh_probe_mode_devtree()) {
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node) {
- phb = hose->dn;
- traverse_pci_devices(phb, eeh_ops->of_probe, NULL);
- }
- }
-
- if (eeh_subsystem_enabled)
- pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
- else
- pr_warning("EEH: No capable adapters found\n");
-
- return ret;
-}
-
-core_initcall_sync(eeh_init);
-
-/**
- * eeh_add_device_early - Enable EEH for the indicated device_node
- * @dn: device node for which to set up EEH
- *
- * This routine must be used to perform EEH initialization for PCI
- * devices that were added after system boot (e.g. hotplug, dlpar).
- * This routine must be called before any i/o is performed to the
- * adapter (inluding any config-space i/o).
- * Whether this actually enables EEH or not for this device depends
- * on the CEC architecture, type of the device, on earlier boot
- * command-line arguments & etc.
- */
-static void eeh_add_device_early(struct device_node *dn)
-{
- struct pci_controller *phb;
-
- if (!of_node_to_eeh_dev(dn))
- return;
- phb = of_node_to_eeh_dev(dn)->phb;
-
- /* USB Bus children of PCI devices will not have BUID's */
- if (NULL == phb || 0 == phb->buid)
- return;
-
- /* FIXME: hotplug support on POWERNV */
- eeh_ops->of_probe(dn, NULL);
-}
-
-/**
- * eeh_add_device_tree_early - Enable EEH for the indicated device
- * @dn: device node
- *
- * This routine must be used to perform EEH initialization for the
- * indicated PCI device that was added after system boot (e.g.
- * hotplug, dlpar).
- */
-void eeh_add_device_tree_early(struct device_node *dn)
-{
- struct device_node *sib;
-
- for_each_child_of_node(dn, sib)
- eeh_add_device_tree_early(sib);
- eeh_add_device_early(dn);
-}
-EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
-
-/**
- * eeh_add_device_late - Perform EEH initialization for the indicated pci device
- * @dev: pci device for which to set up EEH
- *
- * This routine must be used to complete EEH initialization for PCI
- * devices that were added after system boot (e.g. hotplug, dlpar).
- */
-static void eeh_add_device_late(struct pci_dev *dev)
-{
- struct device_node *dn;
- struct eeh_dev *edev;
-
- if (!dev || !eeh_subsystem_enabled)
- return;
-
- pr_debug("EEH: Adding device %s\n", pci_name(dev));
-
- dn = pci_device_to_OF_node(dev);
- edev = of_node_to_eeh_dev(dn);
- if (edev->pdev == dev) {
- pr_debug("EEH: Already referenced !\n");
- return;
- }
- WARN_ON(edev->pdev);
-
- pci_dev_get(dev);
- edev->pdev = dev;
- dev->dev.archdata.edev = edev;
-
- eeh_addr_cache_insert_dev(dev);
- eeh_sysfs_add_device(dev);
-}
-
-/**
- * eeh_add_device_tree_late - Perform EEH initialization for the indicated PCI bus
- * @bus: PCI bus
- *
- * This routine must be used to perform EEH initialization for PCI
- * devices which are attached to the indicated PCI bus. The PCI bus
- * is added after system boot through hotplug or dlpar.
- */
-void eeh_add_device_tree_late(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- eeh_add_device_late(dev);
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- struct pci_bus *subbus = dev->subordinate;
- if (subbus)
- eeh_add_device_tree_late(subbus);
- }
- }
-}
-EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
-
-/**
- * eeh_remove_device - Undo EEH setup for the indicated pci device
- * @dev: pci device to be removed
- * @purge_pe: remove the PE or not
- *
- * This routine should be called when a device is removed from
- * a running system (e.g. by hotplug or dlpar). It unregisters
- * the PCI device from the EEH subsystem. I/O errors affecting
- * this device will no longer be detected after this call; thus,
- * i/o errors affecting this slot may leave this device unusable.
- */
-static void eeh_remove_device(struct pci_dev *dev, int purge_pe)
-{
- struct eeh_dev *edev;
-
- if (!dev || !eeh_subsystem_enabled)
- return;
- edev = pci_dev_to_eeh_dev(dev);
-
- /* Unregister the device with the EEH/PCI address search system */
- pr_debug("EEH: Removing device %s\n", pci_name(dev));
-
- if (!edev || !edev->pdev) {
- pr_debug("EEH: Not referenced !\n");
- return;
- }
- edev->pdev = NULL;
- dev->dev.archdata.edev = NULL;
- pci_dev_put(dev);
-
- eeh_rmv_from_parent_pe(edev, purge_pe);
- eeh_addr_cache_rmv_dev(dev);
- eeh_sysfs_remove_device(dev);
-}
-
-/**
- * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device
- * @dev: PCI device
- * @purge_pe: remove the corresponding PE or not
- *
- * This routine must be called when a device is removed from the
- * running system through hotplug or dlpar. The corresponding
- * PCI address cache will be removed.
- */
-void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe)
-{
- struct pci_bus *bus = dev->subordinate;
- struct pci_dev *child, *tmp;
-
- eeh_remove_device(dev, purge_pe);
-
- if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
- eeh_remove_bus_device(child, purge_pe);
- }
-}
-EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
-
-static int proc_eeh_show(struct seq_file *m, void *v)
-{
- if (0 == eeh_subsystem_enabled) {
- seq_printf(m, "EEH Subsystem is globally disabled\n");
- seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
- } else {
- seq_printf(m, "EEH Subsystem is enabled\n");
- seq_printf(m,
- "no device=%llu\n"
- "no device node=%llu\n"
- "no config address=%llu\n"
- "check not wanted=%llu\n"
- "eeh_total_mmio_ffs=%llu\n"
- "eeh_false_positives=%llu\n"
- "eeh_slot_resets=%llu\n",
- eeh_stats.no_device,
- eeh_stats.no_dn,
- eeh_stats.no_cfg_addr,
- eeh_stats.ignored_check,
- eeh_stats.total_mmio_ffs,
- eeh_stats.false_positives,
- eeh_stats.slot_resets);
- }
-
- return 0;
-}
-
-static int proc_eeh_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_eeh_show, NULL);
-}
-
-static const struct file_operations proc_eeh_operations = {
- .open = proc_eeh_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init eeh_init_proc(void)
-{
- if (machine_is(pseries))
- proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
- return 0;
-}
-__initcall(eeh_init_proc);
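The overview comment in the removed eeh.c describes drivers detecting a slot freeze by noticing all-ones MMIO reads. A hypothetical driver helper showing that pattern is sketched below; in mainline the check is embedded in the powerpc MMIO accessors themselves, so most drivers never call it directly, and the register offset here is invented.

#include <linux/io.h>
#include <asm/eeh.h>

/* Sketch: an all-ones MMIO read may mean the slot was isolated rather
 * than real data, so the value is handed to eeh_check_failure(), which
 * queries firmware and queues a recovery event if the slot is frozen. */
static u32 mydev_read_status(void __iomem *regs)
{
	u32 val = readl(regs + 0x10);	/* 0x10: hypothetical status reg */

	if (val == ~0U)
		val = eeh_check_failure(regs + 0x10, val);

	return val;
}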
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
deleted file mode 100644
index 5a4c8790305..00000000000
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * PCI address cache; allows the lookup of PCI devices based on I/O address
- *
- * Copyright IBM Corporation 2004
- * Copyright Linas Vepstas <linas@austin.ibm.com> 2004
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-
-
-/**
- * The pci address cache subsystem. This subsystem places
- * PCI device address resources into a red-black tree, sorted
- * according to the address range, so that given only an i/o
- * address, the corresponding PCI device can be **quickly**
- * found. It is safe to perform an address lookup in an interrupt
- * context; this ability is an important feature.
- *
- * Currently, the only customer of this code is the EEH subsystem;
- * thus, this code has been somewhat tailored to suit EEH better.
- * In particular, the cache does *not* hold the addresses of devices
- * for which EEH is not enabled.
- *
- * (Implementation Note: The RB tree seems to be better/faster
- * than any hash algo I could think of for this problem, even
- * with the penalty of slow pointer chases for d-cache misses).
- */
-struct pci_io_addr_range {
- struct rb_node rb_node;
- unsigned long addr_lo;
- unsigned long addr_hi;
- struct eeh_dev *edev;
- struct pci_dev *pcidev;
- unsigned int flags;
-};
-
-static struct pci_io_addr_cache {
- struct rb_root rb_root;
- spinlock_t piar_lock;
-} pci_io_addr_cache_root;
-
-static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
-{
- struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
-
- while (n) {
- struct pci_io_addr_range *piar;
- piar = rb_entry(n, struct pci_io_addr_range, rb_node);
-
- if (addr < piar->addr_lo) {
- n = n->rb_left;
- } else {
- if (addr > piar->addr_hi) {
- n = n->rb_right;
- } else {
- pci_dev_get(piar->pcidev);
- return piar->edev;
- }
- }
- }
-
- return NULL;
-}
-
-/**
- * eeh_addr_cache_get_dev - Get device, given only address
- * @addr: mmio (PIO) phys address or i/o port number
- *
- * Given an mmio phys address, or a port number, find a pci device
- * that implements this address. Be sure to pci_dev_put the device
- * when finished. I/O port numbers are assumed to be offset
- * from zero (that is, they do *not* have pci_io_addr added in).
- * It is safe to call this function within an interrupt.
- */
-struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr)
-{
- struct eeh_dev *edev;
- unsigned long flags;
-
- spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
- edev = __eeh_addr_cache_get_device(addr);
- spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
- return edev;
-}
-
-#ifdef DEBUG
-/*
- * Handy-dandy debug print routine, does nothing more
- * than print out the contents of our addr cache.
- */
-static void eeh_addr_cache_print(struct pci_io_addr_cache *cache)
-{
- struct rb_node *n;
- int cnt = 0;
-
- n = rb_first(&cache->rb_root);
- while (n) {
- struct pci_io_addr_range *piar;
- piar = rb_entry(n, struct pci_io_addr_range, rb_node);
- pr_debug("PCI: %s addr range %d [%lx-%lx]: %s\n",
- (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
- piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
- cnt++;
- n = rb_next(n);
- }
-}
-#endif
-
-/* Insert address range into the rb tree. */
-static struct pci_io_addr_range *
-eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
- unsigned long ahi, unsigned int flags)
-{
- struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct pci_io_addr_range *piar;
-
- /* Walk tree, find a place to insert into tree */
- while (*p) {
- parent = *p;
- piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
- if (ahi < piar->addr_lo) {
- p = &parent->rb_left;
- } else if (alo > piar->addr_hi) {
- p = &parent->rb_right;
- } else {
- if (dev != piar->pcidev ||
- alo != piar->addr_lo || ahi != piar->addr_hi) {
- pr_warning("PIAR: overlapping address range\n");
- }
- return piar;
- }
- }
- piar = kzalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
- if (!piar)
- return NULL;
-
- pci_dev_get(dev);
- piar->addr_lo = alo;
- piar->addr_hi = ahi;
- piar->edev = pci_dev_to_eeh_dev(dev);
- piar->pcidev = dev;
- piar->flags = flags;
-
-#ifdef DEBUG
- pr_debug("PIAR: insert range=[%lx:%lx] dev=%s\n",
- alo, ahi, pci_name(dev));
-#endif
-
- rb_link_node(&piar->rb_node, parent, p);
- rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);
-
- return piar;
-}
-
-static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
-{
- struct device_node *dn;
- struct eeh_dev *edev;
- int i;
-
- dn = pci_device_to_OF_node(dev);
- if (!dn) {
- pr_warning("PCI: no pci dn found for dev=%s\n", pci_name(dev));
- return;
- }
-
- edev = of_node_to_eeh_dev(dn);
- if (!edev) {
- pr_warning("PCI: no EEH dev found for dn=%s\n",
- dn->full_name);
- return;
- }
-
- /* Skip any devices for which EEH is not enabled. */
- if (!edev->pe) {
-#ifdef DEBUG
- pr_info("PCI: skip building address cache for=%s - %s\n",
- pci_name(dev), dn->full_name);
-#endif
- return;
- }
-
- /* Walk resources on this device, poke them into the tree */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- unsigned long start = pci_resource_start(dev,i);
- unsigned long end = pci_resource_end(dev,i);
- unsigned int flags = pci_resource_flags(dev,i);
-
- /* We are interested only bus addresses, not dma or other stuff */
- if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
- continue;
- if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
- continue;
- eeh_addr_cache_insert(dev, start, end, flags);
- }
-}
-
-/**
- * eeh_addr_cache_insert_dev - Add a device to the address cache
- * @dev: PCI device whose I/O addresses we are interested in.
- *
- * In order to support the fast lookup of devices based on addresses,
- * we maintain a cache of devices that can be quickly searched.
- * This routine adds a device to that cache.
- */
-void eeh_addr_cache_insert_dev(struct pci_dev *dev)
-{
- unsigned long flags;
-
- /* Ignore PCI bridges */
- if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
- return;
-
- spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
- __eeh_addr_cache_insert_dev(dev);
- spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
-}
-
-static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev)
-{
- struct rb_node *n;
-
-restart:
- n = rb_first(&pci_io_addr_cache_root.rb_root);
- while (n) {
- struct pci_io_addr_range *piar;
- piar = rb_entry(n, struct pci_io_addr_range, rb_node);
-
- if (piar->pcidev == dev) {
- rb_erase(n, &pci_io_addr_cache_root.rb_root);
- pci_dev_put(piar->pcidev);
- kfree(piar);
- goto restart;
- }
- n = rb_next(n);
- }
-}
-
-/**
- * eeh_addr_cache_rmv_dev - remove pci device from addr cache
- * @dev: device to remove
- *
- * Remove a device from the addr-cache tree.
- * This is potentially expensive, since it will walk
- * the tree multiple times (once per resource).
- * But so what; device removal doesn't need to be that fast.
- */
-void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
- __eeh_addr_cache_rmv_dev(dev);
- spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
-}
-
-/**
- * eeh_addr_cache_build - Build a cache of I/O addresses
- *
- * Build a cache of pci i/o addresses. This cache will be used to
- * find the pci device that corresponds to a given address.
- * This routine scans all pci busses to build the cache.
- * Must be run late in boot process, after the pci controllers
- * have been scanned for devices (after all device resources are known).
- */
-void __init eeh_addr_cache_build(void)
-{
- struct device_node *dn;
- struct eeh_dev *edev;
- struct pci_dev *dev = NULL;
-
- spin_lock_init(&pci_io_addr_cache_root.piar_lock);
-
- for_each_pci_dev(dev) {
- eeh_addr_cache_insert_dev(dev);
-
- dn = pci_device_to_OF_node(dev);
- if (!dn)
- continue;
-
- edev = of_node_to_eeh_dev(dn);
- if (!edev)
- continue;
-
- pci_dev_get(dev); /* matching put is in eeh_remove_device() */
- dev->dev.archdata.edev = edev;
- edev->pdev = dev;
-
- eeh_sysfs_add_device(dev);
- }
-
-#ifdef DEBUG
- /* Verify tree built up above, echo back the list of addrs. */
- eeh_addr_cache_print(&pci_io_addr_cache_root);
-#endif
-}
-
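The removed eeh_cache.c keeps non-overlapping [addr_lo, addr_hi] ranges in an rbtree so an I/O address can be mapped back to its device in O(log n), even from interrupt context. A sorted-array sketch of the same containment search (plain C, not kernel code) shows the three-way test the tree walk performs:

#include <stdio.h>

struct range { unsigned long lo, hi; const char *dev; };

static const struct range *range_lookup(const struct range *r, int n,
					unsigned long addr)
{
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = (lo + hi) / 2;

		if (addr < r[mid].lo)
			hi = mid - 1;		/* go left */
		else if (addr > r[mid].hi)
			lo = mid + 1;		/* go right */
		else
			return &r[mid];		/* lo <= addr <= hi: hit */
	}
	return NULL;
}

int main(void)
{
	static const struct range map[] = {
		{ 0x1000, 0x1fff, "0000:00:01.0" },
		{ 0x4000, 0x5fff, "0000:00:02.0" },
	};
	const struct range *hit = range_lookup(map, 2, 0x4100);

	printf("%s\n", hit ? hit->dev : "no device");
	return 0;
}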
diff --git a/arch/powerpc/platforms/pseries/eeh_dev.c b/arch/powerpc/platforms/pseries/eeh_dev.c
deleted file mode 100644
index 1efa28f5fc5..00000000000
--- a/arch/powerpc/platforms/pseries/eeh_dev.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * The file intends to implement dynamic creation of EEH device, which will
- * be bound with OF node and PCI device simutaneously. The EEH devices would
- * be foundamental information for EEH core components to work proerly. Besides,
- * We have to support multiple situations where dynamic creation of EEH device
- * is required:
- *
- * 1) Before PCI emunation starts, we need create EEH devices according to the
- * PCI sensitive OF nodes.
- * 2) When PCI emunation is done, we need do the binding between PCI device and
- * the associated EEH device.
- * 3) DR (Dynamic Reconfiguration) would create PCI sensitive OF node. EEH device
- * will be created while PCI sensitive OF node is detected from DR.
- * 4) PCI hotplug needs redoing the binding between PCI device and EEH device. If
- * PHB is newly inserted, we also need create EEH devices accordingly.
- *
- * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/export.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-
-/**
- * eeh_dev_init - Create EEH device according to OF node
- * @dn: device node
- * @data: PHB
- *
- * It will create EEH device according to the given OF node. The function
- * might be called by PCI emunation, DR, PHB hotplug.
- */
-void *eeh_dev_init(struct device_node *dn, void *data)
-{
- struct pci_controller *phb = data;
- struct eeh_dev *edev;
-
- /* Allocate EEH device */
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev) {
- pr_warning("%s: out of memory\n", __func__);
- return NULL;
- }
-
- /* Associate EEH device with OF node */
- PCI_DN(dn)->edev = edev;
- edev->dn = dn;
- edev->phb = phb;
- INIT_LIST_HEAD(&edev->list);
-
- return NULL;
-}
-
-/**
- * eeh_dev_phb_init_dynamic - Create EEH devices for devices included in PHB
- * @phb: PHB
- *
- * Scan the PHB OF node and its child association, then create the
- * EEH devices accordingly
- */
-void eeh_dev_phb_init_dynamic(struct pci_controller *phb)
-{
- struct device_node *dn = phb->dn;
-
- /* EEH PE for PHB */
- eeh_phb_pe_create(phb);
-
- /* EEH device for PHB */
- eeh_dev_init(dn, phb);
-
- /* EEH devices for children OF nodes */
- traverse_pci_devices(dn, eeh_dev_init, phb);
-}
-
-/**
- * eeh_dev_phb_init - Create EEH devices for devices included in existing PHBs
- *
- * Scan all the existing PHBs and create EEH devices for their OF
- * nodes and their children OF nodes
- */
-static int __init eeh_dev_phb_init(void)
-{
- struct pci_controller *phb, *tmp;
-
- list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
- eeh_dev_phb_init_dynamic(phb);
-
- pr_info("EEH: devices created\n");
-
- return 0;
-}
-
-core_initcall(eeh_dev_phb_init);
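The removed eeh_dev.c header lists the situations in which EEH devices are created dynamically; the mechanism itself is a tree walk that binds one freshly allocated context to each OF node, as in the traverse_pci_devices(dn, eeh_dev_init, phb) call above. A toy sketch of that pattern, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct node {
	const char *name;
	struct node *child, *sibling;
	void *edev;		/* stand-in for the per-node EEH device */
};

/* Allocate and attach a context to every node in the tree. */
static void bind_edevs(struct node *n)
{
	for (; n; n = n->sibling) {
		n->edev = calloc(1, 64);	/* placeholder allocation */
		printf("edev created for %s\n", n->name);
		bind_edevs(n->child);
	}
}

int main(void)
{
	struct node leaf = { "pci@0,0", NULL, NULL, NULL };
	struct node phb = { "pci@800000020000000", &leaf, NULL, NULL };

	bind_edevs(&phb);
	return 0;
}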
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
deleted file mode 100644
index a3fefb61097..00000000000
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ /dev/null
@@ -1,552 +0,0 @@
-/*
- * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
- * Copyright IBM Corp. 2004 2005
- * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
- */
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <asm/eeh.h>
-#include <asm/eeh_event.h>
-#include <asm/ppc-pci.h>
-#include <asm/pci-bridge.h>
-#include <asm/prom.h>
-#include <asm/rtas.h>
-
-/**
- * eeh_pcid_name - Retrieve name of PCI device driver
- * @pdev: PCI device
- *
- * This routine is used to retrieve the name of PCI device driver
- * if that's valid.
- */
-static inline const char *eeh_pcid_name(struct pci_dev *pdev)
-{
- if (pdev && pdev->dev.driver)
- return pdev->dev.driver->name;
- return "";
-}
-
-/**
- * eeh_pcid_get - Get the PCI device driver
- * @pdev: PCI device
- *
- * The function is used to retrieve the PCI device driver for
- * the indicated PCI device. Besides, we will increase the reference
- * of the PCI device driver to prevent that being unloaded on
- * the fly. Otherwise, kernel crash would be seen.
- */
-static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
-{
- if (!pdev || !pdev->driver)
- return NULL;
-
- if (!try_module_get(pdev->driver->driver.owner))
- return NULL;
-
- return pdev->driver;
-}
-
-/**
- * eeh_pcid_put - Dereference on the PCI device driver
- * @pdev: PCI device
- *
- * The function is called to do dereference on the PCI device
- * driver of the indicated PCI device.
- */
-static inline void eeh_pcid_put(struct pci_dev *pdev)
-{
- if (!pdev || !pdev->driver)
- return;
-
- module_put(pdev->driver->driver.owner);
-}
-
-#if 0
-static void print_device_node_tree(struct pci_dn *pdn, int dent)
-{
- int i;
- struct device_node *pc;
-
- if (!pdn)
- return;
- for (i = 0; i < dent; i++)
- printk(" ");
- printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n",
- pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr,
- pdn->eeh_pe_config_addr, pdn->node->full_name);
- dent += 3;
- pc = pdn->node->child;
- while (pc) {
- print_device_node_tree(PCI_DN(pc), dent);
- pc = pc->sibling;
- }
-}
-#endif
-
-/**
- * eeh_disable_irq - Disable interrupt for the recovering device
- * @dev: PCI device
- *
- * This routine must be called when reporting a temporary or permanent
- * error to a particular PCI device, to disable that device's
- * interrupt. If the device has MSI or MSI-X enabled, no real work is
- * needed: EEH freezes DMA transfers for devices hit by EEH errors,
- * which effectively stops MSI and MSI-X delivery too.
- */
-static void eeh_disable_irq(struct pci_dev *dev)
-{
- struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
-
- /* Don't disable MSI and MSI-X interrupts. They are
- * effectively disabled by the DMA Stopped state
- * when an EEH error occurs.
- */
- if (dev->msi_enabled || dev->msix_enabled)
- return;
-
- if (!irq_has_action(dev->irq))
- return;
-
- edev->mode |= EEH_DEV_IRQ_DISABLED;
- disable_irq_nosync(dev->irq);
-}
-
-/**
- * eeh_enable_irq - Enable interrupt for the recovering device
- * @dev: PCI device
- *
- * This routine must be called to re-enable the interrupt once the
- * failed device can be resumed.
- */
-static void eeh_enable_irq(struct pci_dev *dev)
-{
- struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
-
- if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
- edev->mode &= ~EEH_DEV_IRQ_DISABLED;
- enable_irq(dev->irq);
- }
-}
-
-/**
- * eeh_report_error - Report pci error to each device driver
- * @data: eeh device
- * @userdata: return value
- *
- * Report an EEH error to each device driver, collect up and
- * merge the device driver responses. Cumulative response
- * passed back in "userdata".
- */
-static void *eeh_report_error(void *data, void *userdata)
-{
- struct eeh_dev *edev = (struct eeh_dev *)data;
- struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
- enum pci_ers_result rc, *res = userdata;
- struct pci_driver *driver;
-
- /* We might not have an associated PCI device;
- * if so, continue with the next one.
- */
- if (!dev) return NULL;
- dev->error_state = pci_channel_io_frozen;
-
- driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
-
- eeh_disable_irq(dev);
-
- if (!driver->err_handler ||
- !driver->err_handler->error_detected) {
- eeh_pcid_put(dev);
- return NULL;
- }
-
- rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
-
- /* A driver that needs a reset trumps all others */
- if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
- if (*res == PCI_ERS_RESULT_NONE) *res = rc;
-
- eeh_pcid_put(dev);
- return NULL;
-}
-
-/**
- * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
- * @data: eeh device
- * @userdata: return value
- *
- * Tells each device driver that IO ports, MMIO and config space I/O
- * are now enabled. Collects up and merges the device driver responses.
- * Cumulative response passed back in "userdata".
- */
-static void *eeh_report_mmio_enabled(void *data, void *userdata)
-{
- struct eeh_dev *edev = (struct eeh_dev *)data;
- struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
- enum pci_ers_result rc, *res = userdata;
- struct pci_driver *driver;
-
- driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
-
- if (!driver->err_handler ||
- !driver->err_handler->mmio_enabled) {
- eeh_pcid_put(dev);
- return NULL;
- }
-
- rc = driver->err_handler->mmio_enabled(dev);
-
- /* A driver that needs a reset trumps all others */
- if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
- if (*res == PCI_ERS_RESULT_NONE) *res = rc;
-
- eeh_pcid_put(dev);
- return NULL;
-}
-
-/**
- * eeh_report_reset - Tell device that slot has been reset
- * @data: eeh device
- * @userdata: return value
- *
- * This routine must be called while EEH resets a particular PCI
- * device, so that the associated driver can take action, usually
- * saving whatever state it needs to make the device work again
- * once it has been recovered.
- */
-static void *eeh_report_reset(void *data, void *userdata)
-{
- struct eeh_dev *edev = (struct eeh_dev *)data;
- struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
- enum pci_ers_result rc, *res = userdata;
- struct pci_driver *driver;
-
- if (!dev) return NULL;
- dev->error_state = pci_channel_io_normal;
-
- driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
-
- eeh_enable_irq(dev);
-
- if (!driver->err_handler ||
- !driver->err_handler->slot_reset) {
- eeh_pcid_put(dev);
- return NULL;
- }
-
- rc = driver->err_handler->slot_reset(dev);
- if ((*res == PCI_ERS_RESULT_NONE) ||
- (*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
- if (*res == PCI_ERS_RESULT_DISCONNECT &&
- rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
-
- eeh_pcid_put(dev);
- return NULL;
-}
-
-/**
- * eeh_report_resume - Tell device to resume normal operations
- * @data: eeh device
- * @userdata: return value
- *
- * This routine must be called to notify the device driver that it
- * may resume, so the driver can reinitialize whatever is needed to
- * make the recovered device work again.
- */
-static void *eeh_report_resume(void *data, void *userdata)
-{
- struct eeh_dev *edev = (struct eeh_dev *)data;
- struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
- struct pci_driver *driver;
-
- if (!dev) return NULL;
- dev->error_state = pci_channel_io_normal;
-
- driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
-
- eeh_enable_irq(dev);
-
- if (!driver->err_handler ||
- !driver->err_handler->resume) {
- eeh_pcid_put(dev);
- return NULL;
- }
-
- driver->err_handler->resume(dev);
-
- eeh_pcid_put(dev);
- return NULL;
-}
-
-/**
- * eeh_report_failure - Tell device driver that device is dead.
- * @data: eeh device
- * @userdata: return value
- *
- * This informs the device driver that the device is permanently
- * dead, and that no further recovery attempts will be made on it.
- */
-static void *eeh_report_failure(void *data, void *userdata)
-{
- struct eeh_dev *edev = (struct eeh_dev *)data;
- struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
- struct pci_driver *driver;
-
- if (!dev) return NULL;
- dev->error_state = pci_channel_io_perm_failure;
-
- driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
-
- eeh_disable_irq(dev);
-
- if (!driver->err_handler ||
- !driver->err_handler->error_detected) {
- eeh_pcid_put(dev);
- return NULL;
- }
-
- driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
-
- eeh_pcid_put(dev);
- return NULL;
-}
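The eeh_report_*() walkers in this file drive the generic struct pci_error_handlers callbacks that drivers register. A hedged, driver-side sketch of those hooks (all names here are illustrative, not part of this patch):

static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
                                               enum pci_channel_state state)
{
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;       /* device is gone */

        /* Stop touching the hardware and ask EEH for a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
{
        /* Reinitialize device state after the PE reset. */
        return PCI_ERS_RESULT_RECOVERED;
}

static void example_resume(struct pci_dev *pdev)
{
        /* Channel is back to pci_channel_io_normal; restart I/O. */
}

static const struct pci_error_handlers example_err_handler = {
        .error_detected = example_error_detected,
        .slot_reset     = example_slot_reset,
        .resume         = example_resume,
};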
-
-/**
- * eeh_reset_device - Perform actual reset of a pci slot
- * @pe: EEH PE
- * @bus: PCI bus corresponding to the isolated slot
- *
- * This routine must be called to reset the indicated PE. During
- * the reset, udev may be invoked because the affected PCI devices
- * are removed and then re-added.
- */
-static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
-{
- int cnt, rc;
-
- /* pcibios will clear the counter; save the value */
- cnt = pe->freeze_count;
-
- /*
- * We don't remove the corresponding PE instances because
- * we need the information afterwards. The attached EEH
- * devices are expected to be re-attached shortly when
- * pcibios_add_pci_devices() is called.
- */
- if (bus)
- __pcibios_remove_pci_devices(bus, 0);
-
- /* Reset the pci controller. (Asserts RST#; resets config space).
- * Reconfigure bridges and devices. Don't try to bring the system
- * up if the reset failed for some reason.
- */
- rc = eeh_reset_pe(pe);
- if (rc)
- return rc;
-
- /* Restore PE */
- eeh_ops->configure_bridge(pe);
- eeh_pe_restore_bars(pe);
-
- /* Give the system 5 seconds to finish running the user-space
- * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
- * this is a hack, but if we don't do this, and try to bring
- * the device up before the scripts have taken it down,
- * potentially weird things happen.
- */
- if (bus) {
- ssleep(5);
- pcibios_add_pci_devices(bus);
- }
- pe->freeze_count = cnt;
-
- return 0;
-}
-
-/* The longest amount of time to wait for a pci device
- * to come back online, in seconds.
- */
-#define MAX_WAIT_FOR_RECOVERY 150
-
-/**
- * eeh_handle_event - Reset a PCI device after hard lockup.
- * @pe: EEH PE
- *
- * When the PHB detects address or data parity errors on a particular
- * PCI slot, the associated PE will be frozen. In addition, DMAs to
- * wild addresses (which usually happen due to bugs in device drivers
- * or in PCI adapter firmware) can cause EEH errors, as can #SERR,
- * #PERR and other miscellaneous PCI-related errors.
- *
- * The recovery process consists of unplugging the device driver
- * (which generates hotplug events to userspace), then issuing a PCI
- * #RST to the device, then reconfiguring the PCI config space for all
- * bridges & devices under this slot, and finally restarting the
- * device drivers (which causes a second set of hotplug events to go
- * out to userspace).
- */
-void eeh_handle_event(struct eeh_pe *pe)
-{
- struct pci_bus *frozen_bus;
- int rc = 0;
- enum pci_ers_result result = PCI_ERS_RESULT_NONE;
-
- frozen_bus = eeh_pe_bus_get(pe);
- if (!frozen_bus) {
- pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
- __func__, pe->phb->global_number, pe->addr);
- return;
- }
-
- pe->freeze_count++;
- if (pe->freeze_count > EEH_MAX_ALLOWED_FREEZES)
- goto excess_failures;
- pr_warning("EEH: This PCI device has failed %d times in the last hour\n",
- pe->freeze_count);
-
- /* Walk the various device drivers attached to this slot through
- * a reset sequence, giving each an opportunity to do what it needs
- * to accomplish the reset. Each child gets a report of the
- * status ... if any child can't handle the reset, then the entire
- * slot is dlpar removed and added.
- */
- eeh_pe_dev_traverse(pe, eeh_report_error, &result);
-
- /* Get the current PCI slot state. This can take a long time,
- * sometimes over 3 seconds for certain systems.
- */
- rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
- if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
- printk(KERN_WARNING "EEH: Permanent failure\n");
- goto hard_fail;
- }
-
- /* Since rtas may enable MMIO when posting the error log,
- * don't post the error log until after all dev drivers
- * have been informed.
- */
- eeh_slot_error_detail(pe, EEH_LOG_TEMP);
-
- /* If all device drivers were EEH-unaware, then shut
- * down all of the device drivers, and hope they
- * go down willingly, without panicking the system.
- */
- if (result == PCI_ERS_RESULT_NONE) {
- rc = eeh_reset_device(pe, frozen_bus);
- if (rc) {
- printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc);
- goto hard_fail;
- }
- }
-
- /* If all devices reported they can proceed, then re-enable MMIO */
- if (result == PCI_ERS_RESULT_CAN_RECOVER) {
- rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
-
- if (rc < 0)
- goto hard_fail;
- if (rc) {
- result = PCI_ERS_RESULT_NEED_RESET;
- } else {
- result = PCI_ERS_RESULT_NONE;
- eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
- }
- }
-
- /* If all devices reported they can proceed, then re-enable DMA */
- if (result == PCI_ERS_RESULT_CAN_RECOVER) {
- rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
-
- if (rc < 0)
- goto hard_fail;
- if (rc)
- result = PCI_ERS_RESULT_NEED_RESET;
- else
- result = PCI_ERS_RESULT_RECOVERED;
- }
-
- /* If any device has a hard failure, then shut off everything. */
- if (result == PCI_ERS_RESULT_DISCONNECT) {
- printk(KERN_WARNING "EEH: Device driver gave up\n");
- goto hard_fail;
- }
-
- /* If any device called out for a reset, then reset the slot */
- if (result == PCI_ERS_RESULT_NEED_RESET) {
- rc = eeh_reset_device(pe, NULL);
- if (rc) {
- printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc);
- goto hard_fail;
- }
- result = PCI_ERS_RESULT_NONE;
- eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
- }
-
- /* All devices should claim they have recovered by now. */
- if ((result != PCI_ERS_RESULT_RECOVERED) &&
- (result != PCI_ERS_RESULT_NONE)) {
- printk(KERN_WARNING "EEH: Not recovered\n");
- goto hard_fail;
- }
-
- /* Tell all device drivers that they can resume operations */
- eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
-
- return;
-
-excess_failures:
- /*
- * About 90% of all real-life EEH failures in the field
- * are due to poorly seated PCI cards. Only 10% or so are
- * due to actual, failed cards.
- */
- pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
- "last hour and has been permanently disabled.\n"
- "Please try reseating or replacing it.\n",
- pe->phb->global_number, pe->addr,
- pe->freeze_count);
- goto perm_error;
-
-hard_fail:
- pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
- "Please try reseating or replacing it\n",
- pe->phb->global_number, pe->addr);
-
-perm_error:
- eeh_slot_error_detail(pe, EEH_LOG_PERM);
-
- /* Notify all devices that they're about to go down. */
- eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
-
- /* Shut down the device drivers for good. */
- if (frozen_bus)
- pcibios_remove_pci_devices(frozen_bus);
-}
-
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
deleted file mode 100644
index 185bedd926d..00000000000
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
- */
-
-#include <linux/delay.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/kthread.h>
-#include <asm/eeh_event.h>
-#include <asm/ppc-pci.h>
-
-/** Overview:
- * EEH error states may be detected within exception handlers;
- * however, the recovery processing needs to occur asynchronously
- * in a normal kernel context and not an interrupt context.
- * This pair of routines creates an event and queues it onto a
- * work-queue, where a worker thread can drive recovery.
- */
-
-/* EEH event workqueue setup. */
-static DEFINE_SPINLOCK(eeh_eventlist_lock);
-LIST_HEAD(eeh_eventlist);
-static void eeh_thread_launcher(struct work_struct *);
-DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
-
-/* Serialize reset sequences for a given pci device */
-DEFINE_MUTEX(eeh_event_mutex);
-
-/**
- * eeh_event_handler - Dispatch EEH events.
- * @dummy: unused
- *
- * The detection of a frozen slot can occur inside an interrupt,
- * where it can be hard to do anything about it. The goal of this
- * routine is to pull these detection events out of the context
- * of the interrupt handler, and re-dispatch them for processing
- * at a later time in a normal context.
- */
-static int eeh_event_handler(void * dummy)
-{
- unsigned long flags;
- struct eeh_event *event;
- struct eeh_pe *pe;
-
- spin_lock_irqsave(&eeh_eventlist_lock, flags);
- event = NULL;
-
- /* Unqueue the event, get ready to process. */
- if (!list_empty(&eeh_eventlist)) {
- event = list_entry(eeh_eventlist.next, struct eeh_event, list);
- list_del(&event->list);
- }
- spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
-
- if (event == NULL)
- return 0;
-
- /* Serialize processing of EEH events */
- mutex_lock(&eeh_event_mutex);
- pe = event->pe;
- eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
- pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n",
- pe->phb->global_number, pe->addr);
-
- set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */
- eeh_handle_event(pe);
- eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
-
- kfree(event);
- mutex_unlock(&eeh_event_mutex);
-
- /* If there are no new errors after an hour, decrement the counter. */
- if (pe && pe->freeze_count > 0) {
- msleep_interruptible(3600*1000);
- if (pe->freeze_count > 0)
- pe->freeze_count--;
-
- }
-
- return 0;
-}
-
-/**
- * eeh_thread_launcher - Start kernel thread to handle EEH events
- * @dummy: unused
- *
- * This routine is called to start the kernel thread for processing
- * EEH events.
- */
-static void eeh_thread_launcher(struct work_struct *dummy)
-{
- if (IS_ERR(kthread_run(eeh_event_handler, NULL, "eehd")))
- printk(KERN_ERR "Failed to start EEH daemon\n");
-}
-
-/**
- * eeh_send_failure_event - Generate a PCI error event
- * @pe: EEH PE
- *
- * This routine can be called within an interrupt context;
- * the actual event will be delivered in a normal context
- * (from a workqueue).
- */
-int eeh_send_failure_event(struct eeh_pe *pe)
-{
- unsigned long flags;
- struct eeh_event *event;
-
- event = kzalloc(sizeof(*event), GFP_ATOMIC);
- if (!event) {
- pr_err("EEH: out of memory, event not handled\n");
- return -ENOMEM;
- }
- event->pe = pe;
-
- /* We may or may not be called in an interrupt context */
- spin_lock_irqsave(&eeh_eventlist_lock, flags);
- list_add(&event->list, &eeh_eventlist);
- spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
-
- schedule_work(&eeh_event_wq);
-
- return 0;
-}
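Because eeh_send_failure_event() allocates with GFP_ATOMIC and only takes a spinlock, it is safe to call from hard interrupt context, which is exactly where frozen PEs are usually detected. A minimal, hypothetical caller:

static irqreturn_t example_eeh_isr(int irq, void *data)
{
        struct eeh_pe *pe = data;       /* PE the caller found frozen */

        eeh_send_failure_event(pe);     /* queue it for the eehd worker */

        return IRQ_HANDLED;
}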
diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/platforms/pseries/eeh_pe.c
deleted file mode 100644
index fe43d1aa2cf..00000000000
--- a/arch/powerpc/platforms/pseries/eeh_pe.c
+++ /dev/null
@@ -1,652 +0,0 @@
-/*
- * This file implements PEs based on information from the platform.
- * Basically, there are 3 types of PEs: PHB/Bus/Device. All PEs are
- * organized as a hierarchy tree whose first level is associated
- * with the existing PHBs, since a particular PE is only meaningful
- * within one PHB domain.
- *
- * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/export.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-
-static LIST_HEAD(eeh_phb_pe);
-
-/**
- * eeh_pe_alloc - Allocate PE
- * @phb: PCI controller
- * @type: PE type
- *
- * Allocate PE instance dynamically.
- */
-static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
-{
- struct eeh_pe *pe;
-
- /* Allocate PHB PE */
- pe = kzalloc(sizeof(struct eeh_pe), GFP_KERNEL);
- if (!pe) return NULL;
-
- /* Initialize PHB PE */
- pe->type = type;
- pe->phb = phb;
- INIT_LIST_HEAD(&pe->child_list);
- INIT_LIST_HEAD(&pe->child);
- INIT_LIST_HEAD(&pe->edevs);
-
- return pe;
-}
-
-/**
- * eeh_phb_pe_create - Create PHB PE
- * @phb: PCI controller
- *
- * The function should be called when a PHB is detected during
- * system boot or PCI hotplug, in order to create its PHB PE.
- */
-int eeh_phb_pe_create(struct pci_controller *phb)
-{
- struct eeh_pe *pe;
-
- /* Allocate PHB PE */
- pe = eeh_pe_alloc(phb, EEH_PE_PHB);
- if (!pe) {
- pr_err("%s: out of memory!\n", __func__);
- return -ENOMEM;
- }
-
- /* Put it into the list */
- eeh_lock();
- list_add_tail(&pe->child, &eeh_phb_pe);
- eeh_unlock();
-
- pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number);
-
- return 0;
-}
-
-/**
- * eeh_phb_pe_get - Retrieve PHB PE based on the given PHB
- * @phb: PCI controller
- *
- * The PEs form a hierarchy tree whose first layer is composed of
- * PHB PEs. The function is used to retrieve the PHB PE that
- * corresponds to the given PHB.
- */
-static struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
-{
- struct eeh_pe *pe;
-
- list_for_each_entry(pe, &eeh_phb_pe, child) {
- /*
- * Actually, we needn't check the type since
- * the type of a PHB PE was determined when it
- * was created.
- */
- if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
- return pe;
- }
-
- return NULL;
-}
-
-/**
- * eeh_pe_next - Retrieve the next PE in the tree
- * @pe: current PE
- * @root: root PE
- *
- * The function is used to retrieve the next PE in the
- * PE hierarchy tree.
- */
-static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe,
- struct eeh_pe *root)
-{
- struct list_head *next = pe->child_list.next;
-
- if (next == &pe->child_list) {
- while (1) {
- if (pe == root)
- return NULL;
- next = pe->child.next;
- if (next != &pe->parent->child_list)
- break;
- pe = pe->parent;
- }
- }
-
- return list_entry(next, struct eeh_pe, child);
-}
-
-/**
- * eeh_pe_traverse - Traverse PEs in the specified PHB
- * @root: root PE
- * @fn: callback
- * @flag: extra parameter to callback
- *
- * The function is used to traverse the specified PE and its
- * child PEs. The traversal terminates once the callback
- * returns something other than NULL, or when there are no
- * more PEs to visit.
- */
-static void *eeh_pe_traverse(struct eeh_pe *root,
- eeh_traverse_func fn, void *flag)
-{
- struct eeh_pe *pe;
- void *ret;
-
- for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
- ret = fn(pe, flag);
- if (ret) return ret;
- }
-
- return NULL;
-}
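A callback passed to eeh_pe_traverse() returns NULL to continue the walk, or any non-NULL pointer to stop it and propagate that value, as __eeh_pe_get() below does. An illustrative counting callback (not from the original file), invoked as eeh_pe_traverse(root, example_count_pe, &n):

static void *example_count_pe(void *data, void *flag)
{
        int *n = flag;

        (*n)++;         /* visit every PE under the root */
        return NULL;    /* NULL: keep traversing */
}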
-
-/**
- * eeh_pe_dev_traverse - Traverse the devices from the PE
- * @root: EEH PE
- * @fn: function callback
- * @flag: extra parameter to callback
- *
- * The function is used to traverse the devices of the specified
- * PE and its child PEs.
- */
-void *eeh_pe_dev_traverse(struct eeh_pe *root,
- eeh_traverse_func fn, void *flag)
-{
- struct eeh_pe *pe;
- struct eeh_dev *edev;
- void *ret;
-
- if (!root) {
- pr_warning("%s: Invalid PE %p\n", __func__, root);
- return NULL;
- }
-
- eeh_lock();
-
- /* Traverse root PE */
- for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
- eeh_pe_for_each_dev(pe, edev) {
- ret = fn(edev, flag);
- if (ret) {
- eeh_unlock();
- return ret;
- }
- }
- }
-
- eeh_unlock();
-
- return NULL;
-}
-
-/**
- * __eeh_pe_get - Check the PE address
- * @data: EEH PE
- * @flag: EEH device
- *
- * A particular PE can be identified either by PE address or by
- * traditional BDF address, which is composed of Bus/Device/Function
- * numbers. The extra data referenced by @flag indicates which type
- * of address should be used.
- */
-static void *__eeh_pe_get(void *data, void *flag)
-{
- struct eeh_pe *pe = (struct eeh_pe *)data;
- struct eeh_dev *edev = (struct eeh_dev *)flag;
-
- /* Unexpected PHB PE */
- if (pe->type & EEH_PE_PHB)
- return NULL;
-
- /* We prefer PE address */
- if (edev->pe_config_addr &&
- (edev->pe_config_addr == pe->addr))
- return pe;
-
- /* Try BDF address */
- if (edev->pe_config_addr &&
- (edev->config_addr == pe->config_addr))
- return pe;
-
- return NULL;
-}
-
-/**
- * eeh_pe_get - Search PE based on the given address
- * @edev: EEH device
- *
- * Search the corresponding PE based on the specified address which
- * is included in the eeh device. The function is used to check if
- * the associated PE has already been created for that address.
- * Note that the PE address has 2 formats: the traditional PE
- * address, composed of PCI bus/device/function numbers, or the
- * unified PE address.
- */
-static struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
-{
- struct eeh_pe *root = eeh_phb_pe_get(edev->phb);
- struct eeh_pe *pe;
-
- pe = eeh_pe_traverse(root, __eeh_pe_get, edev);
-
- return pe;
-}
-
-/**
- * eeh_pe_get_parent - Retrieve the parent PE
- * @edev: EEH device
- *
- * All PEs in the system are organized as a hierarchy tree. The
- * function is used to retrieve the parent PE according to the
- * parent EEH device.
- */
-static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
-{
- struct device_node *dn;
- struct eeh_dev *parent;
-
- /*
- * An indirect parent EEH device may already have an
- * associated PE even though the direct parent EEH
- * device does not have one yet.
- */
- dn = edev->dn->parent;
- while (dn) {
- /* We're poking out of PCI territory */
- if (!PCI_DN(dn)) return NULL;
-
- parent = of_node_to_eeh_dev(dn);
- /* We're poking out of PCI territory */
- if (!parent) return NULL;
-
- if (parent->pe)
- return parent->pe;
-
- dn = dn->parent;
- }
-
- return NULL;
-}
-
-/**
- * eeh_add_to_parent_pe - Add EEH device to parent PE
- * @edev: EEH device
- *
- * Add the EEH device to its parent PE. If the parent PE already
- * exists, the PE type will be changed to EEH_PE_BUS. Otherwise,
- * we have to create a new PE to hold the EEH device, and the new
- * PE will be linked to its parent PE as well.
- */
-int eeh_add_to_parent_pe(struct eeh_dev *edev)
-{
- struct eeh_pe *pe, *parent;
-
- eeh_lock();
-
- /*
- * Check whether a PE already exists for this PE
- * address. If it does, the PE should be composed of
- * the PCI bus and its subordinate components.
- */
- pe = eeh_pe_get(edev);
- if (pe && !(pe->type & EEH_PE_INVALID)) {
- if (!edev->pe_config_addr) {
- eeh_unlock();
- pr_err("%s: PE with addr 0x%x already exists\n",
- __func__, edev->config_addr);
- return -EEXIST;
- }
-
- /* Mark the PE as type of PCI bus */
- pe->type = EEH_PE_BUS;
- edev->pe = pe;
-
- /* Put the edev to PE */
- list_add_tail(&edev->list, &pe->edevs);
- eeh_unlock();
- pr_debug("EEH: Add %s to Bus PE#%x\n",
- edev->dn->full_name, pe->addr);
-
- return 0;
- } else if (pe && (pe->type & EEH_PE_INVALID)) {
- list_add_tail(&edev->list, &pe->edevs);
- edev->pe = pe;
- /*
- * We get here because of PCI hotplug caused by EEH
- * recovery. We need to clear EEH_PE_INVALID all the
- * way up to the top of the tree.
- */
- parent = pe;
- while (parent) {
- if (!(parent->type & EEH_PE_INVALID))
- break;
- parent->type &= ~EEH_PE_INVALID;
- parent = parent->parent;
- }
- eeh_unlock();
- pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
- edev->dn->full_name, pe->addr, pe->parent->addr);
-
- return 0;
- }
-
- /* Create a new EEH PE */
- pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE);
- if (!pe) {
- eeh_unlock();
- pr_err("%s: out of memory!\n", __func__);
- return -ENOMEM;
- }
- pe->addr = edev->pe_config_addr;
- pe->config_addr = edev->config_addr;
-
- /*
- * Put the new EEH PE into hierarchy tree. If the parent
- * can't be found, the newly created PE will be attached
- * to PHB directly. Otherwise, we have to associate the
- * PE with its parent.
- */
- parent = eeh_pe_get_parent(edev);
- if (!parent) {
- parent = eeh_phb_pe_get(edev->phb);
- if (!parent) {
- eeh_unlock();
- pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
- __func__, edev->phb->global_number);
- edev->pe = NULL;
- kfree(pe);
- return -EEXIST;
- }
- }
- pe->parent = parent;
-
- /*
- * Put the newly created PE into the child list and
- * link the EEH device accordingly.
- */
- list_add_tail(&pe->child, &parent->child_list);
- list_add_tail(&edev->list, &pe->edevs);
- edev->pe = pe;
- eeh_unlock();
- pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
- edev->dn->full_name, pe->addr, pe->parent->addr);
-
- return 0;
-}
-
-/**
- * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
- * @edev: EEH device
- * @purge_pe: remove PE or not
- *
- * The PE hierarchy tree might change during PCI hotplug. Also,
- * PCI devices or buses could be removed from the system during
- * EEH recovery. So this function has to be called to remove the
- * corresponding PE when necessary.
- */
-int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
-{
- struct eeh_pe *pe, *parent, *child;
- int cnt;
-
- if (!edev->pe) {
- pr_warning("%s: No PE found for EEH device %s\n",
- __func__, edev->dn->full_name);
- return -EEXIST;
- }
-
- eeh_lock();
-
- /* Remove the EEH device */
- pe = edev->pe;
- edev->pe = NULL;
- list_del(&edev->list);
-
- /*
- * Check if the parent PE includes any EEH devices.
- * If not, we should delete it. We should also delete
- * the parent PE if it has no associated child PEs and
- * no EEH devices.
- */
- while (1) {
- parent = pe->parent;
- if (pe->type & EEH_PE_PHB)
- break;
-
- if (purge_pe) {
- if (list_empty(&pe->edevs) &&
- list_empty(&pe->child_list)) {
- list_del(&pe->child);
- kfree(pe);
- } else {
- break;
- }
- } else {
- if (list_empty(&pe->edevs)) {
- cnt = 0;
- list_for_each_entry(child, &pe->child_list, child) {
- if (!(child->type & EEH_PE_INVALID)) {
- cnt++;
- break;
- }
- }
-
- if (!cnt)
- pe->type |= EEH_PE_INVALID;
- else
- break;
- }
- }
-
- pe = parent;
- }
-
- eeh_unlock();
-
- return 0;
-}
-
-/**
- * __eeh_pe_state_mark - Mark the state for the PE
- * @data: EEH PE
- * @flag: state
- *
- * The function is used to mark the indicated state for the given
- * PE. The associated PCI devices are put into the I/O frozen
- * state as well.
- */
-static void *__eeh_pe_state_mark(void *data, void *flag)
-{
- struct eeh_pe *pe = (struct eeh_pe *)data;
- int state = *((int *)flag);
- struct eeh_dev *tmp;
- struct pci_dev *pdev;
-
- /*
- * Mark the PE with the indicated state. Also,
- * the associated PCI device will be put into
- * I/O frozen state to avoid I/O accesses from
- * the PCI device driver.
- */
- pe->state |= state;
- eeh_pe_for_each_dev(pe, tmp) {
- pdev = eeh_dev_to_pci_dev(tmp);
- if (pdev)
- pdev->error_state = pci_channel_io_frozen;
- }
-
- return NULL;
-}
-
-/**
- * eeh_pe_state_mark - Mark specified state for PE and its associated device
- * @pe: EEH PE
- * @state: state to be marked
- *
- * An EEH error affects the current PE and its child PEs. The
- * function is used to mark the appropriate state for the affected
- * PEs and their associated devices.
- */
-void eeh_pe_state_mark(struct eeh_pe *pe, int state)
-{
- eeh_lock();
- eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
- eeh_unlock();
-}
-
-/**
- * __eeh_pe_state_clear - Clear state for the PE
- * @data: EEH PE
- * @flag: state
- *
- * The function is used to clear the indicated state from the
- * given PE. The check count of the PE is cleared as well.
- */
-static void *__eeh_pe_state_clear(void *data, void *flag)
-{
- struct eeh_pe *pe = (struct eeh_pe *)data;
- int state = *((int *)flag);
-
- pe->state &= ~state;
- pe->check_count = 0;
-
- return NULL;
-}
-
-/**
- * eeh_pe_state_clear - Clear state for the PE and its children
- * @pe: PE
- * @state: state to be cleared
- *
- * When the PE and its children have recovered from an error,
- * we need to clear the error state. This function is used for
- * that purpose.
- */
-void eeh_pe_state_clear(struct eeh_pe *pe, int state)
-{
- eeh_lock();
- eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
- eeh_unlock();
-}
-
-/**
- * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
- * @data: EEH device
- * @flag: Unused
- *
- * Loads the PCI configuration space base address registers,
- * the expansion ROM base address, the latency timer, etc.
- * from the saved values in the device node.
- */
-static void *eeh_restore_one_device_bars(void *data, void *flag)
-{
- int i;
- u32 cmd;
- struct eeh_dev *edev = (struct eeh_dev *)data;
- struct device_node *dn = eeh_dev_to_of_node(edev);
-
- for (i = 4; i < 10; i++)
- eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
- /* 12 == Expansion ROM Address */
- eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
-
-#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
-#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
-
- eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
- SAVED_BYTE(PCI_CACHE_LINE_SIZE));
- eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
- SAVED_BYTE(PCI_LATENCY_TIMER));
-
- /* max latency, min grant, interrupt pin and line */
- eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
-
- /*
- * Restore PERR & SERR bits, some devices require it,
- * don't touch the other command bits
- */
- eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
- if (edev->config_space[1] & PCI_COMMAND_PARITY)
- cmd |= PCI_COMMAND_PARITY;
- else
- cmd &= ~PCI_COMMAND_PARITY;
- if (edev->config_space[1] & PCI_COMMAND_SERR)
- cmd |= PCI_COMMAND_SERR;
- else
- cmd &= ~PCI_COMMAND_SERR;
- eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
-
- return NULL;
-}
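The BYTE_SWAP()/SAVED_BYTE() macros in eeh_restore_one_device_bars() translate a PCI config-space byte offset into the matching byte index of the big-endian u32 array edev->config_space[]. A worked example of the arithmetic (editor's note, not part of the original source):

/*
 * BYTE_SWAP(OFF) = 8*(OFF/4) + 3 - OFF reverses the byte order
 * within each 32-bit word:
 *
 *   PCI_CACHE_LINE_SIZE = 0x0c -> 8*3 + 3 - 12 = 15
 *   PCI_LATENCY_TIMER   = 0x0d -> 8*3 + 3 - 13 = 14
 *
 * so the least-significant byte of the config dword saved in
 * config_space[3] is found at byte index 15 on a big-endian host.
 */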
-
-/**
- * eeh_pe_restore_bars - Restore the PCI config space info
- * @pe: EEH PE
- *
- * This routine performs a recursive walk over the children
- * of this device as well.
- */
-void eeh_pe_restore_bars(struct eeh_pe *pe)
-{
- /*
- * We needn't take the EEH lock since eeh_pe_dev_traverse()
- * will take that.
- */
- eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
-}
-
-/**
- * eeh_pe_bus_get - Retrieve PCI bus according to the given PE
- * @pe: EEH PE
- *
- * Retrieve the PCI bus according to the given PE. Basically,
- * there are 3 types of PEs: PHB/Bus/Device. For a PHB PE, the
- * primary PCI bus will be retrieved. The parent bus will be
- * returned for a BUS PE. However, there is no associated PCI
- * bus for a DEVICE PE.
- */
-struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
-{
- struct pci_bus *bus = NULL;
- struct eeh_dev *edev;
- struct pci_dev *pdev;
-
- eeh_lock();
-
- if (pe->type & EEH_PE_PHB) {
- bus = pe->phb->bus;
- } else if (pe->type & EEH_PE_BUS) {
- edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
- pdev = eeh_dev_to_pci_dev(edev);
- if (pdev)
- bus = pdev->bus;
- }
-
- eeh_unlock();
-
- return bus;
-}
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 19506f93573..0bec0c02c5e 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -83,7 +83,11 @@ static int pseries_eeh_init(void)
ibm_configure_pe = rtas_token("ibm,configure-pe");
ibm_configure_bridge = rtas_token("ibm,configure-bridge");
- /* necessary sanity check */
+ /*
+ * Necessary sanity check. We needn't check "get-config-addr-info"
+ * and its variant since old firmware probably supports addresses
+ * in domain/bus/slot/function form for EEH RTAS operations.
+ */
if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
__func__);
@@ -102,12 +106,6 @@ static int pseries_eeh_init(void)
pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
__func__);
return -EINVAL;
- } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE &&
- ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) {
- pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and "
- "<ibm,get-config-addr-info> invalid\n",
- __func__);
- return -EINVAL;
} else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
pr_warning("%s: RTAS service <ibm,configure-pe> and "
@@ -135,6 +133,78 @@ static int pseries_eeh_init(void)
return 0;
}
+static int pseries_eeh_cap_start(struct device_node *dn)
+{
+ struct pci_dn *pdn = PCI_DN(dn);
+ u32 status;
+
+ if (!pdn)
+ return 0;
+
+ rtas_read_config(pdn, PCI_STATUS, 2, &status);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+
+ return PCI_CAPABILITY_LIST;
+}
+
+
+static int pseries_eeh_find_cap(struct device_node *dn, int cap)
+{
+ struct pci_dn *pdn = PCI_DN(dn);
+ int pos = pseries_eeh_cap_start(dn);
+ int cnt = 48; /* Maximum number of capabilities */
+ u32 id;
+
+ if (!pos)
+ return 0;
+
+ while (cnt--) {
+ rtas_read_config(pdn, pos, 1, &pos);
+ if (pos < 0x40)
+ break;
+ pos &= ~3;
+ rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+ pos += PCI_CAP_LIST_NEXT;
+ }
+
+ return 0;
+}
+
+static int pseries_eeh_find_ecap(struct device_node *dn, int cap)
+{
+ struct pci_dn *pdn = PCI_DN(dn);
+ struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ u32 header;
+ int pos = 256;
+ int ttl = (4096 - 256) / 8;
+
+ if (!edev || !edev->pcie_cap)
+ return 0;
+ if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
+ return 0;
+ else if (!header)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == cap && pos)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < 256)
+ break;
+
+ if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
+ break;
+ }
+
+ return 0;
+}
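Both helpers are used further down to cache capability offsets on the eeh_dev (pcix_cap, pcie_cap, aer_cap). For reference, the extended-capability header layout that pseries_eeh_find_ecap() decodes, per the standard PCI_EXT_CAP_* macros (an editor's sketch):

/*
 * Extended capability header (one 32-bit read):
 *   bits  0..15  capability ID       -> PCI_EXT_CAP_ID(header)
 *   bits 16..19  capability version
 *   bits 20..31  next capability off -> PCI_EXT_CAP_NEXT(header)
 *
 * The walk starts at offset 256 and each capability occupies at
 * least 8 bytes, hence the (4096 - 256) / 8 iteration limit.
 */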
+
/**
* pseries_eeh_of_probe - EEH probe on the given device
* @dn: OF node
@@ -148,30 +218,54 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
{
struct eeh_dev *edev;
struct eeh_pe pe;
- const u32 *class_code, *vendor_id, *device_id;
- const u32 *regs;
+ struct pci_dn *pdn = PCI_DN(dn);
+ const __be32 *classp, *vendorp, *devicep;
+ u32 class_code;
+ const __be32 *regs;
+ u32 pcie_flags;
int enable = 0;
int ret;
/* Retrieve OF node and eeh device */
edev = of_node_to_eeh_dev(dn);
- if (!of_device_is_available(dn))
+ if (edev->pe || !of_device_is_available(dn))
return NULL;
/* Retrieve class/vendor/device IDs */
- class_code = of_get_property(dn, "class-code", NULL);
- vendor_id = of_get_property(dn, "vendor-id", NULL);
- device_id = of_get_property(dn, "device-id", NULL);
+ classp = of_get_property(dn, "class-code", NULL);
+ vendorp = of_get_property(dn, "vendor-id", NULL);
+ devicep = of_get_property(dn, "device-id", NULL);
/* Skip for bad OF node or PCI-ISA bridge */
- if (!class_code || !vendor_id || !device_id)
+ if (!classp || !vendorp || !devicep)
return NULL;
if (dn->type && !strcmp(dn->type, "isa"))
return NULL;
- /* Update class code and mode of eeh device */
- edev->class_code = *class_code;
- edev->mode = 0;
+ class_code = of_read_number(classp, 1);
+
+ /*
+ * Update class code and mode of eeh device. We need to
+ * correctly reflect whether the current device is a root
+ * port or a PCIe switch downstream port.
+ */
+ edev->class_code = class_code;
+ edev->pcix_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_PCIX);
+ edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP);
+ edev->aer_cap = pseries_eeh_find_ecap(dn, PCI_EXT_CAP_ID_ERR);
+ edev->mode &= 0xFFFFFF00;
+ if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ edev->mode |= EEH_DEV_BRIDGE;
+ if (edev->pcie_cap) {
+ rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
+ 2, &pcie_flags);
+ pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
+ if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
+ edev->mode |= EEH_DEV_ROOT_PORT;
+ else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
+ edev->mode |= EEH_DEV_DS_PORT;
+ }
+ }
/* Retrieve the device address */
regs = of_get_property(dn, "reg", NULL);
@@ -184,12 +278,12 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
/* Initialize the fake PE */
memset(&pe, 0, sizeof(struct eeh_pe));
pe.phb = edev->phb;
- pe.config_addr = regs[0];
+ pe.config_addr = of_read_number(regs, 1);
/* Enable EEH on the device */
ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
if (!ret) {
- edev->config_addr = regs[0];
+ edev->config_addr = of_read_number(regs, 1);
/* Retrieve PE address */
edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
pe.addr = edev->pe_config_addr;
@@ -203,7 +297,7 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
enable = 1;
if (enable) {
- eeh_subsystem_enabled = 1;
+ eeh_set_enable(true);
eeh_add_to_parent_pe(edev);
pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
@@ -402,6 +496,7 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
} else {
result = EEH_STATE_NOT_SUPPORT;
}
+ break;
default:
result = EEH_STATE_NOT_SUPPORT;
}
@@ -437,11 +532,19 @@ static int pseries_eeh_reset(struct eeh_pe *pe, int option)
/* If fundamental-reset not supported, try hot-reset */
if (option == EEH_RESET_FUNDAMENTAL &&
ret == -8) {
+ option = EEH_RESET_HOT;
ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
config_addr, BUID_HI(pe->phb->buid),
- BUID_LO(pe->phb->buid), EEH_RESET_HOT);
+ BUID_LO(pe->phb->buid), option);
}
+ /* We need a reset-hold or settlement delay */
+ if (option == EEH_RESET_FUNDAMENTAL ||
+ option == EEH_RESET_HOT)
+ msleep(EEH_PE_RST_HOLD_TIME);
+ else
+ msleep(EEH_PE_RST_SETTLE_TIME);
+
return ret;
}
@@ -627,7 +730,9 @@ static struct eeh_ops pseries_eeh_ops = {
.get_log = pseries_eeh_get_log,
.configure_bridge = pseries_eeh_configure_bridge,
.read_config = pseries_eeh_read_config,
- .write_config = pseries_eeh_write_config
+ .write_config = pseries_eeh_write_config,
+ .next_error = NULL,
+ .restore_config = NULL
};
/**
diff --git a/arch/powerpc/platforms/pseries/eeh_sysfs.c b/arch/powerpc/platforms/pseries/eeh_sysfs.c
deleted file mode 100644
index d37708360f2..00000000000
--- a/arch/powerpc/platforms/pseries/eeh_sysfs.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Sysfs entries for PCI Error Recovery for PAPR-compliant platform.
- * Copyright IBM Corporation 2007
- * Copyright Linas Vepstas <linas@austin.ibm.com> 2007
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
- */
-#include <linux/pci.h>
-#include <linux/stat.h>
-#include <asm/ppc-pci.h>
-#include <asm/pci-bridge.h>
-
-/**
- * EEH_SHOW_ATTR -- Create sysfs entry for eeh statistic
- * @_name: name of file in sysfs directory
- * @_memb: name of member in struct pci_dn to access
- * @_format: printf format for display
- *
- * All of the attributes look very similar, so just
- * auto-gen a cut-n-paste routine to display them.
- */
-#define EEH_SHOW_ATTR(_name,_memb,_format) \
-static ssize_t eeh_show_##_name(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct pci_dev *pdev = to_pci_dev(dev); \
- struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev); \
- \
- if (!edev) \
- return 0; \
- \
- return sprintf(buf, _format "\n", edev->_memb); \
-} \
-static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL);
-
-EEH_SHOW_ATTR(eeh_mode, mode, "0x%x");
-EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x");
-EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");
-
-void eeh_sysfs_add_device(struct pci_dev *pdev)
-{
- int rc=0;
-
- rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
- rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr);
- rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
-
- if (rc)
- printk(KERN_WARNING "EEH: Unable to create sysfs entries\n");
-}
-
-void eeh_sysfs_remove_device(struct pci_dev *pdev)
-{
- device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
- device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
- device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
-}
-
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
index 2605c310166..18380e8f6df 100644
--- a/arch/powerpc/platforms/pseries/event_sources.c
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -25,7 +25,7 @@ void request_event_sources_irqs(struct device_node *np,
const char *name)
{
int i, index, count = 0;
- struct of_irq oirq;
+ struct of_phandle_args oirq;
const u32 *opicprop;
unsigned int opicplen;
unsigned int virqs[16];
@@ -55,13 +55,11 @@ void request_event_sources_irqs(struct device_node *np,
/* Else use normal interrupt tree parsing */
else {
/* First try to do a proper OF tree parsing */
- for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
+ for (index = 0; of_irq_parse_one(np, index, &oirq) == 0;
index++) {
if (count > 15)
break;
- virqs[count] = irq_create_of_mapping(oirq.controller,
- oirq.specifier,
- oirq.size);
+ virqs[count] = irq_create_of_mapping(&oirq);
if (virqs[count] == NO_IRQ) {
pr_err("event-sources: Unable to allocate "
"interrupt number for %s\n",
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index 7b56118f531..8c80588abac 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -28,13 +28,18 @@
#include "pseries.h"
-typedef struct {
+struct hypertas_fw_feature {
unsigned long val;
char * name;
-} firmware_feature_t;
+};
-static __initdata firmware_feature_t
-firmware_features_table[FIRMWARE_MAX_FEATURES] = {
+/*
+ * The names in this table match names in rtas/ibm,hypertas-functions. If the
+ * entry ends in a '*', only the text up to the '*' is matched. Otherwise
+ * the entire string must match.
+ */
+static __initdata struct hypertas_fw_feature
+hypertas_fw_features_table[] = {
{FW_FEATURE_PFT, "hcall-pft"},
{FW_FEATURE_TCE, "hcall-tce"},
{FW_FEATURE_SPRG0, "hcall-sprg0"},
@@ -57,32 +62,72 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = {
{FW_FEATURE_SPLPAR, "hcall-splpar"},
{FW_FEATURE_VPHN, "hcall-vphn"},
{FW_FEATURE_SET_MODE, "hcall-set-mode"},
+ {FW_FEATURE_BEST_ENERGY, "hcall-best-energy-1*"},
};
/* Build up the firmware features bitmask using the contents of
* device-tree/ibm,hypertas-functions. Ultimately this functionality may
* be moved into prom.c prom_init().
*/
-void __init fw_feature_init(const char *hypertas, unsigned long len)
+void __init fw_hypertas_feature_init(const char *hypertas, unsigned long len)
{
const char *s;
int i;
- pr_debug(" -> fw_feature_init()\n");
+ pr_debug(" -> fw_hypertas_feature_init()\n");
for (s = hypertas; s < hypertas + len; s += strlen(s) + 1) {
- for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) {
- /* check value against table of strings */
- if (!firmware_features_table[i].name ||
- strcmp(firmware_features_table[i].name, s))
+ for (i = 0; i < ARRAY_SIZE(hypertas_fw_features_table); i++) {
+ const char *name = hypertas_fw_features_table[i].name;
+ size_t size;
+
+ /*
+ * If there is a '*' at the end of the name, only check
+ * up to there.
+ */
+ size = strlen(name);
+ if (size && name[size - 1] == '*') {
+ if (strncmp(name, s, size - 1))
+ continue;
+ } else if (strcmp(name, s))
continue;
/* we have a match */
powerpc_firmware_features |=
- firmware_features_table[i].val;
+ hypertas_fw_features_table[i].val;
break;
}
}
- pr_debug(" <- fw_feature_init()\n");
+ pr_debug(" <- fw_hypertas_feature_init()\n");
+}
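Outside kernel context, the '*'-suffix rule applied by the loop above reduces to a small predicate: "hcall-best-energy-1*" matches any firmware string beginning with "hcall-best-energy-1". A sketch (helper name hypothetical):

static int hypertas_name_matches(const char *name, const char *s)
{
        size_t size = strlen(name);

        /* Trailing '*': compare only the prefix before it. */
        if (size && name[size - 1] == '*')
                return strncmp(name, s, size - 1) == 0;

        return strcmp(name, s) == 0;    /* otherwise, exact match */
}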
+
+struct vec5_fw_feature {
+ unsigned long val;
+ unsigned int feature;
+};
+
+static __initdata struct vec5_fw_feature
+vec5_fw_features_table[] = {
+ {FW_FEATURE_TYPE1_AFFINITY, OV5_TYPE1_AFFINITY},
+ {FW_FEATURE_PRRN, OV5_PRRN},
+};
+
+void __init fw_vec5_feature_init(const char *vec5, unsigned long len)
+{
+ unsigned int index, feat;
+ int i;
+
+ pr_debug(" -> fw_vec5_feature_init()\n");
+
+ for (i = 0; i < ARRAY_SIZE(vec5_fw_features_table); i++) {
+ index = OV5_INDX(vec5_fw_features_table[i].feature);
+ feat = OV5_FEAT(vec5_fw_features_table[i].feature);
+
+ if (vec5[index] & feat)
+ powerpc_firmware_features |=
+ vec5_fw_features_table[i].val;
+ }
+
+ pr_debug(" <- fw_vec5_feature_init()\n");
}
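OV5_INDX() and OV5_FEAT() split each table entry's feature word into a byte index into the option vector and a bit mask within that byte. Assuming the usual encoding (index in the high byte, mask in the low byte), the lookup works like this:

/*
 * Assuming OV5_INDX(x) == (x >> 8) and OV5_FEAT(x) == (x & 0xff):
 * a feature encoded as 0x0580 would live in vec5[5] under mask
 * 0x80, so the test above reads as: vec5[5] & 0x80.
 */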
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index a38956269fb..20d62975856 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -30,16 +30,12 @@
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
+
#include "offline_states.h"
/* This version can't take the spinlock, because it never returns */
-static struct rtas_args rtas_stop_self_args = {
- .token = RTAS_UNKNOWN_SERVICE,
- .nargs = 0,
- .nret = 1,
- .rets = &rtas_stop_self_args.args[0],
-};
+static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
CPU_STATE_OFFLINE;
@@ -92,15 +88,21 @@ void set_default_offline_state(int cpu)
static void rtas_stop_self(void)
{
- struct rtas_args *args = &rtas_stop_self_args;
+ static struct rtas_args args = {
+ .nargs = 0,
+ .nret = 1,
+ .rets = &args.args[0],
+ };
+
+ args.token = cpu_to_be32(rtas_stop_self_token);
local_irq_disable();
- BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
+ BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
printk("cpu %u (hwid %u) Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
- enter_rtas(__pa(args));
+ enter_rtas(__pa(&args));
panic("Alas, I survived.\n");
}
@@ -123,20 +125,28 @@ static void pseries_mach_cpu_die(void)
cede_latency_hint = 2;
get_lppaca()->idle = 1;
- if (!get_lppaca()->shared_proc)
+ if (!lppaca_shared_proc(get_lppaca()))
get_lppaca()->donate_dedicated_cpu = 1;
while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
+ while (!prep_irq_for_idle()) {
+ local_irq_enable();
+ local_irq_disable();
+ }
+
extended_cede_processor(cede_latency_hint);
}
- if (!get_lppaca()->shared_proc)
+ local_irq_disable();
+
+ if (!lppaca_shared_proc(get_lppaca()))
get_lppaca()->donate_dedicated_cpu = 0;
get_lppaca()->idle = 0;
if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
unregister_slb_shadow(hwcpu);
+ hard_irq_disable();
/*
* Call to start_secondary_resume() will not return.
* Kernel stack will be reset and start_secondary()
@@ -383,10 +393,10 @@ static int __init pseries_cpu_hotplug_init(void)
}
}
- rtas_stop_self_args.token = rtas_token("stop-self");
+ rtas_stop_self_token = rtas_token("stop-self");
qcss_tok = rtas_token("query-cpu-stopped-state");
- if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
+ if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
qcss_tok == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "CPU Hotplug not supported by firmware "
"- disabling.\n");
@@ -411,4 +421,4 @@ static int __init pseries_cpu_hotplug_init(void)
return 0;
}
-arch_initcall(pseries_cpu_hotplug_init);
+machine_arch_initcall(pseries, pseries_cpu_hotplug_init);
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2372c609fa2..7995135170a 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -10,15 +10,18 @@
*/
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
+#include <asm/prom.h>
#include <asm/sparsemem.h>
-static unsigned long get_memblock_size(void)
+unsigned long pseries_memory_block_size(void)
{
struct device_node *np;
unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
@@ -61,71 +64,53 @@ static unsigned long get_memblock_size(void)
return memblock_size;
}
-/* WARNING: This is going to override the generic definition whenever
- * pseries is built-in regardless of what platform is active at boot
- * time. This is fine for now as this is the only "option" and it
- * should work everywhere. If not, we'll have to turn this into a
- * ppc_md. callback
- */
-unsigned long memory_block_size_bytes(void)
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static int pseries_remove_memory(u64 start, u64 size)
{
- return get_memblock_size();
+ int ret;
+
+ /* Remove htab bolted mappings for this section of memory */
+ start = (unsigned long)__va(start);
+ ret = remove_section_mapping(start, start + size);
+
+ /* Ensure all vmalloc mappings are flushed in case they also
+ * hit that section of memory
+ */
+ vm_unmap_aliases();
+
+ return ret;
}
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
- unsigned long start, start_pfn;
- struct zone *zone;
- int ret;
- unsigned long section;
- unsigned long sections_to_remove;
+ unsigned long block_sz, start_pfn;
+ int sections_per_block;
+ int i, nid;
start_pfn = base >> PAGE_SHIFT;
- if (!pfn_valid(start_pfn)) {
- memblock_remove(base, memblock_size);
- return 0;
- }
+ lock_device_hotplug();
- zone = page_zone(pfn_to_page(start_pfn));
+ if (!pfn_valid(start_pfn))
+ goto out;
- /*
- * Remove section mappings and sysfs entries for the
- * section of the memory we are removing.
- *
- * NOTE: Ideally, this should be done in generic code like
- * remove_memory(). But remove_memory() gets called by writing
- * to sysfs "state" file and we can't remove sysfs entries
- * while writing to it. So we have to defer it to here.
- */
- sections_to_remove = (memblock_size >> PAGE_SHIFT) / PAGES_PER_SECTION;
- for (section = 0; section < sections_to_remove; section++) {
- unsigned long pfn = start_pfn + section * PAGES_PER_SECTION;
- ret = __remove_pages(zone, pfn, PAGES_PER_SECTION);
- if (ret)
- return ret;
+ block_sz = pseries_memory_block_size();
+ sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
+ nid = memory_add_physaddr_to_nid(base);
+
+ for (i = 0; i < sections_per_block; i++) {
+ remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
+ base += MIN_MEMORY_BLOCK_SIZE;
}
- /*
- * Update memory regions for memory remove
- */
+out:
+ /* Update memory regions for memory remove */
memblock_remove(base, memblock_size);
-
- /*
- * Remove htab bolted mappings for this section of memory
- */
- start = (unsigned long)__va(base);
- ret = remove_section_mapping(start, start + memblock_size);
-
- /* Ensure all vmalloc mappings are flushed in case they also
- * hit that section of memory
- */
- vm_unmap_aliases();
-
- return ret;
+ unlock_device_hotplug();
+ return 0;
}
-static int pseries_remove_memory(struct device_node *np)
+static int pseries_remove_mem_node(struct device_node *np)
{
const char *type;
const unsigned int *regs;
@@ -150,11 +135,22 @@ static int pseries_remove_memory(struct device_node *np)
base = *(unsigned long *)regs;
lmb_size = regs[3];
- ret = pseries_remove_memblock(base, lmb_size);
- return ret;
+ pseries_remove_memblock(base, lmb_size);
+ return 0;
}
+#else
+static inline int pseries_remove_memblock(unsigned long base,
+ unsigned int memblock_size)
+{
+ return -EOPNOTSUPP;
+}
+static inline int pseries_remove_mem_node(struct device_node *np)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
-static int pseries_add_memory(struct device_node *np)
+static int pseries_add_mem_node(struct device_node *np)
{
const char *type;
const unsigned int *regs;
@@ -194,7 +190,7 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
u32 *p;
int i, rc = -EINVAL;
- memblock_size = get_memblock_size();
+ memblock_size = pseries_memory_block_size();
if (!memblock_size)
return -EINVAL;
@@ -240,10 +236,10 @@ static int pseries_memory_notifier(struct notifier_block *nb,
switch (action) {
case OF_RECONFIG_ATTACH_NODE:
- err = pseries_add_memory(node);
+ err = pseries_add_mem_node(node);
break;
case OF_RECONFIG_DETACH_NODE:
- err = pseries_remove_memory(node);
+ err = pseries_remove_mem_node(node);
break;
case OF_RECONFIG_UPDATE_PROPERTY:
pr = (struct of_prop_reconfig *)node;
@@ -263,6 +259,10 @@ static int __init pseries_memory_hotplug_init(void)
if (firmware_has_feature(FW_FEATURE_LPAR))
of_reconfig_notifier_register(&pseries_mem_nb);
+#ifdef CONFIG_MEMORY_HOTREMOVE
+ ppc_md.remove_memory = pseries_remove_memory;
+#endif
+
return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 444fe7759e5..99ecf0a5a92 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -49,7 +49,7 @@ END_FTR_SECTION(0, 1); \
std r0,16(r1); \
addi r4,r1,STK_PARAM(FIRST_REG); \
stdu r1,-STACK_FRAME_OVERHEAD(r1); \
- bl .__trace_hcall_entry; \
+ bl __trace_hcall_entry; \
addi r1,r1,STACK_FRAME_OVERHEAD; \
ld r0,16(r1); \
ld r3,STK_PARAM(R3)(r1); \
@@ -83,7 +83,7 @@ END_FTR_SECTION(0, 1); \
mr r3,r6; \
std r0,16(r1); \
stdu r1,-STACK_FRAME_OVERHEAD(r1); \
- bl .__trace_hcall_exit; \
+ bl __trace_hcall_exit; \
addi r1,r1,STACK_FRAME_OVERHEAD; \
ld r0,16(r1); \
ld r3,STK_PARAM(R3)(r1); \
@@ -106,7 +106,7 @@ END_FTR_SECTION(0, 1); \
.text
-_GLOBAL(plpar_hcall_norets)
+_GLOBAL_TOC(plpar_hcall_norets)
HMT_MEDIUM
mfcr r0
@@ -122,7 +122,7 @@ _GLOBAL(plpar_hcall_norets)
mtcrf 0xff,r0
blr /* return r3 = status */
-_GLOBAL(plpar_hcall)
+_GLOBAL_TOC(plpar_hcall)
HMT_MEDIUM
mfcr r0
@@ -188,7 +188,7 @@ _GLOBAL(plpar_hcall_raw)
blr /* return r3 = status */
-_GLOBAL(plpar_hcall9)
+_GLOBAL_TOC(plpar_hcall9)
HMT_MEDIUM
mfcr r0
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index c9311cfdfca..cf4e7736e4f 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -86,7 +86,7 @@ static int hcall_inst_seq_open(struct inode *inode, struct file *file)
rc = seq_open(file, &hcall_inst_seq_ops);
seq = file->private_data;
- seq->private = file->f_path.dentry->d_inode->i_private;
+ seq->private = file_inode(file)->i_private;
return rc;
}
diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
index b344f94b040..849b29b3e9a 100644
--- a/arch/powerpc/platforms/pseries/hvconsole.c
+++ b/arch/powerpc/platforms/pseries/hvconsole.c
@@ -28,7 +28,7 @@
#include <linux/errno.h>
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
/**
* hvc_get_chars - retrieve characters from firmware for denoted vterm adapter
@@ -40,10 +40,16 @@
*/
int hvc_get_chars(uint32_t vtermno, char *buf, int count)
{
- unsigned long got;
+ long ret;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ unsigned long *lbuf = (unsigned long *)buf;
+
+ ret = plpar_hcall(H_GET_TERM_CHAR, retbuf, vtermno);
+ lbuf[0] = be64_to_cpu(retbuf[1]);
+ lbuf[1] = be64_to_cpu(retbuf[2]);
- if (plpar_get_term_char(vtermno, &got, buf) == H_SUCCESS)
- return got;
+ if (ret == H_SUCCESS)
+ return retbuf[0];
return 0;
}
@@ -69,8 +75,9 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
if (count > MAX_VIO_PUT_CHARS)
count = MAX_VIO_PUT_CHARS;
- ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count, lbuf[0],
- lbuf[1]);
+ ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count,
+ cpu_to_be64(lbuf[0]),
+ cpu_to_be64(lbuf[1]));
if (ret == H_SUCCESS)
return count;
if (ret == H_BUSY)
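
H_GET_TERM_CHAR hands back up to sixteen characters packed into two 64-bit return registers, first character in the most significant byte; the be64_to_cpu() calls above make the in-memory byte order come out right on little-endian kernels. A user-space sketch of the same swap-then-store idea, using glibc's htobe64() and a made-up register value:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

int main(void)
{
    /* first character of the stream lives in the most significant byte */
    uint64_t reg = 0x68656c6c6f212121ULL;   /* "hello!!!" */
    uint64_t swapped = htobe64(reg);        /* byte swap on LE, no-op on BE */
    char buf[9] = { 0 };

    memcpy(buf, &swapped, sizeof(swapped));
    printf("%s\n", buf);                    /* prints hello!!! on either host */
    return 0;
}
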
diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c
index fcf4b4cbeaf..4557e91626c 100644
--- a/arch/powerpc/platforms/pseries/hvcserver.c
+++ b/arch/powerpc/platforms/pseries/hvcserver.c
@@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <asm/hvcall.h>
#include <asm/hvcserver.h>
@@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
= (unsigned int)last_p_partition_ID;
/* copy the Null-term char too */
- strncpy(&next_partner_info->location_code[0],
+ strlcpy(&next_partner_info->location_code[0],
(char *)&pi_buff[2],
- strlen((char *)&pi_buff[2]) + 1);
+ sizeof(next_partner_info->location_code));
list_add_tail(&(next_partner_info->node), head);
next_partner_info = NULL;
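
The strncpy() it replaces was bounded by strlen(source) + 1, which overruns location_code whenever the source string is longer than the destination; strlcpy() bounds the copy by the destination size and guarantees NUL termination. A sketch of the same intent in portable user space (strlcpy() is not in glibc, so snprintf() stands in; the sample strings are fabricated):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char location_code[8];
    const char *src = "U9117.MMA.101F170-V5-C0"; /* longer than dest */

    /* old pattern: strncpy(dst, src, strlen(src) + 1) writes past dst */
    snprintf(location_code, sizeof(location_code), "%s", src);

    printf("truncated safely: \"%s\" (%zu bytes kept)\n",
           location_code, strlen(location_code));
    return 0;
}
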
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c
index ef9d9d84c7d..0240c4ff878 100644
--- a/arch/powerpc/platforms/pseries/io_event_irq.c
+++ b/arch/powerpc/platforms/pseries/io_event_irq.c
@@ -82,9 +82,9 @@ static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
* RTAS_TYPE_IO only exists in extended event log version 6 or later.
* No need to check event log version.
*/
- if (unlikely(elog->type != RTAS_TYPE_IO)) {
- printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d",
- elog->type);
+ if (unlikely(rtas_error_type(elog) != RTAS_TYPE_IO)) {
+ printk_once(KERN_WARNING"io_event_irq: Unexpected event type %d",
+ rtas_error_type(elog));
return NULL;
}
@@ -115,7 +115,7 @@ static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
* by scope or event type alone. For example, Torrent ISR route change
* event is reported with scope 0x00 (Not Applicable) rather than
* 0x3B (Torrent-hub). It is better to let the clients identify
- * who owns the the event.
+ * who owns the event.
*/
static irqreturn_t ioei_interrupt(int irq, void *dev_id)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index e2685badb5d..33b552ffbe5 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -48,12 +48,11 @@
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/mmzone.h>
-
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
- u64 *startp, u64 *endp)
+ __be64 *startp, __be64 *endp)
{
u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
unsigned long start, end, inc;
@@ -87,7 +86,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
struct dma_attrs *attrs)
{
u64 proto_tce;
- u64 *tcep, *tces;
+ __be64 *tcep, *tces;
u64 rpn;
proto_tce = TCE_PCI_READ; // Read allowed
@@ -95,12 +94,12 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
- tces = tcep = ((u64 *)tbl->it_base) + index;
+ tces = tcep = ((__be64 *)tbl->it_base) + index;
while (npages--) {
/* can't move this out since we might cross MEMBLOCK boundary */
rpn = __pa(uaddr) >> TCE_SHIFT;
- *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
+ *tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
uaddr += TCE_PAGE_SIZE;
tcep++;
@@ -114,9 +113,9 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
- u64 *tcep, *tces;
+ __be64 *tcep, *tces;
- tces = tcep = ((u64 *)tbl->it_base) + index;
+ tces = tcep = ((__be64 *)tbl->it_base) + index;
while (npages--)
*(tcep++) = 0;
@@ -127,11 +126,11 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
- u64 *tcep;
+ __be64 *tcep;
- tcep = ((u64 *)tbl->it_base) + index;
+ tcep = ((__be64 *)tbl->it_base) + index;
- return *tcep;
+ return be64_to_cpu(*tcep);
}
static void tce_free_pSeriesLP(struct iommu_table*, long, long);
@@ -178,7 +177,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
return ret;
}
-static DEFINE_PER_CPU(u64 *, tce_page);
+static DEFINE_PER_CPU(__be64 *, tce_page);
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
@@ -187,7 +186,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
{
u64 rc = 0;
u64 proto_tce;
- u64 *tcep;
+ __be64 *tcep;
u64 rpn;
long l, limit;
long tcenum_start = tcenum, npages_start = npages;
@@ -207,7 +206,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
* from iommu_alloc{,_sg}()
*/
if (!tcep) {
- tcep = (u64 *)__get_free_page(GFP_ATOMIC);
+ tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
local_irq_restore(flags);
@@ -231,7 +230,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);
for (l = 0; l < limit; l++) {
- tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
+ tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
rpn++;
}
@@ -330,16 +329,16 @@ struct direct_window {
/* Dynamic DMA Window support */
struct ddw_query_response {
- u32 windows_available;
- u32 largest_available_block;
- u32 page_size;
- u32 migration_capable;
+ __be32 windows_available;
+ __be32 largest_available_block;
+ __be32 page_size;
+ __be32 migration_capable;
};
struct ddw_create_response {
- u32 liobn;
- u32 addr_hi;
- u32 addr_lo;
+ __be32 liobn;
+ __be32 addr_hi;
+ __be32 addr_lo;
};
static LIST_HEAD(direct_window_list);
@@ -382,6 +381,7 @@ static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
dma_offset,
0, limit);
+ next += limit * tce_size;
num_tce -= limit;
} while (num_tce > 0 && !rc);
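
The added `next += limit * tce_size;` is what moves the cursor between plpar_tce_stuff() calls; without it every iteration would clear the same first chunk of the range. A sketch of the chunked walk, with illustrative names and a hypothetical per-call limit:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t num_tce = 10000, next = 0, tce_size = 4096;
    const uint64_t max_per_call = 512; /* hypothetical hypervisor limit */

    while (num_tce > 0) {
        uint64_t limit = num_tce < max_per_call ? num_tce : max_per_call;
        /* stuff_tces(liobn, next, limit) would go here */
        next += limit * tce_size;   /* the line the hunk adds */
        num_tce -= limit;
    }
    printf("cleared up to offset 0x%llx\n", (unsigned long long)next);
    return 0;
}
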
@@ -392,7 +392,8 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
unsigned long num_pfn, const void *arg)
{
const struct dynamic_dma_window_prop *maprange = arg;
- u64 *tcep, tce_size, num_tce, dma_offset, next, proto_tce, liobn;
+ u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
+ __be64 *tcep;
u32 tce_shift;
u64 rc = 0;
long l, limit;
@@ -401,7 +402,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
tcep = __get_cpu_var(tce_page);
if (!tcep) {
- tcep = (u64 *)__get_free_page(GFP_ATOMIC);
+ tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
if (!tcep) {
local_irq_enable();
return -ENOMEM;
@@ -435,7 +436,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
dma_offset = next + be64_to_cpu(maprange->dma_base);
for (l = 0; l < limit; l++) {
- tcep[l] = proto_tce | next;
+ tcep[l] = cpu_to_be64(proto_tce | next);
next += tce_size;
}
@@ -485,9 +486,10 @@ static void iommu_table_setparms(struct pci_controller *phb,
memset((void *)tbl->it_base, 0, *sizep);
tbl->it_busno = phb->bus->number;
+ tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
/* Units of tce entries */
- tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;
+ tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;
/* Test if we are going over 2GB of DMA space */
if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
@@ -498,7 +500,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
phb->dma_window_base_cur += phb->dma_window_size;
/* Set the tce table size - measured in entries */
- tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;
+ tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;
tbl->it_index = 0;
tbl->it_blocksize = 16;
@@ -529,18 +531,19 @@ static void iommu_table_setparms(struct pci_controller *phb,
static void iommu_table_setparms_lpar(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl,
- const void *dma_window)
+ const __be32 *dma_window)
{
unsigned long offset, size;
of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
tbl->it_busno = phb->bus->number;
+ tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
tbl->it_base = 0;
tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI;
- tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
- tbl->it_size = size >> IOMMU_PAGE_SHIFT;
+ tbl->it_offset = offset >> tbl->it_page_shift;
+ tbl->it_size = size >> tbl->it_page_shift;
}
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
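
With it_page_shift recorded in the table, window offsets and sizes are kept in units of IOMMU pages rather than hard-coding IOMMU_PAGE_SHIFT. A sketch of the byte-to-entry conversion, with made-up window values:

#include <stdio.h>

int main(void)
{
    unsigned long dma_offset = 0x8000000UL;  /* window start, bytes */
    unsigned long dma_size   = 0x10000000UL; /* window length, bytes */
    unsigned int  page_shift = 12;           /* IOMMU_PAGE_SHIFT_4K */

    unsigned long it_offset = dma_offset >> page_shift;
    unsigned long it_size   = dma_size   >> page_shift;

    printf("first entry %lu, %lu entries of %u-byte pages\n",
           it_offset, it_size, 1u << page_shift);
    return 0;
}
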
@@ -613,6 +616,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
iommu_table_setparms(pci->phb, dn, tbl);
pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
+ iommu_register_group(tbl, pci_domain_nr(bus), 0);
/* Divide the rest (1.75GB) among the children */
pci->phb->dma_window_size = 0x80000000ul;
@@ -628,7 +632,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
struct iommu_table *tbl;
struct device_node *dn, *pdn;
struct pci_dn *ppci;
- const void *dma_window = NULL;
+ const __be32 *dma_window = NULL;
dn = pci_bus_to_OF_node(bus);
@@ -657,6 +661,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
ppci->phb->node);
iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
+ iommu_register_group(tbl, pci_domain_nr(bus), 0);
pr_debug(" created table: %p\n", ppci->iommu_table);
}
}
@@ -683,7 +688,9 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
phb->node);
iommu_table_setparms(phb, dn, tbl);
PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
- set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
+ iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
+ set_iommu_table_base_and_group(&dev->dev,
+ PCI_DN(dn)->iommu_table);
return;
}
@@ -695,7 +702,8 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
dn = dn->parent;
if (dn && PCI_DN(dn))
- set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
+ set_iommu_table_base_and_group(&dev->dev,
+ PCI_DN(dn)->iommu_table);
else
printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
pci_name(dev));
@@ -713,21 +721,6 @@ static int __init disable_ddw_setup(char *str)
early_param("disable_ddw", disable_ddw_setup);
-static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u64 liobn)
-{
- int ret;
-
- ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
- if (ret)
- pr_warning("%s: failed to remove DMA window: rtas returned "
- "%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddw_avail[2], liobn);
- else
- pr_debug("%s: successfully removed DMA window: rtas returned "
- "%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddw_avail[2], liobn);
-}
-
static void remove_ddw(struct device_node *np)
{
struct dynamic_dma_window_prop *dwp;
@@ -757,7 +750,15 @@ static void remove_ddw(struct device_node *np)
pr_debug("%s successfully cleared tces in window.\n",
np->full_name);
- __remove_ddw(np, ddw_avail, liobn);
+ ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
+ if (ret)
+ pr_warning("%s: failed to remove direct window: rtas returned "
+ "%d to ibm,remove-pe-dma-window(%x) %llx\n",
+ np->full_name, ret, ddw_avail[2], liobn);
+ else
+ pr_debug("%s: successfully removed direct window: rtas returned "
+ "%d to ibm,remove-pe-dma-window(%x) %llx\n",
+ np->full_name, ret, ddw_avail[2], liobn);
delprop:
ret = of_remove_property(np, win64);
@@ -777,7 +778,7 @@ static u64 find_existing_ddw(struct device_node *pdn)
list_for_each_entry(window, &direct_window_list, list) {
if (window->device == pdn) {
direct64 = window->prop;
- dma_addr = direct64->dma_base;
+ dma_addr = be64_to_cpu(direct64->dma_base);
break;
}
}
@@ -882,34 +883,12 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
return ret;
}
-static void restore_default_window(struct pci_dev *dev,
- u32 ddw_restore_token, unsigned long liobn)
-{
- struct eeh_dev *edev;
- u32 cfg_addr;
- u64 buid;
- int ret;
-
- /*
- * Get the config address and phb buid of the PE window.
- * Rely on eeh to retrieve this for us.
- * Retrieve them from the pci device, not the node with the
- * dma-window property
- */
- edev = pci_dev_to_eeh_dev(dev);
- cfg_addr = edev->config_addr;
- if (edev->pe_config_addr)
- cfg_addr = edev->pe_config_addr;
- buid = edev->phb->buid;
+struct failed_ddw_pdn {
+ struct device_node *pdn;
+ struct list_head list;
+};
- do {
- ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr,
- BUID_HI(buid), BUID_LO(buid));
- } while (rtas_busy_delay(ret));
- dev_info(&dev->dev,
- "ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n",
- ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret);
-}
+static LIST_HEAD(failed_ddw_pdn_list);
/*
* If the PE supports dynamic dma windows, and there is space for a table
@@ -931,13 +910,10 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
u64 dma_addr, max_addr;
struct device_node *dn;
const u32 *uninitialized_var(ddw_avail);
- const u32 *uninitialized_var(ddw_extensions);
- u32 ddw_restore_token = 0;
struct direct_window *window;
struct property *win64;
struct dynamic_dma_window_prop *ddwprop;
- const void *dma_window = NULL;
- unsigned long liobn, offset, size;
+ struct failed_ddw_pdn *fpdn;
mutex_lock(&direct_window_init_mutex);
@@ -946,6 +922,18 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
goto out_unlock;
/*
+ * If we already went through this for a previous function of
+ * the same device and failed, we don't want to muck with the
+ * DMA window again, as it will race with in-flight operations
+ * and can lead to EEHs. The above mutex protects access to the
+ * list.
+ */
+ list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
+ if (!strcmp(fpdn->pdn->full_name, pdn->full_name))
+ goto out_unlock;
+ }
+
+ /*
* the ibm,ddw-applicable property holds the tokens for:
* ibm,query-pe-dma-window
* ibm,create-pe-dma-window
@@ -955,42 +943,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
*/
ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
if (!ddw_avail || len < 3 * sizeof(u32))
- goto out_unlock;
-
- /*
- * the extensions property is only required to exist in certain
- * levels of firmware and later
- * the ibm,ddw-extensions property is a list with the first
- * element containing the number of extensions and each
- * subsequent entry is a value corresponding to that extension
- */
- ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", &len);
- if (ddw_extensions) {
- /*
- * each new defined extension length should be added to
- * the top of the switch so the "earlier" entries also
- * get picked up
- */
- switch (ddw_extensions[0]) {
- /* ibm,reset-pe-dma-windows */
- case 1:
- ddw_restore_token = ddw_extensions[1];
- break;
- }
- }
+ goto out_failed;
- /*
- * Only remove the existing DMA window if we can restore back to
- * the default state. Removing the existing window maximizes the
- * resources available to firmware for dynamic window creation.
- */
- if (ddw_restore_token) {
- dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
- of_parse_dma_window(pdn, dma_window, &liobn, &offset, &size);
- __remove_ddw(pdn, ddw_avail, liobn);
- }
-
- /*
+ /*
* Query if there is a second window of size to map the
* whole partition. Query returns number of windows, largest
* block assigned to PE (partition endpoint), and two bitmasks
@@ -999,7 +954,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dn = pci_device_to_OF_node(dev);
ret = query_ddw(dev, ddw_avail, &query);
if (ret != 0)
- goto out_restore_window;
+ goto out_failed;
if (query.windows_available == 0) {
/*
@@ -1008,34 +963,34 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* trading in for a larger page size.
*/
dev_dbg(&dev->dev, "no free dynamic windows");
- goto out_restore_window;
+ goto out_failed;
}
- if (query.page_size & 4) {
+ if (be32_to_cpu(query.page_size) & 4) {
page_shift = 24; /* 16MB */
- } else if (query.page_size & 2) {
+ } else if (be32_to_cpu(query.page_size) & 2) {
page_shift = 16; /* 64kB */
- } else if (query.page_size & 1) {
+ } else if (be32_to_cpu(query.page_size) & 1) {
page_shift = 12; /* 4kB */
} else {
dev_dbg(&dev->dev, "no supported direct page size in mask %x",
query.page_size);
- goto out_restore_window;
+ goto out_failed;
}
/* verify the window * number of ptes will map the partition */
/* check largest block * page size > max memory hotplug addr */
max_addr = memory_hotplug_max();
- if (query.largest_available_block < (max_addr >> page_shift)) {
+ if (be32_to_cpu(query.largest_available_block) < (max_addr >> page_shift)) {
dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u "
"%llu-sized pages\n", max_addr, query.largest_available_block,
1ULL << page_shift);
- goto out_restore_window;
+ goto out_failed;
}
len = order_base_2(max_addr);
win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
if (!win64) {
dev_info(&dev->dev,
"couldn't allocate property for 64bit dma window\n");
- goto out_restore_window;
+ goto out_failed;
}
win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
@@ -1050,7 +1005,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
if (ret != 0)
goto out_free_prop;
- ddwprop->liobn = cpu_to_be32(create.liobn);
+ ddwprop->liobn = create.liobn;
ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
ddwprop->tce_shift = cpu_to_be32(page_shift);
ddwprop->window_shift = cpu_to_be32(len);
@@ -1097,9 +1052,13 @@ out_free_prop:
kfree(win64->value);
kfree(win64);
-out_restore_window:
- if (ddw_restore_token)
- restore_default_window(dev, ddw_restore_token, liobn);
+out_failed:
+
+ fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
+ if (!fpdn)
+ goto out_unlock;
+ fpdn->pdn = pdn;
+ list_add(&fpdn->list, &failed_ddw_pdn_list);
out_unlock:
mutex_unlock(&direct_window_init_mutex);
@@ -1110,7 +1069,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
struct device_node *pdn, *dn;
struct iommu_table *tbl;
- const void *dma_window = NULL;
+ const __be32 *dma_window = NULL;
struct pci_dn *pci;
pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
@@ -1145,12 +1104,13 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
pci->phb->node);
iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
+ iommu_register_group(tbl, pci_domain_nr(pci->phb->bus), 0);
pr_debug(" created table: %p\n", pci->iommu_table);
} else {
pr_debug(" found DMA window, table: %p\n", pci->iommu_table);
}
- set_iommu_table_base(&dev->dev, pci->iommu_table);
+ set_iommu_table_base_and_group(&dev->dev, pci->iommu_table);
}
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
@@ -1158,7 +1118,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
bool ddw_enabled = false;
struct device_node *pdn, *dn;
struct pci_dev *pdev;
- const void *dma_window = NULL;
+ const __be32 *dma_window = NULL;
u64 dma_offset;
if (!dev->dma_mask)
@@ -1295,6 +1255,7 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
switch (action) {
case OF_RECONFIG_DETACH_NODE:
+ remove_ddw(np);
if (pci && pci->iommu_table)
iommu_free_table(pci->iommu_table, np->full_name);
@@ -1307,16 +1268,6 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
}
}
spin_unlock(&direct_window_list_lock);
-
- /*
- * Because the notifier runs after isolation of the
- * slot, we are guaranteed any DMA window has already
- * been revoked and the TCEs have been marked invalid,
- * so we don't need a call to remove_ddw(np). However,
- * if an additional notifier action is added before the
- * isolate call, we should update this code for
- * completeness with such a call.
- */
break;
default:
err = NOTIFY_DONE;
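
With the ordering comment removed, remove_ddw() now runs from the reconfig notifier itself on node detach. A sketch of the dispatch shape such a notifier takes; the action names and return codes below are simplified stand-ins for the kernel's OF_RECONFIG_* and NOTIFY_* values:

#include <stdio.h>

enum action { ATTACH_NODE, DETACH_NODE, UPDATE_PROPERTY };
enum { NOTIFY_DONE, NOTIFY_OK };

static int reconfig_notifier(enum action action)
{
    switch (action) {
    case DETACH_NODE:
        /* tear down per-node state first: remove the DMA window,
         * then free the iommu table, as the hunk above now does */
        return NOTIFY_OK;
    default:
        return NOTIFY_DONE;
    }
}

int main(void)
{
    printf("detach -> %d, update -> %d\n",
           reconfig_notifier(DETACH_NODE),
           reconfig_notifier(UPDATE_PROPERTY));
    return 0;
}
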
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 7d94bdc63d5..13fa95b3aa8 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -17,9 +17,9 @@
#include <asm/mpic.h>
#include <asm/xics.h>
#include <asm/smp.h>
+#include <asm/plpar_wrappers.h>
#include "pseries.h"
-#include "plpar_wrappers.h"
static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 0da39fed355..b02af9ef3ff 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -41,10 +41,17 @@
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
+#include <asm/plpar_wrappers.h>
-#include "plpar_wrappers.h"
#include "pseries.h"
+/* Flag bits for H_BULK_REMOVE */
+#define HBR_REQUEST 0x4000000000000000UL
+#define HBR_RESPONSE 0x8000000000000000UL
+#define HBR_END 0xc000000000000000UL
+#define HBR_AVPN 0x0200000000000000UL
+#define HBR_ANDCOND 0x0100000000000000UL
+
/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
@@ -61,9 +68,18 @@ void vpa_init(int cpu)
struct paca_struct *pp;
struct dtl_entry *dtl;
+ /*
+ * The spec says it "may be problematic" if CPU x registers the VPA of
+ * CPU y. We should never do that, but wail if we ever do.
+ */
+ WARN_ON(cpu != smp_processor_id());
+
if (cpu_has_feature(CPU_FTR_ALTIVEC))
lppaca_of(cpu).vmxregs_in_use = 1;
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ lppaca_of(cpu).ebb_regs_in_use = 1;
+
addr = __pa(&lppaca_of(cpu));
ret = register_vpa(hwcpu, addr);
@@ -76,7 +92,7 @@ void vpa_init(int cpu)
* PAPR says this feature is SLB-Buffer but firmware never
* reports that. All SPLPAR support SLB shadow buffer.
*/
- addr = __pa(&slb_shadow[cpu]);
+ addr = __pa(paca[cpu].slb_shadow_ptr);
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
ret = register_slb_shadow(hwcpu, addr);
if (ret)
@@ -96,7 +112,7 @@ void vpa_init(int cpu)
lppaca_of(cpu).dtl_idx = 0;
/* hypervisor reads buffer length from this field */
- dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
+ dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
ret = register_dtl(hwcpu, __pa(dtl));
if (ret)
pr_err("WARNING: DTL registration of cpu %d (hw %d) "
@@ -109,7 +125,7 @@ void vpa_init(int cpu)
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
- int psize, int ssize)
+ int psize, int apsize, int ssize)
{
unsigned long lpar_rc;
unsigned long flags;
@@ -121,8 +137,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
"pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, vpn, pa, rflags, vflags, psize);
- hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize) | rflags;
+ hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
+ hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
@@ -136,8 +152,9 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
flags = 0;
/* Make pHyp happy */
- if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU))
- hpte_r &= ~_PAGE_COHERENT;
+ if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
+ hpte_r &= ~HPTE_R_M;
+
if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
flags |= H_COALESCE_CAND;
@@ -155,7 +172,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
*/
if (unlikely(lpar_rc != H_SUCCESS)) {
if (!(vflags & HPTE_V_BOLTED))
- pr_devel(" lpar err %lu\n", lpar_rc);
+ pr_devel(" lpar err %ld\n", lpar_rc);
return -2;
}
if (!(vflags & HPTE_V_BOLTED))
@@ -186,7 +203,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
(0x1UL << 4), &dummy1, &dummy2);
if (lpar_rc == H_SUCCESS)
return i;
- BUG_ON(lpar_rc != H_NOT_FOUND);
+
+ /*
+ * The test for adjunct partition is performed before the
+ * ANDCOND test. H_RESOURCE may be returned, so we need to
+ * check for that as well.
+ */
+ BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
slot_offset++;
slot_offset &= 0x7;
@@ -223,6 +246,23 @@ static void pSeries_lpar_hptab_clear(void)
&(ptes[j].pteh), &(ptes[j].ptel));
}
}
+
+#ifdef __LITTLE_ENDIAN__
+ /* Reset exceptions to big endian */
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+ long rc;
+
+ rc = pseries_big_endian_exceptions();
+ /*
+ * At this point it is unlikely panic() will get anything
+ * out to the user, but at least this will stop us from
+ * continuing on further and creating an even more
+ * difficult to debug situation.
+ */
+ if (rc)
+ panic("Could not enable big endian exceptions");
+ }
+#endif
}
/*
@@ -234,7 +274,8 @@ static void pSeries_lpar_hptab_clear(void)
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
unsigned long newpp,
unsigned long vpn,
- int psize, int ssize, int local)
+ int psize, int apsize,
+ int ssize, int local)
{
unsigned long lpar_rc;
unsigned long flags = (newpp & 7) | H_AVPN;
@@ -322,7 +363,8 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
}
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
- int psize, int ssize, int local)
+ int psize, int apsize,
+ int ssize, int local)
{
unsigned long want_v;
unsigned long lpar_rc;
@@ -339,6 +381,113 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
BUG_ON(lpar_rc != H_SUCCESS);
}
+/*
+ * Limit iterations holding pSeries_lpar_tlbie_lock to 3 (12 entries,
+ * four (slot, AVPN) pairs per H_BULK_REMOVE call). We also need to
+ * make sure that we avoid bouncing the hypervisor tlbie lock.
+ */
+#define PPC64_HUGE_HPTE_BATCH 12
+
+static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
+ unsigned long *vpn, int count,
+ int psize, int ssize)
+{
+ unsigned long param[8];
+ int i = 0, pix = 0, rc;
+ unsigned long flags = 0;
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+ if (lock_tlbie)
+ spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
+
+ for (i = 0; i < count; i++) {
+
+ if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
+ pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
+ ssize, 0);
+ } else {
+ param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
+ param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
+ pix += 2;
+ if (pix == 8) {
+ rc = plpar_hcall9(H_BULK_REMOVE, param,
+ param[0], param[1], param[2],
+ param[3], param[4], param[5],
+ param[6], param[7]);
+ BUG_ON(rc != H_SUCCESS);
+ pix = 0;
+ }
+ }
+ }
+ if (pix) {
+ param[pix] = HBR_END;
+ rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
+ param[2], param[3], param[4], param[5],
+ param[6], param[7]);
+ BUG_ON(rc != H_SUCCESS);
+ }
+
+ if (lock_tlbie)
+ spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
+}
+
+static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
+ unsigned char *hpte_slot_array,
+ unsigned long addr, int psize)
+{
+ int ssize = 0, i, index = 0;
+ unsigned long s_addr = addr;
+ unsigned int max_hpte_count, valid;
+ unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
+ unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
+ unsigned long shift, hidx, vpn = 0, vsid, hash, slot;
+
+ shift = mmu_psize_defs[psize].shift;
+ max_hpte_count = 1U << (PMD_SHIFT - shift);
+
+ for (i = 0; i < max_hpte_count; i++) {
+ valid = hpte_valid(hpte_slot_array, i);
+ if (!valid)
+ continue;
+ hidx = hpte_hash_index(hpte_slot_array, i);
+
+ /* get the vpn */
+ addr = s_addr + (i * (1ul << shift));
+ if (!is_kernel_addr(addr)) {
+ ssize = user_segment_size(addr);
+ vsid = get_vsid(mm->context.id, addr, ssize);
+ WARN_ON(vsid == 0);
+ } else {
+ vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+ ssize = mmu_kernel_ssize;
+ }
+
+ vpn = hpt_vpn(addr, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ if (hidx & _PTEIDX_SECONDARY)
+ hash = ~hash;
+
+ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+ slot += hidx & _PTEIDX_GROUP_IX;
+
+ slot_array[index] = slot;
+ vpn_array[index] = vpn;
+ if (index == PPC64_HUGE_HPTE_BATCH - 1) {
+ /*
+ * Now do a bulk invalidate
+ */
+ __pSeries_lpar_hugepage_invalidate(slot_array,
+ vpn_array,
+ PPC64_HUGE_HPTE_BATCH,
+ psize, ssize);
+ index = 0;
+ } else
+ index++;
+ }
+ if (index)
+ __pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
+ index, psize, ssize);
+}
+
static void pSeries_lpar_hpte_removebolted(unsigned long ea,
int psize, int ssize)
{
@@ -350,17 +499,12 @@ static void pSeries_lpar_hpte_removebolted(unsigned long ea,
slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
BUG_ON(slot == -1);
-
- pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0);
+ /*
+ * lpar doesn't use the passed actual page size
+ */
+ pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
}
-/* Flag bits for H_BULK_REMOVE */
-#define HBR_REQUEST 0x4000000000000000UL
-#define HBR_RESPONSE 0x8000000000000000UL
-#define HBR_END 0xc000000000000000UL
-#define HBR_AVPN 0x0200000000000000UL
-#define HBR_ANDCOND 0x0100000000000000UL
-
/*
* Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
* lock.
@@ -394,8 +538,11 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
+ /*
+ * lpar doesn't use the passed actual page size
+ */
pSeries_lpar_hpte_invalidate(slot, vpn, psize,
- ssize, local);
+ 0, ssize, local);
} else {
param[pix] = HBR_REQUEST | HBR_AVPN | slot;
param[pix+1] = hpte_encode_avpn(vpn, psize,
@@ -446,6 +593,7 @@ void __init hpte_init_lpar(void)
ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range;
ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear;
+ ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}
#ifdef CONFIG_PPC_SMLPAR
@@ -600,7 +748,7 @@ int h_get_mpp(struct hvcall_mpp_data *mpp_data)
mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
- mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;
+ mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;
mpp_data->pool_size = retbuf[4];
mpp_data->loan_request = retbuf[5];
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
new file mode 100644
index 00000000000..c9fecf09b8f
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -0,0 +1,710 @@
+/*
+ * PowerPC64 LPAR Configuration Information Driver
+ *
+ * Dave Engebretsen engebret@us.ibm.com
+ * Copyright (c) 2003 Dave Engebretsen
+ * Will Schmidt willschm@us.ibm.com
+ * SPLPAR updates, Copyright (c) 2003 Will Schmidt IBM Corporation.
+ * seq_file updates, Copyright (c) 2004 Will Schmidt IBM Corporation.
+ * Nathan Lynch nathanl@austin.ibm.com
+ * Added lparcfg_write, Copyright (C) 2004 Nathan Lynch IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This driver creates a proc file at /proc/ppc64/lparcfg which contains
+ * keyword - value pairs that specify the configuration of the partition.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <asm/lppaca.h>
+#include <asm/hvcall.h>
+#include <asm/firmware.h>
+#include <asm/rtas.h>
+#include <asm/time.h>
+#include <asm/prom.h>
+#include <asm/vdso_datapage.h>
+#include <asm/vio.h>
+#include <asm/mmu.h>
+#include <asm/machdep.h>
+
+
+/*
+ * This isn't a module but we expose that to userspace
+ * via /proc so leave the definitions here
+ */
+#define MODULE_VERS "1.9"
+#define MODULE_NAME "lparcfg"
+
+/* #define LPARCFG_DEBUG */
+
+/*
+ * Track sum of all purrs across all processors. This is used to further
+ * calculate usage values by different applications
+ */
+static unsigned long get_purr(void)
+{
+ unsigned long sum_purr = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_usage *cu;
+
+ cu = &per_cpu(cpu_usage_array, cpu);
+ sum_purr += cu->current_tb;
+ }
+ return sum_purr;
+}
+
+/*
+ * Methods used to fetch LPAR data when running on a pSeries platform.
+ */
+
+struct hvcall_ppp_data {
+ u64 entitlement;
+ u64 unallocated_entitlement;
+ u16 group_num;
+ u16 pool_num;
+ u8 capped;
+ u8 weight;
+ u8 unallocated_weight;
+ u16 active_procs_in_pool;
+ u16 active_system_procs;
+ u16 phys_platform_procs;
+ u32 max_proc_cap_avail;
+ u32 entitled_proc_cap_avail;
+};
+
+/*
+ * H_GET_PPP hcall returns info in 4 parms
+ * (entitled_capacity, unallocated_capacity,
+ * aggregation, resource_capability).
+ *
+ * R4 = Entitled Processor Capacity Percentage.
+ * R5 = Unallocated Processor Capacity Percentage.
+ * R6 (AABBCCDDEEFFGGHH).
+ * XXXX - reserved (0)
+ * XXXX - reserved (0)
+ * XXXX - Group Number
+ * XXXX - Pool Number.
+ * R7 (IIJJKKLLMMNNOOPP).
+ * XX - reserved. (0)
+ * XX - bits 0-6 reserved (0); bit 7 is the Capped indicator.
+ * XX - variable processor Capacity Weight
+ * XX - Unallocated Variable Processor Capacity Weight.
+ * XXXX - Active processors in Physical Processor Pool.
+ * XXXX - Processors active on platform.
+ * R8 (QQQQRRRRRRSSSSSS). if ibm,partition-performance-parameters-level >= 1
+ * XXXX - Physical platform procs allocated to virtualization.
+ * XXXXXX - Max procs capacity % available to the partitions pool.
+ * XXXXXX - Entitled procs capacity % available to the
+ * partitions pool.
+ */
+static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
+{
+ unsigned long rc;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ rc = plpar_hcall9(H_GET_PPP, retbuf);
+
+ ppp_data->entitlement = retbuf[0];
+ ppp_data->unallocated_entitlement = retbuf[1];
+
+ ppp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
+ ppp_data->pool_num = retbuf[2] & 0xffff;
+
+ ppp_data->capped = (retbuf[3] >> 6 * 8) & 0x01;
+ ppp_data->weight = (retbuf[3] >> 5 * 8) & 0xff;
+ ppp_data->unallocated_weight = (retbuf[3] >> 4 * 8) & 0xff;
+ ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;
+ ppp_data->active_system_procs = retbuf[3] & 0xffff;
+
+ ppp_data->phys_platform_procs = retbuf[4] >> 6 * 8;
+ ppp_data->max_proc_cap_avail = (retbuf[4] >> 3 * 8) & 0xffffff;
+ ppp_data->entitled_proc_cap_avail = retbuf[4] & 0xffffff;
+
+ return rc;
+}
+
+static unsigned h_pic(unsigned long *pool_idle_time,
+ unsigned long *num_procs)
+{
+ unsigned long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_PIC, retbuf);
+
+ *pool_idle_time = retbuf[0];
+ *num_procs = retbuf[1];
+
+ return rc;
+}
+
+/*
+ * parse_ppp_data
+ * Parse out the data returned from h_get_ppp and h_pic
+ */
+static void parse_ppp_data(struct seq_file *m)
+{
+ struct hvcall_ppp_data ppp_data;
+ struct device_node *root;
+ const __be32 *perf_level;
+ int rc;
+
+ rc = h_get_ppp(&ppp_data);
+ if (rc)
+ return;
+
+ seq_printf(m, "partition_entitled_capacity=%lld\n",
+ ppp_data.entitlement);
+ seq_printf(m, "group=%d\n", ppp_data.group_num);
+ seq_printf(m, "system_active_processors=%d\n",
+ ppp_data.active_system_procs);
+
+ /* pool related entries are appropriate for shared configs */
+ if (lppaca_shared_proc(get_lppaca())) {
+ unsigned long pool_idle_time, pool_procs;
+
+ seq_printf(m, "pool=%d\n", ppp_data.pool_num);
+
+ /* report pool_capacity in percentage */
+ seq_printf(m, "pool_capacity=%d\n",
+ ppp_data.active_procs_in_pool * 100);
+
+ h_pic(&pool_idle_time, &pool_procs);
+ seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
+ seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
+ }
+
+ seq_printf(m, "unallocated_capacity_weight=%d\n",
+ ppp_data.unallocated_weight);
+ seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
+ seq_printf(m, "capped=%d\n", ppp_data.capped);
+ seq_printf(m, "unallocated_capacity=%lld\n",
+ ppp_data.unallocated_entitlement);
+
+ /* The last bits of information returned from h_get_ppp are only
+ * valid if the ibm,partition-performance-parameters-level
+ * property is >= 1.
+ */
+ root = of_find_node_by_path("/");
+ if (root) {
+ perf_level = of_get_property(root,
+ "ibm,partition-performance-parameters-level",
+ NULL);
+ if (perf_level && (be32_to_cpup(perf_level) >= 1)) {
+ seq_printf(m,
+ "physical_procs_allocated_to_virtualization=%d\n",
+ ppp_data.phys_platform_procs);
+ seq_printf(m, "max_proc_capacity_available=%d\n",
+ ppp_data.max_proc_cap_avail);
+ seq_printf(m, "entitled_proc_capacity_available=%d\n",
+ ppp_data.entitled_proc_cap_avail);
+ }
+
+ of_node_put(root);
+ }
+}
+
+/**
+ * parse_mpp_data
+ * Parse out data returned from h_get_mpp
+ */
+static void parse_mpp_data(struct seq_file *m)
+{
+ struct hvcall_mpp_data mpp_data;
+ int rc;
+
+ rc = h_get_mpp(&mpp_data);
+ if (rc)
+ return;
+
+ seq_printf(m, "entitled_memory=%ld\n", mpp_data.entitled_mem);
+
+ if (mpp_data.mapped_mem != -1)
+ seq_printf(m, "mapped_entitled_memory=%ld\n",
+ mpp_data.mapped_mem);
+
+ seq_printf(m, "entitled_memory_group_number=%d\n", mpp_data.group_num);
+ seq_printf(m, "entitled_memory_pool_number=%d\n", mpp_data.pool_num);
+
+ seq_printf(m, "entitled_memory_weight=%d\n", mpp_data.mem_weight);
+ seq_printf(m, "unallocated_entitled_memory_weight=%d\n",
+ mpp_data.unallocated_mem_weight);
+ seq_printf(m, "unallocated_io_mapping_entitlement=%ld\n",
+ mpp_data.unallocated_entitlement);
+
+ if (mpp_data.pool_size != -1)
+ seq_printf(m, "entitled_memory_pool_size=%ld bytes\n",
+ mpp_data.pool_size);
+
+ seq_printf(m, "entitled_memory_loan_request=%ld\n",
+ mpp_data.loan_request);
+
+ seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem);
+}
+
+/**
+ * parse_mpp_x_data
+ * Parse out data returned from h_get_mpp_x
+ */
+static void parse_mpp_x_data(struct seq_file *m)
+{
+ struct hvcall_mpp_x_data mpp_x_data;
+
+ if (!firmware_has_feature(FW_FEATURE_XCMO))
+ return;
+ if (h_get_mpp_x(&mpp_x_data))
+ return;
+
+ seq_printf(m, "coalesced_bytes=%ld\n", mpp_x_data.coalesced_bytes);
+
+ if (mpp_x_data.pool_coalesced_bytes)
+ seq_printf(m, "pool_coalesced_bytes=%ld\n",
+ mpp_x_data.pool_coalesced_bytes);
+ if (mpp_x_data.pool_purr_cycles)
+ seq_printf(m, "coalesce_pool_purr=%ld\n", mpp_x_data.pool_purr_cycles);
+ if (mpp_x_data.pool_spurr_cycles)
+ seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles);
+}
+
+#define SPLPAR_CHARACTERISTICS_TOKEN 20
+#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
+
+/*
+ * parse_system_parameter_string()
+ * Retrieve the potential_processors, max_entitled_capacity and friends
+ * through the get-system-parameter rtas call. Replace keyword strings as
+ * necessary.
+ */
+static void parse_system_parameter_string(struct seq_file *m)
+{
+ int call_status;
+
+ unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
+ if (!local_buffer) {
+ printk(KERN_ERR "%s %s kmalloc failure at line %d\n",
+ __FILE__, __func__, __LINE__);
+ return;
+ }
+
+ spin_lock(&rtas_data_buf_lock);
+ memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH);
+ call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
+ NULL,
+ SPLPAR_CHARACTERISTICS_TOKEN,
+ __pa(rtas_data_buf),
+ RTAS_DATA_BUF_SIZE);
+ memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
+ local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
+ spin_unlock(&rtas_data_buf_lock);
+
+ if (call_status != 0) {
+ printk(KERN_INFO
+ "%s %s Error calling get-system-parameter (0x%x)\n",
+ __FILE__, __func__, call_status);
+ } else {
+ int splpar_strlen;
+ int idx, w_idx;
+ char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
+ if (!workbuffer) {
+ printk(KERN_ERR "%s %s kmalloc failure at line %d\n",
+ __FILE__, __func__, __LINE__);
+ kfree(local_buffer);
+ return;
+ }
+#ifdef LPARCFG_DEBUG
+ printk(KERN_INFO "success calling get-system-parameter\n");
+#endif
+ splpar_strlen = local_buffer[0] * 256 + local_buffer[1];
+ local_buffer += 2; /* step over strlen value */
+
+ w_idx = 0;
+ idx = 0;
+ while ((*local_buffer) && (idx < splpar_strlen)) {
+ workbuffer[w_idx++] = local_buffer[idx++];
+ if ((local_buffer[idx] == ',')
+ || (local_buffer[idx] == '\0')) {
+ workbuffer[w_idx] = '\0';
+ if (w_idx) {
+ /* avoid the empty string */
+ seq_printf(m, "%s\n", workbuffer);
+ }
+ memset(workbuffer, 0, SPLPAR_MAXLENGTH);
+ idx++; /* skip the comma */
+ w_idx = 0;
+ } else if (local_buffer[idx] == '=') {
+ /* code here to replace workbuffer contents
+ with different keyword strings */
+ if (0 == strcmp(workbuffer, "MaxEntCap")) {
+ strcpy(workbuffer,
+ "partition_max_entitled_capacity");
+ w_idx = strlen(workbuffer);
+ }
+ if (0 == strcmp(workbuffer, "MaxPlatProcs")) {
+ strcpy(workbuffer,
+ "system_potential_processors");
+ w_idx = strlen(workbuffer);
+ }
+ }
+ }
+ kfree(workbuffer);
+ local_buffer -= 2; /* back up over strlen value */
+ }
+ kfree(local_buffer);
+}
+
+/* Return the number of processors in the system.
+ * This function reads through the device tree and counts
+ * the virtual processors; this does not include threads.
+ */
+static int lparcfg_count_active_processors(void)
+{
+ struct device_node *cpus_dn = NULL;
+ int count = 0;
+
+ while ((cpus_dn = of_find_node_by_type(cpus_dn, "cpu"))) {
+#ifdef LPARCFG_DEBUG
+ printk(KERN_ERR "cpus_dn %p\n", cpus_dn);
+#endif
+ count++;
+ }
+ return count;
+}
+
+static void pseries_cmo_data(struct seq_file *m)
+{
+ int cpu;
+ unsigned long cmo_faults = 0;
+ unsigned long cmo_fault_time = 0;
+
+ seq_printf(m, "cmo_enabled=%d\n", firmware_has_feature(FW_FEATURE_CMO));
+
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ return;
+
+ for_each_possible_cpu(cpu) {
+ cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults);
+ cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time);
+ }
+
+ seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
+ seq_printf(m, "cmo_fault_time_usec=%lu\n",
+ cmo_fault_time / tb_ticks_per_usec);
+ seq_printf(m, "cmo_primary_psp=%d\n", cmo_get_primary_psp());
+ seq_printf(m, "cmo_secondary_psp=%d\n", cmo_get_secondary_psp());
+ seq_printf(m, "cmo_page_size=%lu\n", cmo_get_page_size());
+}
+
+static void splpar_dispatch_data(struct seq_file *m)
+{
+ int cpu;
+ unsigned long dispatches = 0;
+ unsigned long dispatch_dispersions = 0;
+
+ for_each_possible_cpu(cpu) {
+ dispatches += be32_to_cpu(lppaca_of(cpu).yield_count);
+ dispatch_dispersions +=
+ be32_to_cpu(lppaca_of(cpu).dispersion_count);
+ }
+
+ seq_printf(m, "dispatches=%lu\n", dispatches);
+ seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);
+}
+
+static void parse_em_data(struct seq_file *m)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+ seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
+}
+
+static int pseries_lparcfg_data(struct seq_file *m, void *v)
+{
+ int partition_potential_processors;
+ int partition_active_processors;
+ struct device_node *rtas_node;
+ const __be32 *lrdrp = NULL;
+
+ rtas_node = of_find_node_by_path("/rtas");
+ if (rtas_node)
+ lrdrp = of_get_property(rtas_node, "ibm,lrdr-capacity", NULL);
+
+ if (lrdrp == NULL) {
+ partition_potential_processors = vdso_data->processorCount;
+ } else {
+ partition_potential_processors = be32_to_cpup(lrdrp + 4);
+ }
+ of_node_put(rtas_node);
+
+ partition_active_processors = lparcfg_count_active_processors();
+
+ if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ /* this call handles the ibm,get-system-parameter contents */
+ parse_system_parameter_string(m);
+ parse_ppp_data(m);
+ parse_mpp_data(m);
+ parse_mpp_x_data(m);
+ pseries_cmo_data(m);
+ splpar_dispatch_data(m);
+
+ seq_printf(m, "purr=%ld\n", get_purr());
+ } else { /* non SPLPAR case */
+
+ seq_printf(m, "system_active_processors=%d\n",
+ partition_potential_processors);
+
+ seq_printf(m, "system_potential_processors=%d\n",
+ partition_potential_processors);
+
+ seq_printf(m, "partition_max_entitled_capacity=%d\n",
+ partition_potential_processors * 100);
+
+ seq_printf(m, "partition_entitled_capacity=%d\n",
+ partition_active_processors * 100);
+ }
+
+ seq_printf(m, "partition_active_processors=%d\n",
+ partition_active_processors);
+
+ seq_printf(m, "partition_potential_processors=%d\n",
+ partition_potential_processors);
+
+ seq_printf(m, "shared_processor_mode=%d\n",
+ lppaca_shared_proc(get_lppaca()));
+
+ seq_printf(m, "slb_size=%d\n", mmu_slb_size);
+
+ parse_em_data(m);
+
+ return 0;
+}
+
+static ssize_t update_ppp(u64 *entitlement, u8 *weight)
+{
+ struct hvcall_ppp_data ppp_data;
+ u8 new_weight;
+ u64 new_entitled;
+ ssize_t retval;
+
+ /* Get our current parameters */
+ retval = h_get_ppp(&ppp_data);
+ if (retval)
+ return retval;
+
+ if (entitlement) {
+ new_weight = ppp_data.weight;
+ new_entitled = *entitlement;
+ } else if (weight) {
+ new_weight = *weight;
+ new_entitled = ppp_data.entitlement;
+ } else
+ return -EINVAL;
+
+ pr_debug("%s: current_entitled = %llu, current_weight = %u\n",
+ __func__, ppp_data.entitlement, ppp_data.weight);
+
+ pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
+ __func__, new_entitled, new_weight);
+
+ retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
+ return retval;
+}
+
+/**
+ * update_mpp
+ *
+ * Update the memory entitlement and weight for the partition. Caller must
+ * specify either a new entitlement or weight, not both, to be updated
+ * since the h_set_mpp call takes both entitlement and weight as parameters.
+ */
+static ssize_t update_mpp(u64 *entitlement, u8 *weight)
+{
+ struct hvcall_mpp_data mpp_data;
+ u64 new_entitled;
+ u8 new_weight;
+ ssize_t rc;
+
+ if (entitlement) {
+ /* Check with vio to ensure the new memory entitlement
+ * can be handled.
+ */
+ rc = vio_cmo_entitlement_update(*entitlement);
+ if (rc)
+ return rc;
+ }
+
+ rc = h_get_mpp(&mpp_data);
+ if (rc)
+ return rc;
+
+ if (entitlement) {
+ new_weight = mpp_data.mem_weight;
+ new_entitled = *entitlement;
+ } else if (weight) {
+ new_weight = *weight;
+ new_entitled = mpp_data.entitled_mem;
+ } else
+ return -EINVAL;
+
+ pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
+ __func__, mpp_data.entitled_mem, mpp_data.mem_weight);
+
+ pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
+ __func__, new_entitled, new_weight);
+
+ rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
+ return rc;
+}
+
+/*
+ * Interface for changing system parameters (variable capacity weight
+ * and entitled capacity). Format of input is "param_name=value";
+ * anything after value is ignored. Valid parameters at this time are
+ * "partition_entitled_capacity" and "capacity_weight". We use
+ * H_SET_PPP to alter parameters.
+ *
+ * This function should be invoked only on systems with
+ * FW_FEATURE_SPLPAR.
+ */
+static ssize_t lparcfg_write(struct file *file, const char __user * buf,
+ size_t count, loff_t * off)
+{
+ int kbuf_sz = 64;
+ char kbuf[kbuf_sz];
+ char *tmp;
+ u64 new_entitled, *new_entitled_ptr = &new_entitled;
+ u8 new_weight, *new_weight_ptr = &new_weight;
+ ssize_t retval;
+
+ if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+ return -EINVAL;
+
+ if (count > kbuf_sz)
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count - 1] = '\0';
+ tmp = strchr(kbuf, '=');
+ if (!tmp)
+ return -EINVAL;
+
+ *tmp++ = '\0';
+
+ if (!strcmp(kbuf, "partition_entitled_capacity")) {
+ char *endp;
+ *new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
+ if (endp == tmp)
+ return -EINVAL;
+
+ retval = update_ppp(new_entitled_ptr, NULL);
+ } else if (!strcmp(kbuf, "capacity_weight")) {
+ char *endp;
+ *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
+ if (endp == tmp)
+ return -EINVAL;
+
+ retval = update_ppp(NULL, new_weight_ptr);
+ } else if (!strcmp(kbuf, "entitled_memory")) {
+ char *endp;
+ *new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
+ if (endp == tmp)
+ return -EINVAL;
+
+ retval = update_mpp(new_entitled_ptr, NULL);
+ } else if (!strcmp(kbuf, "entitled_memory_weight")) {
+ char *endp;
+ *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
+ if (endp == tmp)
+ return -EINVAL;
+
+ retval = update_mpp(NULL, new_weight_ptr);
+ } else
+ return -EINVAL;
+
+ if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
+ retval = count;
+ } else if (retval == H_BUSY) {
+ retval = -EBUSY;
+ } else if (retval == H_HARDWARE) {
+ retval = -EIO;
+ } else if (retval == H_PARAMETER) {
+ retval = -EINVAL;
+ }
+
+ return retval;
+}
+
+static int lparcfg_data(struct seq_file *m, void *v)
+{
+ struct device_node *rootdn;
+ const char *model = "";
+ const char *system_id = "";
+ const char *tmp;
+ const __be32 *lp_index_ptr;
+ unsigned int lp_index = 0;
+
+ seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
+
+ rootdn = of_find_node_by_path("/");
+ if (rootdn) {
+ tmp = of_get_property(rootdn, "model", NULL);
+ if (tmp)
+ model = tmp;
+ tmp = of_get_property(rootdn, "system-id", NULL);
+ if (tmp)
+ system_id = tmp;
+ lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
+ NULL);
+ if (lp_index_ptr)
+ lp_index = be32_to_cpup(lp_index_ptr);
+ of_node_put(rootdn);
+ }
+ seq_printf(m, "serial_number=%s\n", system_id);
+ seq_printf(m, "system_type=%s\n", model);
+ seq_printf(m, "partition_id=%d\n", (int)lp_index);
+
+ return pseries_lparcfg_data(m, v);
+}
+
+static int lparcfg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lparcfg_data, NULL);
+}
+
+static const struct file_operations lparcfg_fops = {
+ .read = seq_read,
+ .write = lparcfg_write,
+ .open = lparcfg_open,
+ .release = single_release,
+ .llseek = seq_lseek,
+};
+
+static int __init lparcfg_init(void)
+{
+ umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
+
+ /* Allow writing if we have FW_FEATURE_SPLPAR */
+ if (firmware_has_feature(FW_FEATURE_SPLPAR))
+ mode |= S_IWUSR;
+
+ if (!proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops)) {
+ printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
+ return -EIO;
+ }
+ return 0;
+}
+machine_device_initcall(pseries, lparcfg_init);
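
The write side of the new proc file accepts a single name=value line, e.g. `echo "capacity_weight=128" > /proc/powerpc/lparcfg` on an SPLPAR system. A minimal user-space sketch of the same write; the path and parameter name follow the driver above, the value is illustrative, and the write needs root:

#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/powerpc/lparcfg", "w");
    if (!f) {
        perror("lparcfg");
        return 1;
    }
    /* trailing newline is fine: lparcfg_write() NUL-terminates the buffer */
    if (fprintf(f, "capacity_weight=128\n") < 0)
        perror("write");
    fclose(f);
    return 0;
}
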
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 6573808cc5f..bde7ebad394 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -28,7 +28,7 @@ struct update_props_workarea {
u32 state;
u64 reserved;
u32 nprops;
-};
+} __packed;
#define NODE_ACTION_MASK 0xff000000
#define NODE_COUNT_MASK 0x00ffffff
@@ -37,14 +37,16 @@ struct update_props_workarea {
#define UPDATE_DT_NODE 0x02000000
#define ADD_DT_NODE 0x03000000
-static int mobility_rtas_call(int token, char *buf)
+#define MIGRATION_SCOPE (1)
+
+static int mobility_rtas_call(int token, char *buf, s32 scope)
{
int rc;
spin_lock(&rtas_data_buf_lock);
memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
- rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, 1);
+ rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
spin_unlock(&rtas_data_buf_lock);
@@ -60,6 +62,7 @@ static int delete_dt_node(u32 phandle)
return -ENOENT;
dlpar_detach_node(dn);
+ of_node_put(dn);
return 0;
}
@@ -117,21 +120,22 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
if (!more) {
of_update_property(dn, new_prop);
- new_prop = NULL;
+ *prop = NULL;
}
return 0;
}
-static int update_dt_node(u32 phandle)
+static int update_dt_node(u32 phandle, s32 scope)
{
struct update_props_workarea *upwa;
struct device_node *dn;
struct property *prop = NULL;
- int i, rc;
+ int i, rc, rtas_rc;
char *prop_data;
char *rtas_buf;
int update_properties_token;
+ u32 vd;
update_properties_token = rtas_token("ibm,update-properties");
if (update_properties_token == RTAS_UNKNOWN_SERVICE)
@@ -151,19 +155,32 @@ static int update_dt_node(u32 phandle)
upwa->phandle = phandle;
do {
- rc = mobility_rtas_call(update_properties_token, rtas_buf);
- if (rc < 0)
+ rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf,
+ scope);
+ if (rtas_rc < 0)
break;
prop_data = rtas_buf + sizeof(*upwa);
+ /* On the first call to ibm,update-properties for a node, the
+ * first property value descriptor contains an empty
+ * property name, the property value length encoded as u32,
+ * and the property value is the node path being updated.
+ */
+ if (*prop_data == 0) {
+ prop_data++;
+ vd = *(u32 *)prop_data;
+ prop_data += vd + sizeof(vd);
+ upwa->nprops--;
+ }
+
for (i = 0; i < upwa->nprops; i++) {
char *prop_name;
- u32 vd;
- prop_name = prop_data + 1;
+ prop_name = prop_data;
prop_data += strlen(prop_name) + 1;
- vd = *prop_data++;
+ vd = *(u32 *)prop_data;
+ prop_data += sizeof(vd);
switch (vd) {
case 0x00000000:
@@ -187,7 +204,7 @@ static int update_dt_node(u32 phandle)
prop_data += vd;
}
}
- } while (rc == 1);
+ } while (rtas_rc == 1);
of_node_put(dn);
kfree(rtas_buf);
@@ -200,17 +217,14 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
struct device_node *parent_dn;
int rc;
- dn = dlpar_configure_connector(drc_index);
- if (!dn)
+ parent_dn = of_find_node_by_phandle(parent_phandle);
+ if (!parent_dn)
return -ENOENT;
- parent_dn = of_find_node_by_phandle(parent_phandle);
- if (!parent_dn) {
- dlpar_free_cc_nodes(dn);
+ dn = dlpar_configure_connector(drc_index, parent_dn);
+ if (!dn)
return -ENOENT;
- }
- dn->parent = parent_dn;
rc = dlpar_attach_node(dn);
if (rc)
dlpar_free_cc_nodes(dn);
@@ -219,7 +233,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
return rc;
}
-static int pseries_devicetree_update(void)
+int pseries_devicetree_update(s32 scope)
{
char *rtas_buf;
u32 *data;
@@ -235,7 +249,7 @@ static int pseries_devicetree_update(void)
return -ENOMEM;
do {
- rc = mobility_rtas_call(update_nodes_token, rtas_buf);
+ rc = mobility_rtas_call(update_nodes_token, rtas_buf, scope);
if (rc && rc != 1)
break;
@@ -256,7 +270,7 @@ static int pseries_devicetree_update(void)
delete_dt_node(phandle);
break;
case UPDATE_DT_NODE:
- update_dt_node(phandle);
+ update_dt_node(phandle, scope);
break;
case ADD_DT_NODE:
drc_index = *data++;
@@ -276,13 +290,6 @@ void post_mobility_fixup(void)
int rc;
int activate_fw_token;
- rc = pseries_devicetree_update();
- if (rc) {
- printk(KERN_ERR "Initial post-mobility device tree update "
- "failed: %d\n", rc);
- return;
- }
-
activate_fw_token = rtas_token("ibm,activate-firmware");
if (activate_fw_token == RTAS_UNKNOWN_SERVICE) {
printk(KERN_ERR "Could not make post-mobility "
@@ -290,16 +297,17 @@ void post_mobility_fixup(void)
return;
}
- rc = rtas_call(activate_fw_token, 0, 1, NULL);
- if (!rc) {
- rc = pseries_devicetree_update();
- if (rc)
- printk(KERN_ERR "Secondary post-mobility device tree "
- "update failed: %d\n", rc);
- } else {
+ do {
+ rc = rtas_call(activate_fw_token, 0, 1, NULL);
+ } while (rtas_busy_delay(rc));
+
+ if (rc)
printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc);
- return;
- }
+
+ rc = pseries_devicetree_update(MIGRATION_SCOPE);
+ if (rc)
+ printk(KERN_ERR "Post-mobility device tree update "
+ "failed: %d\n", rc);
return;
}
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index e5b08472313..0c882e83c4c 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -24,26 +24,7 @@ static int query_token, change_token;
#define RTAS_RESET_FN 2
#define RTAS_CHANGE_MSI_FN 3
#define RTAS_CHANGE_MSIX_FN 4
-
-static struct pci_dn *get_pdn(struct pci_dev *pdev)
-{
- struct device_node *dn;
- struct pci_dn *pdn;
-
- dn = pci_device_to_OF_node(pdev);
- if (!dn) {
- dev_dbg(&pdev->dev, "rtas_msi: No OF device node\n");
- return NULL;
- }
-
- pdn = PCI_DN(dn);
- if (!pdn) {
- dev_dbg(&pdev->dev, "rtas_msi: No PCI DN\n");
- return NULL;
- }
-
- return pdn;
-}
+#define RTAS_CHANGE_32MSI_FN 5
/* RTAS Helpers */
@@ -58,7 +39,8 @@ static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
seq_num = 1;
do {
- if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN)
+ if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN ||
+ func == RTAS_CHANGE_32MSI_FN)
rc = rtas_call(change_token, 6, 4, rtas_ret, addr,
BUID_HI(buid), BUID_LO(buid),
func, num_irqs, seq_num);
@@ -89,7 +71,7 @@ static void rtas_disable_msi(struct pci_dev *pdev)
{
struct pci_dn *pdn;
- pdn = get_pdn(pdev);
+ pdn = pci_get_pdn(pdev);
if (!pdn)
return;
@@ -148,27 +130,29 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
{
struct device_node *dn;
struct pci_dn *pdn;
- const u32 *req_msi;
+ const __be32 *p;
+ u32 req_msi;
- pdn = get_pdn(pdev);
+ pdn = pci_get_pdn(pdev);
if (!pdn)
return -ENODEV;
dn = pdn->node;
- req_msi = of_get_property(dn, prop_name, NULL);
- if (!req_msi) {
+ p = of_get_property(dn, prop_name, NULL);
+ if (!p) {
pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
return -ENOENT;
}
- if (*req_msi < nvec) {
+ req_msi = be32_to_cpup(p);
+ if (req_msi < nvec) {
pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);
- if (*req_msi == 0) /* Be paranoid */
+ if (req_msi == 0) /* Be paranoid */
return -ENOSPC;
- return *req_msi;
+ return req_msi;
}
return 0;
@@ -189,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec)
static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
{
struct device_node *dn;
- const u32 *p;
+ const __be32 *p;
dn = of_node_get(pci_device_to_OF_node(dev));
while (dn) {
@@ -197,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
if (p) {
pr_debug("rtas_msi: found prop on dn %s\n",
dn->full_name);
- *total = *p;
+ *total = be32_to_cpup(p);
return dn;
}
@@ -250,13 +234,13 @@ struct msi_counts {
static void *count_non_bridge_devices(struct device_node *dn, void *data)
{
struct msi_counts *counts = data;
- const u32 *p;
+ const __be32 *p;
u32 class;
pr_debug("rtas_msi: counting %s\n", dn->full_name);
p = of_get_property(dn, "class-code", NULL);
- class = p ? *p : 0;
+ class = p ? be32_to_cpup(p) : 0;
if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
counts->num_devices++;
@@ -267,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data)
static void *count_spare_msis(struct device_node *dn, void *data)
{
struct msi_counts *counts = data;
- const u32 *p;
+ const __be32 *p;
int req;
if (dn == counts->requestor)
@@ -278,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data)
req = 0;
p = of_get_property(dn, "ibm,req#msi", NULL);
if (p)
- req = *p;
+ req = be32_to_cpup(p);
p = of_get_property(dn, "ibm,req#msi-x", NULL);
if (p)
- req = max(req, (int)*p);
+ req = max(req, (int)be32_to_cpup(p));
}
if (req < counts->quota)
@@ -392,6 +376,23 @@ static int check_msix_entries(struct pci_dev *pdev)
return 0;
}
+static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
+{
+ u32 addr_hi, addr_lo;
+
+ /*
+ * We should only get in here for IODA1 configs. This is based on the
+ * fact that we are using RTAS for MSIs, we don't have the 32 bit
+ * MSI RTAS support, and we are in a PCIe Gen2 slot.
+ */
+ dev_info(&pdev->dev,
+ "rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n");
+ pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, &addr_hi);
+ addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4);
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO, addr_lo);
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0);
+}
+
static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
{
struct pci_dn *pdn;
@@ -399,8 +400,9 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
struct msi_desc *entry;
struct msi_msg msg;
int nvec = nvec_in;
+ int use_32bit_msi_hack = 0;
- pdn = get_pdn(pdev);
+ pdn = pci_get_pdn(pdev);
if (!pdn)
return -ENODEV;
@@ -426,12 +428,31 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
*/
again:
if (type == PCI_CAP_ID_MSI) {
- rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
+ if (pdn->force_32bit_msi) {
+ rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
+ if (rc < 0) {
+ /*
+ * We only want to run the 32 bit MSI hack below if
+ * the max bus speed is Gen2 speed
+ */
+ if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT)
+ return rc;
+
+ use_32bit_msi_hack = 1;
+ }
+ } else
+ rc = -1;
+
+ if (rc < 0)
+ rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
if (rc < 0) {
pr_debug("rtas_msi: trying the old firmware call.\n");
rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
}
+
+ if (use_32bit_msi_hack && rc > 0)
+ rtas_hack_32bit_msi_gen2(pdev);
} else
rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
@@ -512,3 +533,4 @@ static int rtas_msi_init(void)
return 0;
}
arch_initcall(rtas_msi_init);
+
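
A worked example of the address fold performed by
rtas_hack_32bit_msi_gen2() above (the input value is illustrative, not
taken from real hardware; the "window" reading is an assumption, not
stated in the patch):

/*
 *   64-bit MSI address:    0x0003000000000000
 *   addr_hi (bits 63:32):  0x00030000
 *   addr_hi >> (48 - 32):  0x00000003   (keep bits 63:48)
 *   ...          << 4:     0x00000030
 *   ...   | 0xffff0000:    0xffff0030   (new 32-bit addr_lo)
 *
 * Bits 63:48 of the original address are packed into bits 19:4 of a
 * 32-bit address inside the 0xffff0000 region, so a device limited to
 * 32-bit writes can still reach a distinct MSI target address.
 */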
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 8733a86ad52..0cc240b7f69 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -18,6 +18,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/kmsg_dump.h>
+#include <linux/pstore.h>
#include <linux/ctype.h>
#include <linux/zlib.h>
#include <asm/uaccess.h>
@@ -29,14 +30,21 @@
/* Max bytes to read/write in one go */
#define NVRW_CNT 0x20
+/*
+ * Set oops header version to distinguish between old and new format header.
+ * The lnx,oops-log partition max size is 4000, so a header version
+ * greater than 4000 unambiguously identifies the new format.
+ */
+#define OOPS_HDR_VERSION 5000
+
static unsigned int nvram_size;
static int nvram_fetch, nvram_store;
static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
static DEFINE_SPINLOCK(nvram_lock);
struct err_log_info {
- int error_type;
- unsigned int seq_num;
+ __be32 error_type;
+ __be32 seq_num;
};
struct nvram_os_partition {
@@ -45,20 +53,23 @@ struct nvram_os_partition {
int min_size; /* minimum acceptable size (0 means req_size) */
long size; /* size of data portion (excluding err_log_info) */
long index; /* offset of data portion of partition */
+ bool os_partition; /* partition initialized by OS, not FW */
};
static struct nvram_os_partition rtas_log_partition = {
.name = "ibm,rtas-log",
.req_size = 2079,
.min_size = 1055,
- .index = -1
+ .index = -1,
+ .os_partition = true
};
static struct nvram_os_partition oops_log_partition = {
.name = "lnx,oops-log",
.req_size = 4000,
.min_size = 2000,
- .index = -1
+ .index = -1,
+ .os_partition = true
};
static const char *pseries_nvram_os_partitions[] = {
@@ -67,6 +78,12 @@ static const char *pseries_nvram_os_partitions[] = {
NULL
};
+struct oops_log_info {
+ __be16 version;
+ __be16 report_length;
+ __be64 timestamp;
+} __attribute__((packed));
+
static void oops_to_nvram(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason);
@@ -83,28 +100,28 @@ static unsigned long last_unread_rtas_event; /* timestamp */
* big_oops_buf[] holds the uncompressed text we're capturing.
*
- * oops_buf[] holds the compressed text, preceded by a prefix.
- * The prefix is just a u16 holding the length of the compressed* text.
- * (*Or uncompressed, if compression fails.) oops_buf[] gets written
- * to NVRAM.
+ * oops_buf[] holds the compressed text, preceded by an oops header.
+ * The oops header has a u16 holding the header version (to tell old
+ * and new format headers apart), followed by a u16 holding the length
+ * of the compressed* text (*or uncompressed, if compression fails)
+ * and a u64 holding the timestamp. oops_buf[] gets written to NVRAM.
*
- * oops_len points to the prefix. oops_data points to the compressed text.
+ * oops_log_info points to the header. oops_data points to the compressed text.
*
* +- oops_buf
- * | +- oops_data
- * v v
- * +------------+-----------------------------------------------+
- * | length | text |
- * | (2 bytes) | (oops_data_sz bytes) |
- * +------------+-----------------------------------------------+
+ * | +- oops_data
+ * v v
+ * +-----------+-----------+-----------+------------------------+
+ * | version | length | timestamp | text |
+ * | (2 bytes) | (2 bytes) | (8 bytes) | (oops_data_sz bytes) |
+ * +-----------+-----------+-----------+------------------------+
* ^
- * +- oops_len
+ * +- oops_log_info
*
* We preallocate these buffers during init to avoid kmalloc during oops/panic.
*/
static size_t big_oops_buf_sz;
static char *big_oops_buf, *oops_buf;
-static u16 *oops_len;
static char *oops_data;
static size_t oops_data_sz;
@@ -114,6 +131,30 @@ static size_t oops_data_sz;
#define MEM_LEVEL 4
static struct z_stream_s stream;
+#ifdef CONFIG_PSTORE
+static struct nvram_os_partition of_config_partition = {
+ .name = "of-config",
+ .index = -1,
+ .os_partition = false
+};
+
+static struct nvram_os_partition common_partition = {
+ .name = "common",
+ .index = -1,
+ .os_partition = false
+};
+
+static enum pstore_type_id nvram_type_ids[] = {
+ PSTORE_TYPE_DMESG,
+ PSTORE_TYPE_PPC_RTAS,
+ PSTORE_TYPE_PPC_OF,
+ PSTORE_TYPE_PPC_COMMON,
+ -1
+};
+static int read_type;
+static unsigned long last_rtas_event;
+#endif
+
static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
{
unsigned int i;
@@ -250,20 +291,20 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff,
length = part->size;
}
- info.error_type = err_type;
- info.seq_num = error_log_cnt;
+ info.error_type = cpu_to_be32(err_type);
+ info.seq_num = cpu_to_be32(error_log_cnt);
tmp_index = part->index;
rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
if (rc <= 0) {
- pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc);
+ pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
return rc;
}
rc = ppc_md.nvram_write(buff, length, &tmp_index);
if (rc <= 0) {
- pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc);
+ pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
return rc;
}
@@ -275,48 +316,71 @@ int nvram_write_error_log(char * buff, int length,
{
int rc = nvram_write_os_partition(&rtas_log_partition, buff, length,
err_type, error_log_cnt);
- if (!rc)
+ if (!rc) {
last_unread_rtas_event = get_seconds();
+#ifdef CONFIG_PSTORE
+ last_rtas_event = get_seconds();
+#endif
+ }
+
return rc;
}
-/* nvram_read_error_log
+/* nvram_read_partition
*
- * Reads nvram for error log for at most 'length'
+ * Reads nvram partition for at most 'length'
*/
-int nvram_read_error_log(char * buff, int length,
- unsigned int * err_type, unsigned int * error_log_cnt)
+int nvram_read_partition(struct nvram_os_partition *part, char *buff,
+ int length, unsigned int *err_type,
+ unsigned int *error_log_cnt)
{
int rc;
loff_t tmp_index;
struct err_log_info info;
- if (rtas_log_partition.index == -1)
+ if (part->index == -1)
return -1;
- if (length > rtas_log_partition.size)
- length = rtas_log_partition.size;
+ if (length > part->size)
+ length = part->size;
- tmp_index = rtas_log_partition.index;
+ tmp_index = part->index;
- rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
- if (rc <= 0) {
- printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
- return rc;
+ if (part->os_partition) {
+ rc = ppc_md.nvram_read((char *)&info,
+ sizeof(struct err_log_info),
+ &tmp_index);
+ if (rc <= 0) {
+ pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
+ return rc;
+ }
}
rc = ppc_md.nvram_read(buff, length, &tmp_index);
if (rc <= 0) {
- printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
+ pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
return rc;
}
- *error_log_cnt = info.seq_num;
- *err_type = info.error_type;
+ if (part->os_partition) {
+ *error_log_cnt = be32_to_cpu(info.seq_num);
+ *err_type = be32_to_cpu(info.error_type);
+ }
return 0;
}
+/* nvram_read_error_log
+ *
+ * Reads nvram for error log for at most 'length'
+ */
+int nvram_read_error_log(char *buff, int length,
+ unsigned int *err_type, unsigned int *error_log_cnt)
+{
+ return nvram_read_partition(&rtas_log_partition, buff, length,
+ err_type, error_log_cnt);
+}
+
/* This doesn't actually zero anything, but it sets the event_logged
* word to tell that this event is safely in syslog.
*/
@@ -364,9 +428,6 @@ static int __init pseries_nvram_init_os_partition(struct nvram_os_partition
loff_t p;
int size;
- /* Scan nvram for partitions */
- nvram_scan_partitions();
-
/* Look for ours */
p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
@@ -405,6 +466,267 @@ static int __init pseries_nvram_init_os_partition(struct nvram_os_partition
return 0;
}
+/*
+ * Are we using the ibm,rtas-log for oops/panic reports? And if so,
+ * would logging this oops/panic overwrite an RTAS event that rtas_errd
+ * hasn't had a chance to read and process? Return 1 if so, else 0.
+ *
+ * We assume that if rtas_errd hasn't read the RTAS event in
+ * NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to.
+ */
+static int clobbering_unread_rtas_event(void)
+{
+ return (oops_log_partition.index == rtas_log_partition.index
+ && last_unread_rtas_event
+ && get_seconds() - last_unread_rtas_event <=
+ NVRAM_RTAS_READ_TIMEOUT);
+}
+
+/* Derived from logfs_compress() */
+static int nvram_compress(const void *in, void *out, size_t inlen,
+ size_t outlen)
+{
+ int err, ret;
+
+ ret = -EIO;
+ err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
+ MEM_LEVEL, Z_DEFAULT_STRATEGY);
+ if (err != Z_OK)
+ goto error;
+
+ stream.next_in = in;
+ stream.avail_in = inlen;
+ stream.total_in = 0;
+ stream.next_out = out;
+ stream.avail_out = outlen;
+ stream.total_out = 0;
+
+ err = zlib_deflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END)
+ goto error;
+
+ err = zlib_deflateEnd(&stream);
+ if (err != Z_OK)
+ goto error;
+
+ if (stream.total_out >= stream.total_in)
+ goto error;
+
+ ret = stream.total_out;
+error:
+ return ret;
+}
+
+/* Compress the text from big_oops_buf into oops_buf. */
+static int zip_oops(size_t text_len)
+{
+ struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
+ int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
+ oops_data_sz);
+ if (zipped_len < 0) {
+ pr_err("nvram: compression failed; returned %d\n", zipped_len);
+ pr_err("nvram: logging uncompressed oops/panic report\n");
+ return -1;
+ }
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(zipped_len);
+ oops_hdr->timestamp = cpu_to_be64(get_seconds());
+ return 0;
+}
+
+#ifdef CONFIG_PSTORE
+static int nvram_pstore_open(struct pstore_info *psi)
+{
+ /* Reset the iterator to start reading partitions again */
+ read_type = -1;
+ return 0;
+}
+
+/**
+ * nvram_pstore_write - pstore write callback for nvram
+ * @type: Type of message logged
+ * @reason: reason behind dump (oops/panic)
+ * @id: identifier to indicate the write performed
+ * @part: pstore writes data to the registered buffer in parts;
+ * the part number identifies which chunk this is.
+ * @count: Indicates oops count
+ * @compressed: Flag to indicate the log is compressed
+ * @size: number of bytes written to the registered buffer
+ * @psi: registered pstore_info structure
+ *
+ * Called by pstore_dump() when an oops or panic report is logged in the
+ * printk buffer.
+ * Returns 0 on successful write.
+ */
+static int nvram_pstore_write(enum pstore_type_id type,
+ enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part, int count,
+ bool compressed, size_t size,
+ struct pstore_info *psi)
+{
+ int rc;
+ unsigned int err_type = ERR_TYPE_KERNEL_PANIC;
+ struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf;
+
+ /* part 1 has the recent messages from printk buffer */
+ if (part > 1 || type != PSTORE_TYPE_DMESG ||
+ clobbering_unread_rtas_event())
+ return -1;
+
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(size);
+ oops_hdr->timestamp = cpu_to_be64(get_seconds());
+
+ if (compressed)
+ err_type = ERR_TYPE_KERNEL_PANIC_GZ;
+
+ rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
+ (int) (sizeof(*oops_hdr) + size), err_type, count);
+
+ if (rc != 0)
+ return rc;
+
+ *id = part;
+ return 0;
+}
+
+/*
+ * Reads the oops/panic report, rtas, of-config and common partitions.
+ * Returns the length of the data we read from each partition.
+ * Returns 0 once all partitions have been read.
+ */
+static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
+ int *count, struct timespec *time, char **buf,
+ bool *compressed, struct pstore_info *psi)
+{
+ struct oops_log_info *oops_hdr;
+ unsigned int err_type, id_no, size = 0;
+ struct nvram_os_partition *part = NULL;
+ char *buff = NULL;
+ int sig = 0;
+ loff_t p;
+
+ read_type++;
+
+ switch (nvram_type_ids[read_type]) {
+ case PSTORE_TYPE_DMESG:
+ part = &oops_log_partition;
+ *type = PSTORE_TYPE_DMESG;
+ break;
+ case PSTORE_TYPE_PPC_RTAS:
+ part = &rtas_log_partition;
+ *type = PSTORE_TYPE_PPC_RTAS;
+ time->tv_sec = last_rtas_event;
+ time->tv_nsec = 0;
+ break;
+ case PSTORE_TYPE_PPC_OF:
+ sig = NVRAM_SIG_OF;
+ part = &of_config_partition;
+ *type = PSTORE_TYPE_PPC_OF;
+ *id = PSTORE_TYPE_PPC_OF;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ break;
+ case PSTORE_TYPE_PPC_COMMON:
+ sig = NVRAM_SIG_SYS;
+ part = &common_partition;
+ *type = PSTORE_TYPE_PPC_COMMON;
+ *id = PSTORE_TYPE_PPC_COMMON;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ break;
+ default:
+ return 0;
+ }
+
+ if (!part->os_partition) {
+ p = nvram_find_partition(part->name, sig, &size);
+ if (p <= 0) {
+ pr_err("nvram: Failed to find partition %s, "
+ "err %d\n", part->name, (int)p);
+ return 0;
+ }
+ part->index = p;
+ part->size = size;
+ }
+
+ buff = kmalloc(part->size, GFP_KERNEL);
+
+ if (!buff)
+ return -ENOMEM;
+
+ if (nvram_read_partition(part, buff, part->size, &err_type, &id_no)) {
+ kfree(buff);
+ return 0;
+ }
+
+ *count = 0;
+
+ if (part->os_partition)
+ *id = id_no;
+
+ if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
+ size_t length, hdr_size;
+
+ oops_hdr = (struct oops_log_info *)buff;
+ if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
+ /* Old format oops header had 2-byte record size */
+ hdr_size = sizeof(u16);
+ length = be16_to_cpu(oops_hdr->version);
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ } else {
+ hdr_size = sizeof(*oops_hdr);
+ length = be16_to_cpu(oops_hdr->report_length);
+ time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
+ time->tv_nsec = 0;
+ }
+ *buf = kmalloc(length, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, buff + hdr_size, length);
+ kfree(buff);
+
+ if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
+ *compressed = true;
+ else
+ *compressed = false;
+ return length;
+ }
+
+ *buf = buff;
+ return part->size;
+}
+
+static struct pstore_info nvram_pstore_info = {
+ .owner = THIS_MODULE,
+ .name = "nvram",
+ .open = nvram_pstore_open,
+ .read = nvram_pstore_read,
+ .write = nvram_pstore_write,
+};
+
+static int nvram_pstore_init(void)
+{
+ int rc = 0;
+
+ nvram_pstore_info.buf = oops_data;
+ nvram_pstore_info.bufsize = oops_data_sz;
+
+ rc = pstore_register(&nvram_pstore_info);
+ if (rc != 0)
+ pr_err("nvram: pstore_register() failed, defaults to "
+ "kmsg_dump; returned %d\n", rc);
+
+ return rc;
+}
+#else
+static int nvram_pstore_init(void)
+{
+ return -1;
+}
+#endif
+
static void __init nvram_init_oops_partition(int rtas_partition_exists)
{
int rc;
@@ -425,9 +747,13 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
oops_log_partition.name);
return;
}
- oops_len = (u16*) oops_buf;
- oops_data = oops_buf + sizeof(u16);
- oops_data_sz = oops_log_partition.size - sizeof(u16);
+ oops_data = oops_buf + sizeof(struct oops_log_info);
+ oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
+
+ rc = nvram_pstore_init();
+
+ if (!rc)
+ return;
/*
* Figure compression (preceded by elimination of each line's <n>
@@ -437,8 +763,8 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
big_oops_buf_sz = (oops_data_sz * 100) / 45;
big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
if (big_oops_buf) {
- stream.workspace = kmalloc(zlib_deflate_workspacesize(
- WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
+ stream.workspace = kmalloc(zlib_deflate_workspacesize(
+ WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
if (!stream.workspace) {
pr_err("nvram: No memory for compression workspace; "
"skipping compression of %s partition data\n",
@@ -465,6 +791,9 @@ static int __init pseries_nvram_init_log_partitions(void)
{
int rc;
+ /* Scan nvram for partitions */
+ nvram_scan_partitions();
+
rc = pseries_nvram_init_os_partition(&rtas_log_partition);
nvram_init_oops_partition(rc == 0);
return 0;
@@ -474,7 +803,7 @@ machine_arch_initcall(pseries, pseries_nvram_init_log_partitions);
int __init pSeries_nvram_init(void)
{
struct device_node *nvram;
- const unsigned int *nbytes_p;
+ const __be32 *nbytes_p;
unsigned int proplen;
nvram = of_find_node_by_type(NULL, "nvram");
@@ -487,7 +816,7 @@ int __init pSeries_nvram_init(void)
return -EIO;
}
- nvram_size = *nbytes_p;
+ nvram_size = be32_to_cpup(nbytes_p);
nvram_fetch = rtas_token("nvram-fetch");
nvram_store = rtas_token("nvram-store");
@@ -501,70 +830,6 @@ int __init pSeries_nvram_init(void)
return 0;
}
-/*
- * Are we using the ibm,rtas-log for oops/panic reports? And if so,
- * would logging this oops/panic overwrite an RTAS event that rtas_errd
- * hasn't had a chance to read and process? Return 1 if so, else 0.
- *
- * We assume that if rtas_errd hasn't read the RTAS event in
- * NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to.
- */
-static int clobbering_unread_rtas_event(void)
-{
- return (oops_log_partition.index == rtas_log_partition.index
- && last_unread_rtas_event
- && get_seconds() - last_unread_rtas_event <=
- NVRAM_RTAS_READ_TIMEOUT);
-}
-
-/* Derived from logfs_compress() */
-static int nvram_compress(const void *in, void *out, size_t inlen,
- size_t outlen)
-{
- int err, ret;
-
- ret = -EIO;
- err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
- MEM_LEVEL, Z_DEFAULT_STRATEGY);
- if (err != Z_OK)
- goto error;
-
- stream.next_in = in;
- stream.avail_in = inlen;
- stream.total_in = 0;
- stream.next_out = out;
- stream.avail_out = outlen;
- stream.total_out = 0;
-
- err = zlib_deflate(&stream, Z_FINISH);
- if (err != Z_STREAM_END)
- goto error;
-
- err = zlib_deflateEnd(&stream);
- if (err != Z_OK)
- goto error;
-
- if (stream.total_out >= stream.total_in)
- goto error;
-
- ret = stream.total_out;
-error:
- return ret;
-}
-
-/* Compress the text from big_oops_buf into oops_buf. */
-static int zip_oops(size_t text_len)
-{
- int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
- oops_data_sz);
- if (zipped_len < 0) {
- pr_err("nvram: compression failed; returned %d\n", zipped_len);
- pr_err("nvram: logging uncompressed oops/panic report\n");
- return -1;
- }
- *oops_len = (u16) zipped_len;
- return 0;
-}
/*
* This is our kmsg_dump callback, called after an oops or panic report
@@ -576,6 +841,7 @@ static int zip_oops(size_t text_len)
static void oops_to_nvram(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason)
{
+ struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
static unsigned int oops_count = 0;
static bool panicking = false;
static DEFINE_SPINLOCK(lock);
@@ -602,7 +868,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
break;
default:
pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
- __FUNCTION__, (int) reason);
+ __func__, (int) reason);
return;
}
@@ -619,14 +885,17 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
}
if (rc != 0) {
kmsg_dump_rewind(dumper);
- kmsg_dump_get_buffer(dumper, true,
+ kmsg_dump_get_buffer(dumper, false,
oops_data, oops_data_sz, &text_len);
err_type = ERR_TYPE_KERNEL_PANIC;
- *oops_len = (u16) text_len;
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(text_len);
+ oops_hdr->timestamp = cpu_to_be64(get_seconds());
}
(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
- (int) (sizeof(*oops_len) + *oops_len), err_type, ++oops_count);
+ (int) (sizeof(*oops_hdr) + text_len), err_type,
+ ++oops_count);
spin_unlock_irqrestore(&lock, flags);
}
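
The old/new header disambiguation open-coded in nvram_pstore_read()
above could be factored into a small helper; the following is a sketch
with a hypothetical function name, reusing only the types and the
version trick introduced by the patch:

#include <linux/types.h>

static size_t oops_record_data_offset(const char *rec, size_t *len,
				      u64 *stamp)
{
	const struct oops_log_info *hdr = (const struct oops_log_info *)rec;

	if (be16_to_cpu(hdr->version) < OOPS_HDR_VERSION) {
		/* Old format: the first (and only) u16 is the length. */
		*len = be16_to_cpu(hdr->version);
		*stamp = 0;
		return sizeof(u16);
	}

	/* New format: full header with version, length and timestamp. */
	*len = be16_to_cpu(hdr->report_length);
	*stamp = be64_to_cpu(hdr->timestamp);
	return sizeof(*hdr);
}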
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 56b864d777e..c413ec158ff 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -40,7 +40,8 @@ void pcibios_name_device(struct pci_dev *dev)
*/
dn = pci_device_to_OF_node(dev);
if (dn) {
- const char *loc_code = of_get_property(dn, "ibm,loc-code", 0);
+ const char *loc_code = of_get_property(dn, "ibm,loc-code",
+ NULL);
if (loc_code) {
int loc_len = strlen(loc_code);
if (loc_len < sizeof(dev->dev.name)) {
@@ -107,3 +108,64 @@ static void fixup_winbond_82c105(struct pci_dev* dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
fixup_winbond_82c105);
+
+int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct device_node *dn, *pdn;
+ struct pci_bus *bus;
+ u32 pcie_link_speed_stats[2];
+ int rc;
+
+ bus = bridge->bus;
+
+ dn = pcibios_get_phb_of_node(bus);
+ if (!dn)
+ return 0;
+
+ for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
+ rc = of_property_read_u32_array(pdn,
+ "ibm,pcie-link-speed-stats",
+ &pcie_link_speed_stats[0], 2);
+ if (!rc)
+ break;
+ }
+
+ of_node_put(pdn);
+
+ if (rc) {
+ pr_err("no ibm,pcie-link-speed-stats property\n");
+ return 0;
+ }
+
+ switch (pcie_link_speed_stats[0]) {
+ case 0x01:
+ bus->max_bus_speed = PCIE_SPEED_2_5GT;
+ break;
+ case 0x02:
+ bus->max_bus_speed = PCIE_SPEED_5_0GT;
+ break;
+ case 0x04:
+ bus->max_bus_speed = PCIE_SPEED_8_0GT;
+ break;
+ default:
+ bus->max_bus_speed = PCI_SPEED_UNKNOWN;
+ break;
+ }
+
+ switch (pcie_link_speed_stats[1]) {
+ case 0x01:
+ bus->cur_bus_speed = PCIE_SPEED_2_5GT;
+ break;
+ case 0x02:
+ bus->cur_bus_speed = PCIE_SPEED_5_0GT;
+ break;
+ case 0x04:
+ bus->cur_bus_speed = PCIE_SPEED_8_0GT;
+ break;
+ default:
+ bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
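
The two switch statements in pseries_root_bridge_prepare() decode the
same firmware encoding twice; a helper (a sketch, not the in-tree
code) makes the ibm,pcie-link-speed-stats values explicit:

#include <linux/pci.h>

static enum pci_bus_speed pseries_decode_link_speed(u32 fw_code)
{
	switch (fw_code) {
	case 0x01: return PCIE_SPEED_2_5GT;	/* Gen1 */
	case 0x02: return PCIE_SPEED_5_0GT;	/* Gen2 */
	case 0x04: return PCIE_SPEED_8_0GT;	/* Gen3 */
	default:   return PCI_SPEED_UNKNOWN;
	}
}

With this, the hunk's two switches would reduce to one call each for
pcie_link_speed_stats[0] (max speed) and [1] (current speed).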
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index c91b22be928..203cbf0dc10 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -37,15 +37,15 @@ find_bus_among_children(struct pci_bus *bus,
struct device_node *dn)
{
struct pci_bus *child = NULL;
- struct list_head *tmp;
+ struct pci_bus *tmp;
struct device_node *busdn;
busdn = pci_bus_to_OF_node(bus);
if (busdn == dn)
return bus;
- list_for_each(tmp, &bus->children) {
- child = find_bus_among_children(pci_bus_b(tmp), dn);
+ list_for_each_entry(tmp, &bus->children, node) {
+ child = find_bus_among_children(tmp, dn);
if (child)
break;
};
@@ -64,91 +64,6 @@ pcibios_find_pci_bus(struct device_node *dn)
}
EXPORT_SYMBOL_GPL(pcibios_find_pci_bus);
-/**
- * __pcibios_remove_pci_devices - remove all devices under this bus
- * @bus: the indicated PCI bus
- * @purge_pe: destroy the PE on removal of PCI devices
- *
- * Remove all of the PCI devices under this bus both from the
- * linux pci device tree, and from the powerpc EEH address cache.
- * By default, the corresponding PE will be destroied during the
- * normal PCI hotplug path. For PCI hotplug during EEH recovery,
- * the corresponding PE won't be destroied and deallocated.
- */
-void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe)
-{
- struct pci_dev *dev, *tmp;
- struct pci_bus *child_bus;
-
- /* First go down child busses */
- list_for_each_entry(child_bus, &bus->children, node)
- __pcibios_remove_pci_devices(child_bus, purge_pe);
-
- pr_debug("PCI: Removing devices on bus %04x:%02x\n",
- pci_domain_nr(bus), bus->number);
- list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
- pr_debug(" * Removing %s...\n", pci_name(dev));
- eeh_remove_bus_device(dev, purge_pe);
- pci_stop_and_remove_bus_device(dev);
- }
-}
-
-/**
- * pcibios_remove_pci_devices - remove all devices under this bus
- *
- * Remove all of the PCI devices under this bus both from the
- * linux pci device tree, and from the powerpc EEH address cache.
- */
-void pcibios_remove_pci_devices(struct pci_bus *bus)
-{
- __pcibios_remove_pci_devices(bus, 1);
-}
-EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
-
-/**
- * pcibios_add_pci_devices - adds new pci devices to bus
- *
- * This routine will find and fixup new pci devices under
- * the indicated bus. This routine presumes that there
- * might already be some devices under this bridge, so
- * it carefully tries to add only new devices. (And that
- * is how this routine differs from other, similar pcibios
- * routines.)
- */
-void pcibios_add_pci_devices(struct pci_bus * bus)
-{
- int slotno, num, mode, pass, max;
- struct pci_dev *dev;
- struct device_node *dn = pci_bus_to_OF_node(bus);
-
- eeh_add_device_tree_early(dn);
-
- mode = PCI_PROBE_NORMAL;
- if (ppc_md.pci_probe_mode)
- mode = ppc_md.pci_probe_mode(bus);
-
- if (mode == PCI_PROBE_DEVTREE) {
- /* use ofdt-based probe */
- of_rescan_bus(dn, bus);
- } else if (mode == PCI_PROBE_NORMAL) {
- /* use legacy probe */
- slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
- num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
- if (!num)
- return;
- pcibios_setup_bus_devices(bus);
- max = bus->busn_res.start;
- for (pass=0; pass < 2; pass++)
- list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
- max = pci_scan_bridge(bus, dev, max, pass);
- }
- }
- pcibios_finish_adding_to_bus(bus);
-}
-EXPORT_SYMBOL_GPL(pcibios_add_pci_devices);
-
struct pci_controller *init_phb_dynamic(struct device_node *dn)
{
struct pci_controller *phb;
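
The find_bus_among_children() hunk above replaces the untyped
list_for_each()/pci_bus_b() pair with list_for_each_entry(), which
yields typed entries directly. An illustrative helper (not part of the
patch) showing the same idiom:

#include <linux/list.h>
#include <linux/pci.h>

static struct pci_bus *first_child_bus(struct pci_bus *bus)
{
	struct pci_bus *child;

	list_for_each_entry(child, &bus->children, node)
		return child;	/* already a struct pci_bus *, no cast */

	return NULL;
}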
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
deleted file mode 100644
index e6cc34a6705..00000000000
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ /dev/null
@@ -1,307 +0,0 @@
-#ifndef _PSERIES_PLPAR_WRAPPERS_H
-#define _PSERIES_PLPAR_WRAPPERS_H
-
-#include <linux/string.h>
-
-#include <asm/hvcall.h>
-#include <asm/paca.h>
-#include <asm/page.h>
-
-/* Get state of physical CPU from query_cpu_stopped */
-int smp_query_cpu_stopped(unsigned int pcpu);
-#define QCSS_STOPPED 0
-#define QCSS_STOPPING 1
-#define QCSS_NOT_STOPPED 2
-#define QCSS_HARDWARE_ERROR -1
-#define QCSS_HARDWARE_BUSY -2
-
-static inline long poll_pending(void)
-{
- return plpar_hcall_norets(H_POLL_PENDING);
-}
-
-static inline u8 get_cede_latency_hint(void)
-{
- return get_lppaca()->cede_latency_hint;
-}
-
-static inline void set_cede_latency_hint(u8 latency_hint)
-{
- get_lppaca()->cede_latency_hint = latency_hint;
-}
-
-static inline long cede_processor(void)
-{
- return plpar_hcall_norets(H_CEDE);
-}
-
-static inline long extended_cede_processor(unsigned long latency_hint)
-{
- long rc;
- u8 old_latency_hint = get_cede_latency_hint();
-
- set_cede_latency_hint(latency_hint);
- rc = cede_processor();
- set_cede_latency_hint(old_latency_hint);
-
- return rc;
-}
-
-static inline long vpa_call(unsigned long flags, unsigned long cpu,
- unsigned long vpa)
-{
- /* flags are in bits 16-18 (counting from most significant bit) */
- flags = flags << (63 - 18);
-
- return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
-}
-
-static inline long unregister_vpa(unsigned long cpu)
-{
- return vpa_call(0x5, cpu, 0);
-}
-
-static inline long register_vpa(unsigned long cpu, unsigned long vpa)
-{
- return vpa_call(0x1, cpu, vpa);
-}
-
-static inline long unregister_slb_shadow(unsigned long cpu)
-{
- return vpa_call(0x7, cpu, 0);
-}
-
-static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
-{
- return vpa_call(0x3, cpu, vpa);
-}
-
-static inline long unregister_dtl(unsigned long cpu)
-{
- return vpa_call(0x6, cpu, 0);
-}
-
-static inline long register_dtl(unsigned long cpu, unsigned long vpa)
-{
- return vpa_call(0x2, cpu, vpa);
-}
-
-static inline long plpar_page_set_loaned(unsigned long vpa)
-{
- unsigned long cmo_page_sz = cmo_get_page_size();
- long rc = 0;
- int i;
-
- for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
- rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
-
- for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
- plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
- vpa + i - cmo_page_sz, 0);
-
- return rc;
-}
-
-static inline long plpar_page_set_active(unsigned long vpa)
-{
- unsigned long cmo_page_sz = cmo_get_page_size();
- long rc = 0;
- int i;
-
- for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
- rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
-
- for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
- plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
- vpa + i - cmo_page_sz, 0);
-
- return rc;
-}
-
-extern void vpa_init(int cpu);
-
-static inline long plpar_pte_enter(unsigned long flags,
- unsigned long hpte_group, unsigned long hpte_v,
- unsigned long hpte_r, unsigned long *slot)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
-
- *slot = retbuf[0];
-
- return rc;
-}
-
-static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
- unsigned long avpn, unsigned long *old_pteh_ret,
- unsigned long *old_ptel_ret)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
-
- *old_pteh_ret = retbuf[0];
- *old_ptel_ret = retbuf[1];
-
- return rc;
-}
-
-/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
-static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
- unsigned long avpn, unsigned long *old_pteh_ret,
- unsigned long *old_ptel_ret)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
-
- *old_pteh_ret = retbuf[0];
- *old_ptel_ret = retbuf[1];
-
- return rc;
-}
-
-static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
- unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall(H_READ, retbuf, flags, ptex);
-
- *old_pteh_ret = retbuf[0];
- *old_ptel_ret = retbuf[1];
-
- return rc;
-}
-
-/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
-static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
- unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);
-
- *old_pteh_ret = retbuf[0];
- *old_ptel_ret = retbuf[1];
-
- return rc;
-}
-
-/*
- * plpar_pte_read_4_raw can be called in real mode.
- * ptes must be 8*sizeof(unsigned long)
- */
-static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
- unsigned long *ptes)
-
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
-
- rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
-
- memcpy(ptes, retbuf, 8*sizeof(unsigned long));
-
- return rc;
-}
-
-static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
- unsigned long avpn)
-{
- return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
-}
-
-static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
- unsigned long *tce_ret)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
-
- *tce_ret = retbuf[0];
-
- return rc;
-}
-
-static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
- unsigned long tceval)
-{
- return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
-}
-
-static inline long plpar_tce_put_indirect(unsigned long liobn,
- unsigned long ioba, unsigned long page, unsigned long count)
-{
- return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
-}
-
-static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
- unsigned long tceval, unsigned long count)
-{
- return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
-}
-
-static inline long plpar_get_term_char(unsigned long termno,
- unsigned long *len_ret, char *buf_ret)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- unsigned long *lbuf = (unsigned long *)buf_ret; /* TODO: alignment? */
-
- rc = plpar_hcall(H_GET_TERM_CHAR, retbuf, termno);
-
- *len_ret = retbuf[0];
- lbuf[0] = retbuf[1];
- lbuf[1] = retbuf[2];
-
- return rc;
-}
-
-static inline long plpar_put_term_char(unsigned long termno, unsigned long len,
- const char *buffer)
-{
- unsigned long *lbuf = (unsigned long *)buffer; /* TODO: alignment? */
- return plpar_hcall_norets(H_PUT_TERM_CHAR, termno, len, lbuf[0],
- lbuf[1]);
-}
-
-/* Set various resource mode parameters */
-static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
- unsigned long value1, unsigned long value2)
-{
- return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
-}
-
-/*
- * Enable relocation on exceptions on this partition
- *
- * Note: this call has a partition wide scope and can take a while to complete.
- * If it returns H_LONG_BUSY_* it should be retried periodically until it
- * returns H_SUCCESS.
- */
-static inline long enable_reloc_on_exceptions(void)
-{
- /* mflags = 3: Exceptions at 0xC000000000004000 */
- return plpar_set_mode(3, 3, 0, 0);
-}
-
-/*
- * Disable relocation on exceptions on this partition
- *
- * Note: this call has a partition wide scope and can take a while to complete.
- * If it returns H_LONG_BUSY_* it should be retried periodically until it
- * returns H_SUCCESS.
- */
-static inline long disable_reloc_on_exceptions(void) {
- return plpar_set_mode(0, 3, 0, 0);
-}
-
-#endif /* _PSERIES_PLPAR_WRAPPERS_H */
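
A worked example of the flag placement in the deleted vpa_call()
above: the subfunction sits in bits 16-18 counting from the most
significant bit, so for register_vpa() (subfunction 0x1):

/*
 *   flags              = 0x1
 *   flags << (63 - 18) = 0x0000200000000000
 *
 * i.e. the three-bit subfunction lands in bits 45-47 of the 64-bit
 * hcall argument, with all lower bits clear.
 */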
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
deleted file mode 100644
index 4d806b41960..00000000000
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * processor_idle - idle state cpuidle driver.
- * Adapted from drivers/idle/intel_idle.c and
- * drivers/acpi/processor_idle.c
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
-#include <linux/cpuidle.h>
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-
-#include <asm/paca.h>
-#include <asm/reg.h>
-#include <asm/machdep.h>
-#include <asm/firmware.h>
-#include <asm/runlatch.h>
-
-#include "plpar_wrappers.h"
-#include "pseries.h"
-
-struct cpuidle_driver pseries_idle_driver = {
- .name = "pseries_idle",
- .owner = THIS_MODULE,
-};
-
-#define MAX_IDLE_STATE_COUNT 2
-
-static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
-static struct cpuidle_device __percpu *pseries_cpuidle_devices;
-static struct cpuidle_state *cpuidle_state_table;
-
-static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
-{
-
- *kt_before = ktime_get();
- *in_purr = mfspr(SPRN_PURR);
- /*
- * Indicate to the HV that we are idle. Now would be
- * a good time to find other work to dispatch.
- */
- get_lppaca()->idle = 1;
-}
-
-static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
-{
- get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
- get_lppaca()->idle = 0;
-
- return ktime_to_us(ktime_sub(ktime_get(), kt_before));
-}
-
-static int snooze_loop(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- unsigned long in_purr;
- ktime_t kt_before;
- int cpu = dev->cpu;
-
- idle_loop_prolog(&in_purr, &kt_before);
- local_irq_enable();
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- while ((!need_resched()) && cpu_online(cpu)) {
- ppc64_runlatch_off();
- HMT_low();
- HMT_very_low();
- }
-
- HMT_medium();
- clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb();
-
- dev->last_residency =
- (int)idle_loop_epilog(in_purr, kt_before);
- return index;
-}
-
-static void check_and_cede_processor(void)
-{
- /*
- * Ensure our interrupt state is properly tracked,
- * also checks if no interrupt has occurred while we
- * were soft-disabled
- */
- if (prep_irq_for_idle()) {
- cede_processor();
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* Ensure that H_CEDE returns with IRQs on */
- if (WARN_ON(!(mfmsr() & MSR_EE)))
- __hard_irq_enable();
-#endif
- }
-}
-
-static int dedicated_cede_loop(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- unsigned long in_purr;
- ktime_t kt_before;
-
- idle_loop_prolog(&in_purr, &kt_before);
- get_lppaca()->donate_dedicated_cpu = 1;
-
- ppc64_runlatch_off();
- HMT_medium();
- check_and_cede_processor();
-
- get_lppaca()->donate_dedicated_cpu = 0;
- dev->last_residency =
- (int)idle_loop_epilog(in_purr, kt_before);
- return index;
-}
-
-static int shared_cede_loop(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- unsigned long in_purr;
- ktime_t kt_before;
-
- idle_loop_prolog(&in_purr, &kt_before);
-
- /*
- * Yield the processor to the hypervisor. We return if
- * an external interrupt occurs (which are driven prior
- * to returning here) or if a prod occurs from another
- * processor. When returning here, external interrupts
- * are enabled.
- */
- check_and_cede_processor();
-
- dev->last_residency =
- (int)idle_loop_epilog(in_purr, kt_before);
- return index;
-}
-
-/*
- * States for dedicated partition case.
- */
-static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
- { /* Snooze */
- .name = "snooze",
- .desc = "snooze",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 0,
- .target_residency = 0,
- .enter = &snooze_loop },
- { /* CEDE */
- .name = "CEDE",
- .desc = "CEDE",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 10,
- .target_residency = 100,
- .enter = &dedicated_cede_loop },
-};
-
-/*
- * States for shared partition case.
- */
-static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
- { /* Shared Cede */
- .name = "Shared Cede",
- .desc = "Shared Cede",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 0,
- .target_residency = 0,
- .enter = &shared_cede_loop },
-};
-
-void update_smt_snooze_delay(int cpu, int residency)
-{
- struct cpuidle_driver *drv = cpuidle_get_driver();
- struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
-
- if (cpuidle_state_table != dedicated_states)
- return;
-
- if (residency < 0) {
- /* Disable the Nap state on that cpu */
- if (dev)
- dev->states_usage[1].disable = 1;
- } else
- if (drv)
- drv->states[1].target_residency = residency;
-}
-
-static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
- unsigned long action, void *hcpu)
-{
- int hotcpu = (unsigned long)hcpu;
- struct cpuidle_device *dev =
- per_cpu_ptr(pseries_cpuidle_devices, hotcpu);
-
- if (dev && cpuidle_get_driver()) {
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- cpuidle_pause_and_lock();
- cpuidle_enable_device(dev);
- cpuidle_resume_and_unlock();
- break;
-
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- cpuidle_pause_and_lock();
- cpuidle_disable_device(dev);
- cpuidle_resume_and_unlock();
- break;
-
- default:
- return NOTIFY_DONE;
- }
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block setup_hotplug_notifier = {
- .notifier_call = pseries_cpuidle_add_cpu_notifier,
-};
-
-/*
- * pseries_cpuidle_driver_init()
- */
-static int pseries_cpuidle_driver_init(void)
-{
- int idle_state;
- struct cpuidle_driver *drv = &pseries_idle_driver;
-
- drv->state_count = 0;
-
- for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {
-
- if (idle_state > max_idle_state)
- break;
-
- /* is the state not enabled? */
- if (cpuidle_state_table[idle_state].enter == NULL)
- continue;
-
- drv->states[drv->state_count] = /* structure copy */
- cpuidle_state_table[idle_state];
-
- drv->state_count += 1;
- }
-
- return 0;
-}
-
-/* pseries_idle_devices_uninit(void)
- * unregister cpuidle devices and de-allocate memory
- */
-static void pseries_idle_devices_uninit(void)
-{
- int i;
- struct cpuidle_device *dev;
-
- for_each_possible_cpu(i) {
- dev = per_cpu_ptr(pseries_cpuidle_devices, i);
- cpuidle_unregister_device(dev);
- }
-
- free_percpu(pseries_cpuidle_devices);
- return;
-}
-
-/* pseries_idle_devices_init()
- * allocate, initialize and register cpuidle device
- */
-static int pseries_idle_devices_init(void)
-{
- int i;
- struct cpuidle_driver *drv = &pseries_idle_driver;
- struct cpuidle_device *dev;
-
- pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (pseries_cpuidle_devices == NULL)
- return -ENOMEM;
-
- for_each_possible_cpu(i) {
- dev = per_cpu_ptr(pseries_cpuidle_devices, i);
- dev->state_count = drv->state_count;
- dev->cpu = i;
- if (cpuidle_register_device(dev)) {
- printk(KERN_DEBUG \
- "cpuidle_register_device %d failed!\n", i);
- return -EIO;
- }
- }
-
- return 0;
-}
-
-/*
- * pseries_idle_probe()
- * Choose state table for shared versus dedicated partition
- */
-static int pseries_idle_probe(void)
-{
-
- if (!firmware_has_feature(FW_FEATURE_SPLPAR))
- return -ENODEV;
-
- if (cpuidle_disable != IDLE_NO_OVERRIDE)
- return -ENODEV;
-
- if (max_idle_state == 0) {
- printk(KERN_DEBUG "pseries processor idle disabled.\n");
- return -EPERM;
- }
-
- if (get_lppaca()->shared_proc)
- cpuidle_state_table = shared_states;
- else
- cpuidle_state_table = dedicated_states;
-
- return 0;
-}
-
-static int __init pseries_processor_idle_init(void)
-{
- int retval;
-
- retval = pseries_idle_probe();
- if (retval)
- return retval;
-
- pseries_cpuidle_driver_init();
- retval = cpuidle_register_driver(&pseries_idle_driver);
- if (retval) {
- printk(KERN_DEBUG "Registration of pseries driver failed.\n");
- return retval;
- }
-
- retval = pseries_idle_devices_init();
- if (retval) {
- pseries_idle_devices_uninit();
- cpuidle_unregister_driver(&pseries_idle_driver);
- return retval;
- }
-
- register_cpu_notifier(&setup_hotplug_notifier);
- printk(KERN_DEBUG "pseries_idle_driver registered\n");
-
- return 0;
-}
-
-static void __exit pseries_processor_idle_exit(void)
-{
-
- unregister_cpu_notifier(&setup_hotplug_notifier);
- pseries_idle_devices_uninit();
- cpuidle_unregister_driver(&pseries_idle_driver);
-
- return;
-}
-
-module_init(pseries_processor_idle_init);
-module_exit(pseries_processor_idle_exit);
-
-MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
-MODULE_DESCRIPTION("Cpuidle driver for POWER");
-MODULE_LICENSE("GPL");
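
One detail of the deleted driver worth noting is its idle accounting:
each idle entry recorded the PURR delta against the lppaca so the
hypervisor's bookkeeping stayed accurate. A condensed sketch of that
prolog/epilog pair around a cede (the helper name is invented; the
field accesses are taken from the deleted code):

#include <asm/reg.h>
#include <asm/plpar_wrappers.h>

static void cede_with_purr_accounting(void)
{
	unsigned long in_purr = mfspr(SPRN_PURR);

	get_lppaca()->idle = 1;		/* tell the HV we are idle */
	cede_processor();		/* H_CEDE hcall */

	get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->idle = 0;
}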
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 9a3dda07566..361add62abf 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -19,7 +19,10 @@ extern void request_event_sources_irqs(struct device_node *np,
#include <linux/of.h>
-extern void __init fw_feature_init(const char *hypertas, unsigned long len);
+extern void __init fw_hypertas_feature_init(const char *hypertas,
+ unsigned long len);
+extern void __init fw_vec5_feature_init(const char *hypertas,
+ unsigned long len);
struct pt_regs;
@@ -53,11 +56,14 @@ extern void hvc_vio_init_early(void);
/* Dynamic logical Partitioning/Mobility */
extern void dlpar_free_cc_nodes(struct device_node *);
extern void dlpar_free_cc_property(struct property *);
-extern struct device_node *dlpar_configure_connector(u32);
+extern struct device_node *dlpar_configure_connector(u32, struct device_node *);
extern int dlpar_attach_node(struct device_node *);
extern int dlpar_detach_node(struct device_node *);
-/* Snooze Delay, pseries_idle */
-DECLARE_PER_CPU(long, smt_snooze_delay);
+/* PCI root bridge prepare function override for pseries */
+struct pci_host_bridge;
+int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
+
+unsigned long pseries_memory_block_size(void);
#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index af281dce510..92767791f93 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -21,6 +21,7 @@
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
+#include <asm/firmware.h>
#define MODULE_VERS "1.0"
@@ -32,40 +33,6 @@ static int sysfs_entries;
/* Helper routines */
-/*
- * Routine to detect firmware support for hcall
- * return 1 if H_BEST_ENERGY is supported
- * else return 0
- */
-
-static int check_for_h_best_energy(void)
-{
- struct device_node *rtas = NULL;
- const char *hypertas, *s;
- int length;
- int rc = 0;
-
- rtas = of_find_node_by_path("/rtas");
- if (!rtas)
- return 0;
-
- hypertas = of_get_property(rtas, "ibm,hypertas-functions", &length);
- if (!hypertas) {
- of_node_put(rtas);
- return 0;
- }
-
- /* hypertas will have list of strings with hcall names */
- for (s = hypertas; s < hypertas + length; s += strlen(s) + 1) {
- if (!strncmp("hcall-best-energy-1", s, 19)) {
- rc = 1; /* Found the string */
- break;
- }
- }
- of_node_put(rtas);
- return rc;
-}
-
/* Helper Routines to convert between drc_index to cpu numbers */
static u32 cpu_to_drc_index(int cpu)
@@ -141,8 +108,8 @@ err:
* energy consumption.
*/
-#define FLAGS_MODE1 0x004E200000080E01
-#define FLAGS_MODE2 0x004E200000080401
+#define FLAGS_MODE1 0x004E200000080E01UL
+#define FLAGS_MODE2 0x004E200000080401UL
#define FLAGS_ACTIVATE 0x100
static ssize_t get_best_energy_list(char *page, int activate)
@@ -262,7 +229,7 @@ static int __init pseries_energy_init(void)
int cpu, err;
struct device *cpu_dev;
- if (!check_for_h_best_energy()) {
+ if (!firmware_has_feature(FW_FEATURE_BEST_ENERGY)) {
printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n");
return 0;
}
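
The removed check_for_h_best_energy() scanned the packed string list
in "ibm,hypertas-functions" by hand before an FW_FEATURE_BEST_ENERGY
flag existed. A generic sketch of that string-list idiom (helper name
and arguments are illustrative):

#include <linux/string.h>
#include <linux/types.h>

static bool of_string_list_contains(const char *prop, int len,
				    const char *target)
{
	const char *s;

	/* OF string lists are NUL-separated, NUL-terminated blobs. */
	for (s = prop; s < prop + len; s += strlen(s) + 1)
		if (!strcmp(s, target))
			return true;

	return false;
}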
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index c4dfccd3a3d..9c5778e6ed4 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -83,7 +83,7 @@ static void handle_system_shutdown(char event_modifier)
switch (event_modifier) {
case EPOW_SHUTDOWN_NORMAL:
pr_emerg("Firmware initiated power off");
- orderly_poweroff(1);
+ orderly_poweroff(true);
break;
case EPOW_SHUTDOWN_ON_UPS:
@@ -95,13 +95,13 @@ static void handle_system_shutdown(char event_modifier)
pr_emerg("Loss of system critical functions reported by "
"firmware");
pr_emerg("Check RTAS error log for details");
- orderly_poweroff(1);
+ orderly_poweroff(true);
break;
case EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH:
pr_emerg("Ambient temperature too high reported by firmware");
pr_emerg("Check RTAS error log for details");
- orderly_poweroff(1);
+ orderly_poweroff(true);
break;
default:
@@ -162,7 +162,7 @@ void rtas_parse_epow_errlog(struct rtas_error_log *log)
case EPOW_SYSTEM_HALT:
pr_emerg("Firmware initiated power off");
- orderly_poweroff(1);
+ orderly_poweroff(true);
break;
case EPOW_MAIN_ENCLOSURE:
@@ -236,7 +236,8 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
rtas_elog = (struct rtas_error_log *)ras_log_buf;
- if ((status == 0) && (rtas_elog->severity >= RTAS_SEVERITY_ERROR_SYNC))
+ if (status == 0 &&
+ rtas_error_severity(rtas_elog) >= RTAS_SEVERITY_ERROR_SYNC)
fatal = 1;
else
fatal = 0;
@@ -287,6 +288,9 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
unsigned long *savep;
struct rtas_error_log *h, *errhdr = NULL;
+ /* Mask top two bits */
+ regs->gpr[3] &= ~(0x3UL << 62);
+
if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
return NULL;
@@ -297,13 +301,14 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
/* If it isn't an extended log we can use the per cpu 64bit buffer */
h = (struct rtas_error_log *)&savep[1];
- if (!h->extended) {
+ if (!rtas_error_extended(h)) {
memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64));
errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf);
} else {
- int len;
+ int len, error_log_length;
- len = max_t(int, 8+h->extended_log_length, RTAS_ERROR_LOG_MAX);
+ error_log_length = 8 + rtas_error_extended_log_length(h);
+ len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
memcpy(global_mce_data_buf, h, len);
errhdr = (struct rtas_error_log *)global_mce_data_buf;
@@ -347,23 +352,24 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
{
int recovered = 0;
+ int disposition = rtas_error_disposition(err);
if (!(regs->msr & MSR_RI)) {
/* If MSR_RI isn't set, we cannot recover */
recovered = 0;
- } else if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
+ } else if (disposition == RTAS_DISP_FULLY_RECOVERED) {
/* Platform corrected itself */
recovered = 1;
- } else if (err->disposition == RTAS_DISP_LIMITED_RECOVERY) {
+ } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
/* Platform corrected itself but could be degraded */
printk(KERN_ERR "MCE: limited recovery, system may "
"be degraded\n");
recovered = 1;
} else if (user_mode(regs) && !is_global_init(current) &&
- err->severity == RTAS_SEVERITY_ERROR_SYNC) {
+ rtas_error_severity(err) == RTAS_SEVERITY_ERROR_SYNC) {
/*
* If we received a synchronous error when in userspace
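
The rtas_error_*() accessors adopted above replace direct bitfield
reads so the error log parses identically on big- and little-endian
kernels. The accessor shape is sketched below, with the bit positions
shown as an assumption for illustration rather than copied from
asm/rtas.h:

#include <linux/types.h>

static inline u8 example_error_severity(const void *elog)
{
	const u8 *bytes = elog;

	/* Byte-wise access is endian-neutral, unlike a C bitfield. */
	return (bytes[1] & 0xE0) >> 5;
}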
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index d6491bd481d..1c0a60d9886 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -12,7 +12,6 @@
*/
#include <linux/kernel.h>
-#include <linux/kref.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
@@ -70,7 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
np->properties = proplist;
of_node_set_flag(np, OF_DYNAMIC);
- kref_init(&np->kref);
+ of_node_init(np);
np->parent = derive_parent(path);
if (IS_ERR(np->parent)) {
@@ -452,7 +451,7 @@ static int proc_ppc64_create_ofdt(void)
ent = proc_create("powerpc/ofdt", S_IWUSR, NULL, &ofdt_fops);
if (ent)
- ent->size = 0;
+ proc_set_size(ent, 0);
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
new file mode 100644
index 00000000000..72a102758d4
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/rng.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "pseries-rng: " fmt
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <asm/archrandom.h>
+#include <asm/machdep.h>
+#include <asm/plpar_wrappers.h>
+
+
+static int pseries_get_random_long(unsigned long *v)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ if (plpar_hcall(H_RANDOM, retbuf) == H_SUCCESS) {
+ *v = retbuf[0];
+ return 1;
+ }
+
+ return 0;
+}
+
+static __init int rng_init(void)
+{
+ struct device_node *dn;
+
+ dn = of_find_compatible_node(NULL, NULL, "ibm,random");
+ if (!dn)
+ return -ENODEV;
+
+ pr_info("Registering arch random hook.\n");
+
+ ppc_md.get_random_long = pseries_get_random_long;
+
+ return 0;
+}
+subsys_initcall(rng_init);
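
The hook registered by rng_init() above is consumed by the powerpc
arch_get_random_long() wrapper; paraphrased (not quoted from
asm/archrandom.h), the caller side looks like:

#include <asm/machdep.h>

static inline int example_arch_get_random_long(unsigned long *v)
{
	if (ppc_md.get_random_long)
		return ppc_md.get_random_long(v);	/* 1 on success */

	return 0;
}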
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index 554457294a2..b502ab61aaf 100644
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -41,21 +41,16 @@
static unsigned int ibm_scan_log_dump; /* RTAS token */
-static struct proc_dir_entry *proc_ppc64_scan_log_dump; /* The proc file */
+static unsigned int *scanlog_buffer; /* The data buffer */
static ssize_t scanlog_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct inode * inode = file->f_path.dentry->d_inode;
- struct proc_dir_entry *dp;
- unsigned int *data;
+ unsigned int *data = scanlog_buffer;
int status;
unsigned long len, off;
unsigned int wait_time;
- dp = PDE(inode);
- data = (unsigned int *)dp->data;
-
if (count > RTAS_DATA_BUF_SIZE)
count = RTAS_DATA_BUF_SIZE;
@@ -139,8 +134,7 @@ static ssize_t scanlog_write(struct file * file, const char __user * buf,
static int scanlog_open(struct inode * inode, struct file * file)
{
- struct proc_dir_entry *dp = PDE(inode);
- unsigned int *data = (unsigned int *)dp->data;
+ unsigned int *data = scanlog_buffer;
if (data[0] != 0) {
/* This imperfect test stops a second copy of the
@@ -156,11 +150,9 @@ static int scanlog_open(struct inode * inode, struct file * file)
static int scanlog_release(struct inode * inode, struct file * file)
{
- struct proc_dir_entry *dp = PDE(inode);
- unsigned int *data = (unsigned int *)dp->data;
+ unsigned int *data = scanlog_buffer;
data[0] = 0;
-
return 0;
}
@@ -176,7 +168,6 @@ const struct file_operations scanlog_fops = {
static int __init scanlog_init(void)
{
struct proc_dir_entry *ent;
- void *data;
int err = -ENOMEM;
ibm_scan_log_dump = rtas_token("ibm,scan-log-dump");
@@ -184,29 +175,24 @@ static int __init scanlog_init(void)
return -ENODEV;
/* Ideally we could allocate a buffer < 4G */
- data = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
- if (!data)
+ scanlog_buffer = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
+ if (!scanlog_buffer)
goto err;
- ent = proc_create_data("powerpc/rtas/scan-log-dump", S_IRUSR, NULL,
- &scanlog_fops, data);
+ ent = proc_create("powerpc/rtas/scan-log-dump", S_IRUSR, NULL,
+ &scanlog_fops);
if (!ent)
goto err;
-
- proc_ppc64_scan_log_dump = ent;
-
return 0;
err:
- kfree(data);
+ kfree(scanlog_buffer);
return err;
}
static void __exit scanlog_cleanup(void)
{
- if (proc_ppc64_scan_log_dump) {
- kfree(proc_ppc64_scan_log_dump->data);
- remove_proc_entry("scan-log-dump", proc_ppc64_scan_log_dump->parent);
- }
+ remove_proc_entry("powerpc/rtas/scan-log-dump", NULL);
+ kfree(scanlog_buffer);
}
module_init(scanlog_init);
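
The scanlog conversion works around procfs internals becoming opaque: drivers can no longer dereference PDE(inode)->data, and since scanlog only ever creates one entry, a file-scope scanlog_buffer is the simplest replacement. For files with multiple instances, the sanctioned pattern is proc_create_data() plus PDE_DATA(); a minimal sketch for comparison (not part of this patch):

#include <linux/proc_fs.h>

/* Per-entry alternative: the pointer handed to proc_create_data()
 * comes back via PDE_DATA(inode) instead of PDE(inode)->data. */
static int example_open(struct inode *inode, struct file *file)
{
	unsigned int *data = PDE_DATA(inode);

	if (data[0] != 0)
		return -EBUSY;	/* single-open rule, as scanlog enforces */
	return 0;
}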
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index ca55882465d..f2f40e64658 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -39,7 +39,6 @@
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
-#include <linux/cpuidle.h>
#include <linux/of.h>
#include <linux/kexec.h>
@@ -65,13 +64,14 @@
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/eeh.h>
+#include <asm/reg.h>
+#include <asm/plpar_wrappers.h>
-#include "plpar_wrappers.h"
#include "pseries.h"
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
-unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT);
+unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
EXPORT_SYMBOL(CMO_PageSize);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
@@ -182,7 +182,7 @@ static void __init pseries_mpic_init_IRQ(void)
np = of_find_node_by_path("/");
naddr = of_n_addr_cells(np);
opprop = of_get_property(np, "platform-open-pic", &opplen);
- if (opprop != 0) {
+ if (opprop != NULL) {
openpic_addr = of_read_number(opprop, naddr);
printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
}
@@ -281,7 +281,7 @@ static struct notifier_block pci_dn_reconfig_nb = {
struct kmem_cache *dtl_cache;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
* Allocate space for the dispatch trace log for all possible cpus
* and register the buffers with the hypervisor. This is used for
@@ -322,7 +322,7 @@ static int alloc_dispatch_logs(void)
get_paca()->lppaca_ptr->dtl_idx = 0;
/* hypervisor reads buffer length from this field */
- dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
+ dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
if (ret)
pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
@@ -332,12 +332,12 @@ static int alloc_dispatch_logs(void)
return 0;
}
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline int alloc_dispatch_logs(void)
{
return 0;
}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static int alloc_dispatch_log_kmem_cache(void)
{
@@ -353,19 +353,26 @@ static int alloc_dispatch_log_kmem_cache(void)
}
early_initcall(alloc_dispatch_log_kmem_cache);
-static void pSeries_idle(void)
+static void pseries_lpar_idle(void)
{
- /* This would call on the cpuidle framework, and the back-end pseries
- * driver to go to idle states
+ /*
+ * Default handler to go into low thread priority and possibly
+ * low power mode by ceding the processor to the hypervisor
*/
- if (cpuidle_idle_call()) {
- /* On error, execute default handler
- * to go into low thread priority and possibly
- * low power mode.
- */
- HMT_low();
- HMT_very_low();
- }
+
+ /* Indicate to hypervisor that we are idle. */
+ get_lppaca()->idle = 1;
+
+ /*
+ * Yield the processor to the hypervisor. We return when an
+ * external interrupt occurs (external interrupts are handled
+ * before we get back here) or when another processor prods
+ * us. External interrupts are enabled when we return.
+ */
+ cede_processor();
+
+ get_lppaca()->idle = 0;
}
/*
@@ -375,7 +382,7 @@ static void pSeries_idle(void)
* to ever be a problem in practice we can move this into a kernel thread to
* finish off the process later in boot.
*/
-static int __init pSeries_enable_reloc_on_exc(void)
+long pSeries_enable_reloc_on_exc(void)
{
long rc;
unsigned int delay, total_delay = 0;
@@ -397,9 +404,9 @@ static int __init pSeries_enable_reloc_on_exc(void)
mdelay(delay);
}
}
+EXPORT_SYMBOL(pSeries_enable_reloc_on_exc);
-#ifdef CONFIG_KEXEC
-static long pSeries_disable_reloc_on_exc(void)
+long pSeries_disable_reloc_on_exc(void)
{
long rc;
@@ -410,13 +417,14 @@ static long pSeries_disable_reloc_on_exc(void)
mdelay(get_longbusy_msecs(rc));
}
}
+EXPORT_SYMBOL(pSeries_disable_reloc_on_exc);
+#ifdef CONFIG_KEXEC
static void pSeries_machine_kexec(struct kimage *image)
{
long rc;
- if (firmware_has_feature(FW_FEATURE_SET_MODE) &&
- (image->type != KEXEC_TYPE_CRASH)) {
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
rc = pSeries_disable_reloc_on_exc();
if (rc != H_SUCCESS)
pr_warning("Warning: Failed to disable relocation on "
@@ -427,9 +435,35 @@ static void pSeries_machine_kexec(struct kimage *image)
}
#endif
+#ifdef __LITTLE_ENDIAN__
+long pseries_big_endian_exceptions(void)
+{
+ long rc;
+
+ while (1) {
+ rc = enable_big_endian_exceptions();
+ if (!H_IS_LONG_BUSY(rc))
+ return rc;
+ mdelay(get_longbusy_msecs(rc));
+ }
+}
+
+static long pseries_little_endian_exceptions(void)
+{
+ long rc;
+
+ while (1) {
+ rc = enable_little_endian_exceptions();
+ if (!H_IS_LONG_BUSY(rc))
+ return rc;
+ mdelay(get_longbusy_msecs(rc));
+ }
+}
+#endif
+
static void __init pSeries_setup_arch(void)
{
- panic_timeout = 10;
+ set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
/* Discover PIC type and setup ppc_md accordingly */
pseries_discover_pic();
@@ -453,15 +487,16 @@ static void __init pSeries_setup_arch(void)
pSeries_nvram_init();
- if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
vpa_init(boot_cpuid);
- ppc_md.power_save = pSeries_idle;
- }
-
- if (firmware_has_feature(FW_FEATURE_LPAR))
+ ppc_md.power_save = pseries_lpar_idle;
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
- else
+ } else {
+ /* No special idle routine */
ppc_md.enable_pmcs = power4_enable_pmcs;
+ }
+
+ ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
long rc;
@@ -475,7 +510,11 @@ static void __init pSeries_setup_arch(void)
static int __init pSeries_init_panel(void)
{
/* Manually leave the kernel version on the panel. */
+#ifdef __BIG_ENDIAN__
ppc_md.progress("Linux ppc64\n", 0);
+#else
+ ppc_md.progress("Linux ppc64le\n", 0);
+#endif
ppc_md.progress(init_utsname()->version, 0);
return 0;
@@ -498,6 +537,14 @@ static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx)
return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
}
+static int pseries_set_dawr(unsigned long dawr, unsigned long dawrx)
+{
+ /* PAPR says we can't set HYP */
+ dawrx &= ~DAWRX_HYP;
+
+ return plapr_set_watchpoint0(dawr, dawrx);
+}
+
#define CMO_CHARACTERISTICS_TOKEN 44
#define CMO_MAXLENGTH 1026
@@ -519,7 +566,7 @@ void pSeries_cmo_feature_init(void)
{
char *ptr, *key, *value, *end;
int call_status;
- int page_order = IOMMU_PAGE_SHIFT;
+ int page_order = IOMMU_PAGE_SHIFT_4K;
pr_debug(" -> fw_cmo_feature_init()\n");
spin_lock(&rtas_data_buf_lock);
@@ -604,6 +651,9 @@ static void __init pSeries_init_early(void)
else if (firmware_has_feature(FW_FEATURE_DABR))
ppc_md.set_dabr = pseries_set_dabr;
+ if (firmware_has_feature(FW_FEATURE_SET_MODE))
+ ppc_md.set_dawr = pseries_set_dawr;
+
pSeries_cmo_feature_init();
iommu_init_early_pSeries();
@@ -614,31 +664,45 @@ static void __init pSeries_init_early(void)
* Called very early, MMU is off, device-tree isn't unflattened
*/
-static int __init pSeries_probe_hypertas(unsigned long node,
- const char *uname, int depth,
- void *data)
+static int __init pseries_probe_fw_features(unsigned long node,
+ const char *uname, int depth,
+ void *data)
{
- const char *hypertas;
- unsigned long len;
+ const char *prop;
+ int len;
+ static int hypertas_found;
+ static int vec5_found;
- if (depth != 1 ||
- (strcmp(uname, "rtas") != 0 && strcmp(uname, "rtas@0") != 0))
+ if (depth != 1)
return 0;
- hypertas = of_get_flat_dt_prop(node, "ibm,hypertas-functions", &len);
- if (!hypertas)
- return 1;
+ if (!strcmp(uname, "rtas") || !strcmp(uname, "rtas@0")) {
+ prop = of_get_flat_dt_prop(node, "ibm,hypertas-functions",
+ &len);
+ if (prop) {
+ powerpc_firmware_features |= FW_FEATURE_LPAR;
+ fw_hypertas_feature_init(prop, len);
+ }
+
+ hypertas_found = 1;
+ }
- powerpc_firmware_features |= FW_FEATURE_LPAR;
- fw_feature_init(hypertas, len);
+ if (!strcmp(uname, "chosen")) {
+ prop = of_get_flat_dt_prop(node, "ibm,architecture-vec-5",
+ &len);
+ if (prop)
+ fw_vec5_feature_init(prop, len);
- return 1;
+ vec5_found = 1;
+ }
+
+ return hypertas_found && vec5_found;
}
static int __init pSeries_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
+ const char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
if (dtype == NULL)
return 0;
@@ -655,7 +719,23 @@ static int __init pSeries_probe(void)
pr_debug("pSeries detected, looking for LPAR capability...\n");
/* Now try to figure out if we are running on LPAR */
- of_scan_flat_dt(pSeries_probe_hypertas, NULL);
+ of_scan_flat_dt(pseries_probe_fw_features, NULL);
+
+#ifdef __LITTLE_ENDIAN__
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+ long rc;
+ /*
+ * Tell the hypervisor that we want our exceptions to
+ * be taken in little endian mode. If this fails we don't
+ * want to use BUG() because it will trigger an exception.
+ */
+ rc = pseries_little_endian_exceptions();
+ if (rc) {
+ ppc_md.progress("H_SET_MODE LE exception fail", 0);
+ panic("Could not enable little endian exceptions");
+ }
+ }
+#endif
if (firmware_has_feature(FW_FEATURE_LPAR))
hpte_init_lpar();
@@ -730,4 +810,7 @@ define_machine(pseries) {
#ifdef CONFIG_KEXEC
.machine_kexec = pSeries_machine_kexec,
#endif
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+ .memory_block_size = pseries_memory_block_size,
+#endif
};
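
Several of the new setup.c helpers (pSeries_disable_reloc_on_exc(), pseries_big_endian_exceptions(), pseries_little_endian_exceptions()) share one idiom: retry an hcall while firmware returns a long-busy code, sleeping for the delay that code encodes. A hypothetical factored form, for illustration only (the patch deliberately open-codes it per call site):

#include <linux/delay.h>
#include <asm/hvcall.h>

/* Hypothetical helper showing the retry idiom: H_IS_LONG_BUSY()
 * recognizes the H_LONG_BUSY_* return codes, and get_longbusy_msecs()
 * decodes the firmware-suggested backoff before the call is retried. */
static long hcall_retry_long_busy(long (*do_hcall)(void))
{
	long rc;

	for (;;) {
		rc = do_hcall();
		if (!H_IS_LONG_BUSY(rc))
			return rc;
		mdelay(get_longbusy_msecs(rc));
	}
}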
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 80cd0be71e0..a3555b10c1a 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -42,8 +42,10 @@
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
+#include <asm/dbell.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/code-patching.h>
-#include "plpar_wrappers.h"
#include "pseries.h"
#include "offline_states.h"
@@ -54,6 +56,11 @@
*/
static cpumask_var_t of_spin_mask;
+/*
+ * If we multiplex IPI mechanisms, store the original XICS cause_ipi handler here
+ */
+static void (*xics_cause_ipi)(int cpu, unsigned long data);
+
/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */
int smp_query_cpu_stopped(unsigned int pcpu)
{
@@ -90,8 +97,8 @@ int smp_query_cpu_stopped(unsigned int pcpu)
static inline int smp_startup_cpu(unsigned int lcpu)
{
int status;
- unsigned long start_here = __pa((u32)*((unsigned long *)
- generic_secondary_smp_init));
+ unsigned long start_here =
+ __pa(ppc_function_entry(generic_secondary_smp_init));
unsigned int pcpu;
int start_cpu;
@@ -137,6 +144,8 @@ static void smp_xics_setup_cpu(int cpu)
{
if (cpu != boot_cpuid)
xics_setup_cpu();
+ if (cpu_has_feature(CPU_FTR_DBELL))
+ doorbell_setup_this_cpu();
if (firmware_has_feature(FW_FEATURE_SPLPAR))
vpa_init(cpu);
@@ -179,20 +188,25 @@ static int smp_pSeries_kick_cpu(int nr)
return 0;
}
-static int smp_pSeries_cpu_bootable(unsigned int nr)
+/* Only used on systems that support multiple IPI mechanisms */
+static void pSeries_cause_ipi_mux(int cpu, unsigned long data)
{
- /* Special case - we inhibit secondary thread startup
- * during boot if the user requests it.
- */
- if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
- if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
- return 0;
- if (smt_enabled_at_boot
- && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
- return 0;
+ if (cpumask_test_cpu(cpu, cpu_sibling_mask(smp_processor_id())))
+ doorbell_cause_ipi(cpu, data);
+ else
+ xics_cause_ipi(cpu, data);
+}
+
+static __init int pSeries_smp_probe(void)
+{
+ int ret = xics_smp_probe();
+
+ if (cpu_has_feature(CPU_FTR_DBELL)) {
+ xics_cause_ipi = smp_ops->cause_ipi;
+ smp_ops->cause_ipi = pSeries_cause_ipi_mux;
}
- return 1;
+ return ret;
}
static struct smp_ops_t pSeries_mpic_smp_ops = {
@@ -204,11 +218,11 @@ static struct smp_ops_t pSeries_mpic_smp_ops = {
static struct smp_ops_t pSeries_xics_smp_ops = {
.message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
- .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */
- .probe = xics_smp_probe,
+ .cause_ipi = NULL, /* Filled at runtime by pSeries_smp_probe() */
+ .probe = pSeries_smp_probe,
.kick_cpu = smp_pSeries_kick_cpu,
.setup_cpu = smp_xics_setup_cpu,
- .cpu_bootable = smp_pSeries_cpu_bootable,
+ .cpu_bootable = smp_generic_cpu_bootable,
};
/* This is called very early */
@@ -220,18 +234,24 @@ static void __init smp_init_pseries(void)
alloc_bootmem_cpumask_var(&of_spin_mask);
- /* Mark threads which are still spinning in hold loops. */
- if (cpu_has_feature(CPU_FTR_SMT)) {
- for_each_present_cpu(i) {
- if (cpu_thread_in_core(i) == 0)
- cpumask_set_cpu(i, of_spin_mask);
- }
- } else {
- cpumask_copy(of_spin_mask, cpu_present_mask);
+ /*
+ * Mark threads which are still spinning in hold loops
+ *
+ * We know prom_init will not have started them if RTAS supports
+ * query-cpu-stopped-state.
+ */
+ if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
+ if (cpu_has_feature(CPU_FTR_SMT)) {
+ for_each_present_cpu(i) {
+ if (cpu_thread_in_core(i) == 0)
+ cpumask_set_cpu(i, of_spin_mask);
+ }
+ } else
+ cpumask_copy(of_spin_mask, cpu_present_mask);
+
+ cpumask_clear_cpu(boot_cpuid, of_spin_mask);
}
- cpumask_clear_cpu(boot_cpuid, of_spin_mask);
-
/* Non-lpar has additional take/give timebase */
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
smp_ops->give_timebase = rtas_give_timebase;
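
The IPI rework above hinges on a hardware rule: msgsnd doorbells on these processors only reach hardware threads within the sender's own core, so cross-core IPIs must still go through XICS. A hypothetical predicate (not in the patch) stating the condition pSeries_cause_ipi_mux() tests:

#include <linux/cpumask.h>
#include <asm/cputable.h>
#include <asm/smp.h>

/* True when the doorbell fast path is usable: the feature exists and
 * the target is a sibling thread in the sending CPU's core. */
static inline bool ipi_may_use_doorbell(int target_cpu)
{
	return cpu_has_feature(CPU_FTR_DBELL) &&
	       cpumask_test_cpu(target_cpu,
				cpu_sibling_mask(smp_processor_id()));
}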
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 47226e04126..b87b97849d4 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -16,6 +16,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/suspend.h>
#include <linux/stat.h>
@@ -25,6 +26,7 @@
#include <asm/mmu.h>
#include <asm/rtas.h>
#include <asm/topology.h>
+#include "../../kernel/cacheinfo.h"
static u64 stream_id;
static struct device suspend_dev;
@@ -78,6 +80,23 @@ static int pseries_suspend_cpu(void)
}
/**
+ * pseries_suspend_enable_irqs
+ *
+ * Post-suspend configuration updates
+ *
+ **/
+static void pseries_suspend_enable_irqs(void)
+{
+ /*
+ * Update configuration which can be modified based on device tree
+ * changes during resume.
+ */
+ cacheinfo_cpu_offline(smp_processor_id());
+ post_mobility_fixup();
+ cacheinfo_cpu_online(smp_processor_id());
+}
+
+/**
* pseries_suspend_enter - Final phase of hibernation
*
* Return value:
@@ -105,7 +124,7 @@ static int pseries_prepare_late(void)
atomic_set(&suspend_data.done, 0);
atomic_set(&suspend_data.error, 0);
suspend_data.complete = &suspend_work;
- INIT_COMPLETION(suspend_work);
+ reinit_completion(&suspend_work);
return 0;
}
@@ -126,11 +145,15 @@ static ssize_t store_hibernate(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ cpumask_var_t offline_mask;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+ return -ENOMEM;
+
stream_id = simple_strtoul(buf, NULL, 16);
do {
@@ -140,19 +163,59 @@ static ssize_t store_hibernate(struct device *dev,
} while (rc == -EAGAIN);
if (!rc) {
+ /* All present CPUs must be online */
+ cpumask_andnot(offline_mask, cpu_present_mask,
+ cpu_online_mask);
+ rc = rtas_online_cpus_mask(offline_mask);
+ if (rc) {
+ pr_err("%s: Could not bring present CPUs online.\n",
+ __func__);
+ goto out;
+ }
+
stop_topology_update();
rc = pm_suspend(PM_SUSPEND_MEM);
start_topology_update();
+
+ /* Take down CPUs not online prior to suspend */
+ if (rtas_offline_cpus_mask(offline_mask))
+ pr_warn("%s: Could not restore CPUs to offline "
+ "state.\n", __func__);
}
stream_id = 0;
if (!rc)
rc = count;
+out:
+ free_cpumask_var(offline_mask);
return rc;
}
-static DEVICE_ATTR(hibernate, S_IWUSR, NULL, store_hibernate);
+#define USER_DT_UPDATE 0
+#define KERN_DT_UPDATE 1
+
+/**
+ * show_hibernate - Report device tree update responsibility
+ * @dev: subsys root device
+ * @attr: device attribute struct
+ * @buf: buffer
+ *
+ * Report whether a device tree update is performed by the kernel after a
+ * resume, or whether drmgr must coordinate the update from user space.
+ *
+ * Return value:
+ * 0 if drmgr is to initiate update, and 1 otherwise
+ **/
+static ssize_t show_hibernate(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", KERN_DT_UPDATE);
+}
+
+static DEVICE_ATTR(hibernate, S_IWUSR | S_IRUGO,
+ show_hibernate, store_hibernate);
static struct bus_type suspend_subsys = {
.name = "power",
@@ -213,6 +276,7 @@ static int __init pseries_suspend_init(void)
return rc;
ppc_md.suspend_disable_cpu = pseries_suspend_cpu;
+ ppc_md.suspend_enable_irqs = pseries_suspend_enable_irqs;
suspend_set_ops(&pseries_suspend_ops);
return 0;
}
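
Condensed, the CPU bracketing that store_hibernate() now performs looks like the hypothetical helper below (names simplified, error paths trimmed): every present CPU is forced online across pm_suspend(), since the platform requires all threads online for the operation, and the caller's previous offline set is restored afterwards.

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/suspend.h>
#include <asm/rtas.h>

/* Hypothetical condensation of the new store_hibernate() flow. */
static int hibernate_with_all_cpus_online(void)
{
	cpumask_var_t was_offline;
	int rc;

	if (!alloc_cpumask_var(&was_offline, GFP_TEMPORARY))
		return -ENOMEM;

	/* Present-but-offline CPUs, to be put back offline afterwards. */
	cpumask_andnot(was_offline, cpu_present_mask, cpu_online_mask);

	rc = rtas_online_cpus_mask(was_offline);
	if (!rc) {
		rc = pm_suspend(PM_SUSPEND_MEM);
		if (rtas_offline_cpus_mask(was_offline))
			pr_warn("could not restore offline CPUs\n");
	}

	free_cpumask_var(was_offline);
	return rc;
}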