Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/.gitignore | 4
-rw-r--r--  drivers/pci/Kconfig | 70
-rw-r--r--  drivers/pci/Makefile | 41
-rw-r--r--  drivers/pci/access.c | 315
-rw-r--r--  drivers/pci/ats.c | 375
-rw-r--r--  drivers/pci/bus.c | 323
-rw-r--r--  drivers/pci/dmar.c | 1461
-rw-r--r--  drivers/pci/host-bridge.c | 84
-rw-r--r--  drivers/pci/host/Kconfig | 49
-rw-r--r--  drivers/pci/host/Makefile | 8
-rw-r--r--  drivers/pci/host/pci-exynos.c | 667
-rw-r--r--  drivers/pci/host/pci-host-generic.c | 388
-rw-r--r--  drivers/pci/host/pci-imx6.c | 616
-rw-r--r--  drivers/pci/host/pci-mvebu.c | 1097
-rw-r--r--  drivers/pci/host/pci-rcar-gen2.c | 426
-rw-r--r--  drivers/pci/host/pci-tegra.c | 1719
-rw-r--r--  drivers/pci/host/pcie-designware.c | 835
-rw-r--r--  drivers/pci/host/pcie-designware.h | 76
-rw-r--r--  drivers/pci/host/pcie-rcar.c | 1006
-rw-r--r--  drivers/pci/hotplug-pci.c | 31
-rw-r--r--  drivers/pci/hotplug.c | 37
-rw-r--r--  drivers/pci/hotplug/Kconfig | 53
-rw-r--r--  drivers/pci/hotplug/Makefile | 6
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c | 21
-rw-r--r--  drivers/pci/hotplug/acpiphp.h | 131
-rw-r--r--  drivers/pci/hotplug/acpiphp_core.c | 123
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 1657
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c | 76
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug.h | 50
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_core.c | 49
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_pci.c | 97
-rw-r--r--  drivers/pci/hotplug/cpcihp_generic.c | 38
-rw-r--r--  drivers/pci/hotplug/cpcihp_zt5550.c | 32
-rw-r--r--  drivers/pci/hotplug/cpcihp_zt5550.h | 18
-rw-r--r--  drivers/pci/hotplug/cpqphp.h | 74
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c | 55
-rw-r--r--  drivers/pci/hotplug/cpqphp_ctrl.c | 117
-rw-r--r--  drivers/pci/hotplug/cpqphp_nvram.c | 33
-rw-r--r--  drivers/pci/hotplug/cpqphp_nvram.h | 12
-rw-r--r--  drivers/pci/hotplug/cpqphp_pci.c | 49
-rw-r--r--  drivers/pci/hotplug/cpqphp_sysfs.c | 24
-rw-r--r--  drivers/pci/hotplug/fakephp.c | 164
-rw-r--r--  drivers/pci/hotplug/ibmphp.h | 78
-rw-r--r--  drivers/pci/hotplug/ibmphp_core.c | 180
-rw-r--r--  drivers/pci/hotplug/ibmphp_ebda.c | 127
-rw-r--r--  drivers/pci/hotplug/ibmphp_hpc.c | 30
-rw-r--r--  drivers/pci/hotplug/ibmphp_pci.c | 109
-rw-r--r--  drivers/pci/hotplug/ibmphp_res.c | 91
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c | 131
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 64
-rw-r--r--  drivers/pci/hotplug/pciehp_acpi.c | 23
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 102
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c | 277
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 680
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c | 108
-rw-r--r--  drivers/pci/hotplug/pcihp_skeleton.c | 62
-rw-r--r--  drivers/pci/hotplug/pcihp_slot.c | 23
-rw-r--r--  drivers/pci/hotplug/rpadlpar.h | 8
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c | 30
-rw-r--r--  drivers/pci/hotplug/rpaphp.h | 24
-rw-r--r--  drivers/pci/hotplug/rpaphp_core.c | 44
-rw-r--r--  drivers/pci/hotplug/rpaphp_pci.c | 3
-rw-r--r--  drivers/pci/hotplug/rpaphp_slot.c | 20
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c | 214
-rw-r--r--  drivers/pci/hotplug/sgi_hotplug.c | 124
-rw-r--r--  drivers/pci/hotplug/shpchp.h | 45
-rw-r--r--  drivers/pci/hotplug/shpchp_core.c | 66
-rw-r--r--  drivers/pci/hotplug/shpchp_ctrl.c | 49
-rw-r--r--  drivers/pci/hotplug/shpchp_hpc.c | 35
-rw-r--r--  drivers/pci/hotplug/shpchp_pci.c | 96
-rw-r--r--  drivers/pci/hotplug/shpchp_sysfs.c | 31
-rw-r--r--  drivers/pci/htirq.c | 37
-rw-r--r--  drivers/pci/intel-iommu.c | 3843
-rw-r--r--  drivers/pci/intr_remapping.c | 798
-rw-r--r--  drivers/pci/intr_remapping.h | 17
-rw-r--r--  drivers/pci/ioapic.c | 24
-rw-r--r--  drivers/pci/iov.c | 463
-rw-r--r--  drivers/pci/iova.c | 427
-rw-r--r--  drivers/pci/irq.c | 13
-rw-r--r--  drivers/pci/msi.c | 626
-rw-r--r--  drivers/pci/msi.h | 30
-rw-r--r--  drivers/pci/of.c | 61
-rw-r--r--  drivers/pci/pci-acpi.c | 299
-rw-r--r--  drivers/pci/pci-driver.c | 542
-rw-r--r--  drivers/pci/pci-label.c | 206
-rw-r--r--  drivers/pci/pci-stub.c | 15
-rw-r--r--  drivers/pci/pci-sysfs.c | 837
-rw-r--r--  drivers/pci/pci.c | 2462
-rw-r--r--  drivers/pci/pci.h | 209
-rw-r--r--  drivers/pci/pcie/Kconfig | 38
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c | 52
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c | 33
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.h | 43
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_acpi.c | 101
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 244
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_errprint.c | 229
-rw-r--r--  drivers/pci/pcie/aspm.c | 282
-rw-r--r--  drivers/pci/pcie/pme.c | 80
-rw-r--r--  drivers/pci/pcie/portdrv.h | 35
-rw-r--r--  drivers/pci/pcie/portdrv_acpi.c | 24
-rw-r--r--  drivers/pci/pcie/portdrv_bus.c | 6
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 132
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 159
-rw-r--r--  drivers/pci/probe.c | 1442
-rw-r--r--  drivers/pci/proc.c | 107
-rw-r--r--  drivers/pci/quirks.c | 1714
-rw-r--r--  drivers/pci/remove.c | 171
-rw-r--r--  drivers/pci/rom.c | 92
-rw-r--r--  drivers/pci/search.c | 214
-rw-r--r--  drivers/pci/setup-bus.c | 1333
-rw-r--r--  drivers/pci/setup-irq.c | 19
-rw-r--r--  drivers/pci/setup-res.c | 311
-rw-r--r--  drivers/pci/slot.c | 42
-rw-r--r--  drivers/pci/syscall.c | 4
-rw-r--r--  drivers/pci/vc.c | 434
-rw-r--r--  drivers/pci/vpd.c | 1
-rw-r--r--  drivers/pci/xen-pcifront.c | 128
117 files changed, 19514 insertions, 14246 deletions
diff --git a/drivers/pci/.gitignore b/drivers/pci/.gitignore
deleted file mode 100644
index f297ca8d313..00000000000
--- a/drivers/pci/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-classlist.h
-devlist.h
-gen-devlist
-
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 5b1630e4e9e..893503fa178 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -1,14 +1,9 @@
#
# PCI configuration
#
-config ARCH_SUPPORTS_MSI
- bool
- default n
-
config PCI_MSI
bool "Message Signaled Interrupts (MSI and MSI-X)"
depends on PCI
- depends on ARCH_SUPPORTS_MSI
help
This allows device drivers to enable MSI (Message Signaled
Interrupts). Message Signaled Interrupts enable a device to
@@ -31,6 +26,19 @@ config PCI_DEBUG
When in doubt, say N.
+config PCI_REALLOC_ENABLE_AUTO
+ bool "Enable PCI resource re-allocation detection"
+ depends on PCI
+ help
+ Say Y here if you want the PCI core to detect if PCI resource
+ re-allocation needs to be enabled. You can always use pci=realloc=on
+ or pci=realloc=off to override it. Note this feature is a no-op
+ unless PCI_IOV support is also enabled; in that case it will
+ automatically re-allocate PCI resources if SR-IOV BARs have not
+ been allocated by the BIOS.
+
+ When in doubt, say N.
+
config PCI_STUB
tristate "PCI Stub driver"
depends on PCI
@@ -43,24 +51,13 @@ config PCI_STUB
config XEN_PCIDEV_FRONTEND
tristate "Xen PCI Frontend"
depends on PCI && X86 && XEN
- select HOTPLUG
select PCI_XEN
+ select XEN_XENBUS_FRONTEND
default y
help
The PCI device frontend driver allows the kernel to import arbitrary
PCI devices from a PCI backend to support PCI driver domains.
-config XEN_PCIDEV_FE_DEBUG
- bool "Xen PCI Frontend debugging"
- depends on XEN_PCIDEV_FRONTEND && PCI_DEBUG
- help
- Say Y here if you want the Xen PCI frontend to produce a bunch of debug
- messages to the system log. Select this if you are having a
- problem with Xen PCI frontend support and want to see more of what is
- going on.
-
- When in doubt, say N.
-
config HT_IRQ
bool "Interrupts on hypertransport devices"
default y
@@ -70,9 +67,13 @@ config HT_IRQ
If unsure say Y.
+config PCI_ATS
+ bool
+
config PCI_IOV
bool "PCI IOV support"
depends on PCI
+ select PCI_ATS
help
I/O Virtualization is a PCI feature supported by some devices
which allows them to create virtual devices which share their
@@ -80,9 +81,38 @@ config PCI_IOV
If unsure, say N.
+config PCI_PRI
+ bool "PCI PRI support"
+ depends on PCI
+ select PCI_ATS
+ help
+ PRI is the PCI Page Request Interface. It allows PCI devices that are
+ behind an IOMMU to recover from page faults.
+
+ If unsure, say N.
+
+config PCI_PASID
+ bool "PCI PASID support"
+ depends on PCI
+ select PCI_ATS
+ help
+ Process Address Space Identifiers (PASIDs) can be used by PCI devices
+ to access more than one IO address space at the same time. To make
+ use of this feature an IOMMU is required which also supports PASIDs.
+ Select this option if you have such an IOMMU and want to compile the
+ driver for it into your kernel.
+
+ If unsure, say N.
+
config PCI_IOAPIC
- bool
+ bool "PCI IO-APIC hotplug support" if X86
depends on PCI
depends on ACPI
- depends on HOTPLUG
- default y
+ depends on X86_IO_APIC
+ default !X86
+
+config PCI_LABEL
+ def_bool y if (DMI || ACPI)
+ select NLS
+
+source "drivers/pci/host/Kconfig"
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index f01e344cf4b..e04fe2d9df3 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,9 +2,9 @@
# Makefile for the PCI bus specific drivers.
#
-obj-y += access.o bus.o probe.o remove.o pci.o \
+obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
- irq.o vpd.o
+ irq.o vpd.o setup-bus.o vc.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
@@ -15,8 +15,6 @@ obj-$(CONFIG_PCIEPORTBUS) += pcie/
obj-$(CONFIG_PCI_IOAPIC) += ioapic.o
-obj-$(CONFIG_HOTPLUG) += hotplug.o
-
# Build the PCI Hotplug drivers if we were asked to
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
ifdef CONFIG_HOTPLUG_PCI
@@ -29,37 +27,29 @@ obj-$(CONFIG_PCI_MSI) += msi.o
# Build the Hypertransport interrupt support
obj-$(CONFIG_HT_IRQ) += htirq.o
-# Build Intel IOMMU support
-obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
-
-obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
-
+obj-$(CONFIG_PCI_ATS) += ats.o
obj-$(CONFIG_PCI_IOV) += iov.o
#
# Some architectures use the generic PCI setup functions
#
-obj-$(CONFIG_X86) += setup-bus.o
-obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o
-obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
-obj-$(CONFIG_PARISC) += setup-bus.o
-obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
-obj-$(CONFIG_PPC) += setup-bus.o
-obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
-obj-$(CONFIG_X86_VISWS) += setup-irq.o
-obj-$(CONFIG_MN10300) += setup-bus.o
-obj-$(CONFIG_MICROBLAZE) += setup-bus.o
+obj-$(CONFIG_ALPHA) += setup-irq.o
+obj-$(CONFIG_ARM) += setup-irq.o
+obj-$(CONFIG_UNICORE32) += setup-irq.o
+obj-$(CONFIG_SUPERH) += setup-irq.o
+obj-$(CONFIG_MIPS) += setup-irq.o
+obj-$(CONFIG_TILE) += setup-irq.o
+obj-$(CONFIG_SPARC_LEON) += setup-irq.o
+obj-$(CONFIG_M68K) += setup-irq.o
#
# ACPI Related PCI FW Functions
+# ACPI _DSM provided firmware instance and string name
#
obj-$(CONFIG_ACPI) += pci-acpi.o
# SMBIOS provided firmware instance and labels
-obj-$(CONFIG_DMI) += pci-label.o
-
-# Cardbus & CompactPCI use setup-bus
-obj-$(CONFIG_HOTPLUG) += setup-bus.o
+obj-$(CONFIG_PCI_LABEL) += pci-label.o
obj-$(CONFIG_PCI_SYSCALL) += syscall.o
@@ -67,4 +57,9 @@ obj-$(CONFIG_PCI_STUB) += pci-stub.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
+obj-$(CONFIG_OF) += of.o
+
ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
+
+# PCI host controller drivers
+obj-y += host/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 531bc697d80..d292d7cb341 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -13,7 +13,7 @@
* configuration space.
*/
-static DEFINE_RAW_SPINLOCK(pci_lock);
+DEFINE_RAW_SPINLOCK(pci_lock);
/*
* Wrappers for all PCI configuration access functions. They just check
@@ -127,51 +127,59 @@ EXPORT_SYMBOL(pci_write_vpd);
* We have a bit per device to indicate it's blocked and a global wait queue
* for callers to sleep on until devices are unblocked.
*/
-static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait);
+static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);
-static noinline void pci_wait_ucfg(struct pci_dev *dev)
+static noinline void pci_wait_cfg(struct pci_dev *dev)
{
DECLARE_WAITQUEUE(wait, current);
- __add_wait_queue(&pci_ucfg_wait, &wait);
+ __add_wait_queue(&pci_cfg_wait, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
raw_spin_unlock_irq(&pci_lock);
schedule();
raw_spin_lock_irq(&pci_lock);
- } while (dev->block_ucfg_access);
- __remove_wait_queue(&pci_ucfg_wait, &wait);
+ } while (dev->block_cfg_access);
+ __remove_wait_queue(&pci_cfg_wait, &wait);
}
+/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size,type) \
int pci_user_read_config_##size \
(struct pci_dev *dev, int pos, type *val) \
{ \
- int ret = 0; \
+ int ret = PCIBIOS_SUCCESSFUL; \
u32 data = -1; \
- if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
+ if (PCI_##size##_BAD) \
+ return -EINVAL; \
raw_spin_lock_irq(&pci_lock); \
- if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
+ if (unlikely(dev->block_cfg_access)) \
+ pci_wait_cfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
raw_spin_unlock_irq(&pci_lock); \
*val = (type)data; \
- return ret; \
-}
+ return pcibios_err_to_errno(ret); \
+} \
+EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
+/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size,type) \
int pci_user_write_config_##size \
(struct pci_dev *dev, int pos, type val) \
{ \
- int ret = -EIO; \
- if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
+ int ret = PCIBIOS_SUCCESSFUL; \
+ if (PCI_##size##_BAD) \
+ return -EINVAL; \
raw_spin_lock_irq(&pci_lock); \
- if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
+ if (unlikely(dev->block_cfg_access)) \
+ pci_wait_cfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
pos, sizeof(type), val); \
raw_spin_unlock_irq(&pci_lock); \
- return ret; \
-}
+ return pcibios_err_to_errno(ret); \
+} \
+EXPORT_SYMBOL_GPL(pci_user_write_config_##size);
PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
@@ -197,6 +205,8 @@ struct pci_vpd_pci22 {
* This code has to spin since there is no other notification from the PCI
* hardware. Since the VPD is often implemented by serial attachment to an
* EEPROM, it may take many milliseconds to complete.
+ *
+ * Returns 0 on success, negative values indicate error.
*/
static int pci_vpd_pci22_wait(struct pci_dev *dev)
{
@@ -212,7 +222,7 @@ static int pci_vpd_pci22_wait(struct pci_dev *dev)
for (;;) {
ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
&status);
- if (ret)
+ if (ret < 0)
return ret;
if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
@@ -221,10 +231,7 @@ static int pci_vpd_pci22_wait(struct pci_dev *dev)
}
if (time_after(jiffies, timeout)) {
- dev_printk(KERN_DEBUG, &dev->dev,
- "vpd r/w failed. This is likely a firmware "
- "bug on this device. Contact the card "
- "vendor for a firmware update.");
+ dev_printk(KERN_DEBUG, &dev->dev, "vpd r/w failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
return -ETIMEDOUT;
}
if (fatal_signal_pending(current))
@@ -324,6 +331,8 @@ static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count
vpd->busy = true;
vpd->flag = 0;
ret = pci_vpd_pci22_wait(dev);
+ if (ret < 0)
+ break;
pos += sizeof(u32);
}
@@ -365,60 +374,56 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
}
/**
- * pci_vpd_truncate - Set available Vital Product Data size
+ * pci_cfg_access_lock - Lock PCI config reads/writes
* @dev: pci device struct
- * @size: available memory in bytes
*
- * Adjust size of available VPD area.
+ * When access is locked, any userspace reads or writes to config
+ * space and concurrent lock requests will sleep until access is
+ * allowed via pci_cfg_access_unlock() again.
*/
-int pci_vpd_truncate(struct pci_dev *dev, size_t size)
+void pci_cfg_access_lock(struct pci_dev *dev)
{
- if (!dev->vpd)
- return -EINVAL;
-
- /* limited by the access method */
- if (size > dev->vpd->len)
- return -EINVAL;
-
- dev->vpd->len = size;
- if (dev->vpd->attr)
- dev->vpd->attr->size = size;
+ might_sleep();
- return 0;
+ raw_spin_lock_irq(&pci_lock);
+ if (dev->block_cfg_access)
+ pci_wait_cfg(dev);
+ dev->block_cfg_access = 1;
+ raw_spin_unlock_irq(&pci_lock);
}
-EXPORT_SYMBOL(pci_vpd_truncate);
+EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
/**
- * pci_block_user_cfg_access - Block userspace PCI config reads/writes
+ * pci_cfg_access_trylock - try to lock PCI config reads/writes
* @dev: pci device struct
*
- * When user access is blocked, any reads or writes to config space will
- * sleep until access is unblocked again. We don't allow nesting of
- * block/unblock calls.
+ * Same as pci_cfg_access_lock, but will return 0 if access is
+ * already locked, 1 otherwise. This function can be used from
+ * atomic contexts.
*/
-void pci_block_user_cfg_access(struct pci_dev *dev)
+bool pci_cfg_access_trylock(struct pci_dev *dev)
{
unsigned long flags;
- int was_blocked;
+ bool locked = true;
raw_spin_lock_irqsave(&pci_lock, flags);
- was_blocked = dev->block_ucfg_access;
- dev->block_ucfg_access = 1;
+ if (dev->block_cfg_access)
+ locked = false;
+ else
+ dev->block_cfg_access = 1;
raw_spin_unlock_irqrestore(&pci_lock, flags);
- /* If we BUG() inside the pci_lock, we're guaranteed to hose
- * the machine */
- BUG_ON(was_blocked);
+ return locked;
}
-EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);
+EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);
/**
- * pci_unblock_user_cfg_access - Unblock userspace PCI config reads/writes
+ * pci_cfg_access_unlock - Unlock PCI config reads/writes
* @dev: pci device struct
*
- * This function allows userspace PCI config accesses to resume.
+ * This function allows PCI config accesses to resume.
*/
-void pci_unblock_user_cfg_access(struct pci_dev *dev)
+void pci_cfg_access_unlock(struct pci_dev *dev)
{
unsigned long flags;
@@ -426,10 +431,208 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
/* This indicates a problem in the caller, but we don't need
* to kill them, unlike a double-block above. */
- WARN_ON(!dev->block_ucfg_access);
+ WARN_ON(!dev->block_cfg_access);
- dev->block_ucfg_access = 0;
- wake_up_all(&pci_ucfg_wait);
+ dev->block_cfg_access = 0;
+ wake_up_all(&pci_cfg_wait);
raw_spin_unlock_irqrestore(&pci_lock, flags);
}
-EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
+EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
+
+static inline int pcie_cap_version(const struct pci_dev *dev)
+{
+ return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
+}
+
+static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return type == PCI_EXP_TYPE_ENDPOINT ||
+ type == PCI_EXP_TYPE_LEG_END ||
+ type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_UPSTREAM ||
+ type == PCI_EXP_TYPE_DOWNSTREAM ||
+ type == PCI_EXP_TYPE_PCI_BRIDGE ||
+ type == PCI_EXP_TYPE_PCIE_BRIDGE;
+}
+
+static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return (type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_DOWNSTREAM) &&
+ pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
+}
+
+static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_RC_EC;
+}
+
+static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
+{
+ if (!pci_is_pcie(dev))
+ return false;
+
+ switch (pos) {
+ case PCI_EXP_FLAGS:
+ return true;
+ case PCI_EXP_DEVCAP:
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_DEVSTA:
+ return true;
+ case PCI_EXP_LNKCAP:
+ case PCI_EXP_LNKCTL:
+ case PCI_EXP_LNKSTA:
+ return pcie_cap_has_lnkctl(dev);
+ case PCI_EXP_SLTCAP:
+ case PCI_EXP_SLTCTL:
+ case PCI_EXP_SLTSTA:
+ return pcie_cap_has_sltctl(dev);
+ case PCI_EXP_RTCTL:
+ case PCI_EXP_RTCAP:
+ case PCI_EXP_RTSTA:
+ return pcie_cap_has_rtctl(dev);
+ case PCI_EXP_DEVCAP2:
+ case PCI_EXP_DEVCTL2:
+ case PCI_EXP_LNKCAP2:
+ case PCI_EXP_LNKCTL2:
+ case PCI_EXP_LNKSTA2:
+ return pcie_cap_version(dev) > 1;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Note that these accessor functions are only for the "PCI Express
+ * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
+ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
+ */
+int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
+{
+ int ret;
+
+ *val = 0;
+ if (pos & 1)
+ return -EINVAL;
+
+ if (pcie_capability_reg_implemented(dev, pos)) {
+ ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
+ /*
+ * Reset *val to 0 if pci_read_config_word() fails, it may
+ * have been written as 0xFFFF if hardware error happens
+ * during pci_read_config_word().
+ */
+ if (ret)
+ *val = 0;
+ return ret;
+ }
+
+ /*
+ * For Functions that do not implement the Slot Capabilities,
+ * Slot Status, and Slot Control registers, these spaces must
+ * be hardwired to 0b, with the exception of the Presence Detect
+ * State bit in the Slot Status register of Downstream Ports,
+ * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
+ */
+ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+ *val = PCI_EXP_SLTSTA_PDS;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pcie_capability_read_word);
+
+int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
+{
+ int ret;
+
+ *val = 0;
+ if (pos & 3)
+ return -EINVAL;
+
+ if (pcie_capability_reg_implemented(dev, pos)) {
+ ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
+ /*
+ * Reset *val to 0 if pci_read_config_dword() fails, it may
+ * have been written as 0xFFFFFFFF if hardware error happens
+ * during pci_read_config_dword().
+ */
+ if (ret)
+ *val = 0;
+ return ret;
+ }
+
+ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+ *val = PCI_EXP_SLTSTA_PDS;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pcie_capability_read_dword);
+
+int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
+{
+ if (pos & 1)
+ return -EINVAL;
+
+ if (!pcie_capability_reg_implemented(dev, pos))
+ return 0;
+
+ return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
+}
+EXPORT_SYMBOL(pcie_capability_write_word);
+
+int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
+{
+ if (pos & 3)
+ return -EINVAL;
+
+ if (!pcie_capability_reg_implemented(dev, pos))
+ return 0;
+
+ return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
+}
+EXPORT_SYMBOL(pcie_capability_write_dword);
+
+int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+ u16 clear, u16 set)
+{
+ int ret;
+ u16 val;
+
+ ret = pcie_capability_read_word(dev, pos, &val);
+ if (!ret) {
+ val &= ~clear;
+ val |= set;
+ ret = pcie_capability_write_word(dev, pos, val);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
+
+int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
+ u32 clear, u32 set)
+{
+ int ret;
+ u32 val;
+
+ ret = pcie_capability_read_dword(dev, pos, &val);
+ if (!ret) {
+ val &= ~clear;
+ val |= set;
+ ret = pcie_capability_write_dword(dev, pos, val);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
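
A short sketch of how the new accessors above compose (illustrative only, not part of this patch): a driver can hold off userspace config accesses with pci_cfg_access_lock() while it performs a read-modify-write of the Link Control register via pcie_capability_clear_and_set_word(). The PCI_EXP_LNKCTL_ASPM_L1 bit is just an example choice.

	#include <linux/pci.h>

	/* Illustrative sketch: RMW of Link Control with userspace locked out. */
	static void example_clear_aspm_l1(struct pci_dev *dev)
	{
		pci_cfg_access_lock(dev);	/* may sleep; use trylock in atomic context */

		/* silently a no-op on devices without an implemented Link Control */
		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1, 0);

		pci_cfg_access_unlock(dev);	/* wakes any sleeping waiters */
	}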
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
new file mode 100644
index 00000000000..a8099d4d0c9
--- /dev/null
+++ b/drivers/pci/ats.c
@@ -0,0 +1,375 @@
+/*
+ * drivers/pci/ats.c
+ *
+ * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
+ * Copyright (C) 2011 Advanced Micro Devices,
+ *
+ * PCI Express I/O Virtualization (IOV) support.
+ * Address Translation Service 1.0
+ * Page Request Interface added by Joerg Roedel <joerg.roedel@amd.com>
+ * PASID support added by Joerg Roedel <joerg.roedel@amd.com>
+ */
+
+#include <linux/export.h>
+#include <linux/pci-ats.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "pci.h"
+
+static int ats_alloc_one(struct pci_dev *dev, int ps)
+{
+ int pos;
+ u16 cap;
+ struct pci_ats *ats;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ ats = kzalloc(sizeof(*ats), GFP_KERNEL);
+ if (!ats)
+ return -ENOMEM;
+
+ ats->pos = pos;
+ ats->stu = ps;
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+ ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+ dev->ats = ats;
+
+ return 0;
+}
+
+static void ats_free_one(struct pci_dev *dev)
+{
+ kfree(dev->ats);
+ dev->ats = NULL;
+}
+
+/**
+ * pci_enable_ats - enable the ATS capability
+ * @dev: the PCI device
+ * @ps: the IOMMU page shift
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ int rc;
+ u16 ctrl;
+
+ BUG_ON(dev->ats && dev->ats->is_enabled);
+
+ if (ps < PCI_ATS_MIN_STU)
+ return -EINVAL;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ if (pdev->ats)
+ rc = pdev->ats->stu == ps ? 0 : -EINVAL;
+ else
+ rc = ats_alloc_one(pdev, ps);
+
+ if (!rc)
+ pdev->ats->ref_cnt++;
+ mutex_unlock(&pdev->sriov->lock);
+ if (rc)
+ return rc;
+ }
+
+ if (!dev->is_physfn) {
+ rc = ats_alloc_one(dev, ps);
+ if (rc)
+ return rc;
+ }
+
+ ctrl = PCI_ATS_CTRL_ENABLE;
+ if (!dev->is_virtfn)
+ ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_enable_ats);
+
+/**
+ * pci_disable_ats - disable the ATS capability
+ * @dev: the PCI device
+ */
+void pci_disable_ats(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ BUG_ON(!dev->ats || !dev->ats->is_enabled);
+
+ pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
+ ctrl &= ~PCI_ATS_CTRL_ENABLE;
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 0;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ pdev->ats->ref_cnt--;
+ if (!pdev->ats->ref_cnt)
+ ats_free_one(pdev);
+ mutex_unlock(&pdev->sriov->lock);
+ }
+
+ if (!dev->is_physfn)
+ ats_free_one(dev);
+}
+EXPORT_SYMBOL_GPL(pci_disable_ats);
+
+void pci_restore_ats_state(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ if (!pci_ats_enabled(dev))
+ return;
+ if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS))
+ BUG();
+
+ ctrl = PCI_ATS_CTRL_ENABLE;
+ if (!dev->is_virtfn)
+ ctrl |= PCI_ATS_CTRL_STU(dev->ats->stu - PCI_ATS_MIN_STU);
+
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+}
+EXPORT_SYMBOL_GPL(pci_restore_ats_state);
+
+/**
+ * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
+ * @dev: the PCI device
+ *
+ * Returns the queue depth on success, or negative on failure.
+ *
+ * The ATS spec uses 0 in the Invalidate Queue Depth field to
+ * indicate that the function can accept 32 Invalidate Requests.
+ * But here we use the `real' values (i.e. 1~32) for the Queue
+ * Depth; and 0 indicates the function shares the Queue with
+ * other functions (doesn't exclusively own a Queue).
+ */
+int pci_ats_queue_depth(struct pci_dev *dev)
+{
+ int pos;
+ u16 cap;
+
+ if (dev->is_virtfn)
+ return 0;
+
+ if (dev->ats)
+ return dev->ats->qdep;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+
+ return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+}
+EXPORT_SYMBOL_GPL(pci_ats_queue_depth);
+
+#ifdef CONFIG_PCI_PRI
+/**
+ * pci_enable_pri - Enable PRI capability
+ * @pdev: PCI device structure
+ * @reqs: maximum number of outstanding page requests to allocate
+ *
+ * Returns 0 on success, negative value on error
+ */
+int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
+{
+ u16 control, status;
+ u32 max_requests;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
+ if ((control & PCI_PRI_CTRL_ENABLE) ||
+ !(status & PCI_PRI_STATUS_STOPPED))
+ return -EBUSY;
+
+ pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
+ reqs = min(max_requests, reqs);
+ pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
+
+ control |= PCI_PRI_CTRL_ENABLE;
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_enable_pri);
+
+/**
+ * pci_disable_pri - Disable PRI capability
+ * @pdev: PCI device structure
+ *
+ * Only clears the enabled-bit, regardless of its former value
+ */
+void pci_disable_pri(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ control &= ~PCI_PRI_CTRL_ENABLE;
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+}
+EXPORT_SYMBOL_GPL(pci_disable_pri);
+
+/**
+ * pci_reset_pri - Resets device's PRI state
+ * @pdev: PCI device structure
+ *
+ * The PRI capability must be disabled before this function is called.
+ * Returns 0 on success, negative value on error.
+ */
+int pci_reset_pri(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ if (control & PCI_PRI_CTRL_ENABLE)
+ return -EBUSY;
+
+ control |= PCI_PRI_CTRL_RESET;
+
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_reset_pri);
+#endif /* CONFIG_PCI_PRI */
+
+#ifdef CONFIG_PCI_PASID
+/**
+ * pci_enable_pasid - Enable the PASID capability
+ * @pdev: PCI device structure
+ * @features: Features to enable
+ *
+ * Returns 0 on success, negative value on error. This function checks
+ * whether the features are actually supported by the device and returns
+ * an error if not.
+ */
+int pci_enable_pasid(struct pci_dev *pdev, int features)
+{
+ u16 control, supported;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PASID_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+
+ if (control & PCI_PASID_CTRL_ENABLE)
+ return -EINVAL;
+
+ supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
+
+ /* User wants to enable anything unsupported? */
+ if ((supported & features) != features)
+ return -EINVAL;
+
+ control = PCI_PASID_CTRL_ENABLE | features;
+
+ pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_enable_pasid);
+
+/**
+ * pci_disable_pasid - Disable the PASID capability
+ * @pdev: PCI device structure
+ *
+ */
+void pci_disable_pasid(struct pci_dev *pdev)
+{
+ u16 control = 0;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+ if (!pos)
+ return;
+
+ pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+}
+EXPORT_SYMBOL_GPL(pci_disable_pasid);
+
+/**
+ * pci_pasid_features - Check which PASID features are supported
+ * @pdev: PCI device structure
+ *
+ * Returns a negative value when no PASID capability is present.
+ * Otherwise it returns a bitmask with supported features. Current
+ * features reported are:
+ * PCI_PASID_CAP_EXEC - Execute permission supported
+ * PCI_PASID_CAP_PRIV - Privileged mode supported
+ */
+int pci_pasid_features(struct pci_dev *pdev)
+{
+ u16 supported;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+
+ supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
+
+ return supported;
+}
+EXPORT_SYMBOL_GPL(pci_pasid_features);
+
+#define PASID_NUMBER_SHIFT 8
+#define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT)
+/**
+ * pci_max_pasid - Get maximum number of PASIDs supported by device
+ * @pdev: PCI device structure
+ *
+ * Returns negative value when PASID capability is not present.
+ * Otherwise it returns the number of supported PASIDs.
+ */
+int pci_max_pasids(struct pci_dev *pdev)
+{
+ u16 supported;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+
+ supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT;
+
+ return (1 << supported);
+}
+EXPORT_SYMBOL_GPL(pci_max_pasids);
+#endif /* CONFIG_PCI_PASID */
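
For the ATS half of this file, a minimal pairing sketch (again illustrative, not from the patch): pci_enable_ats() takes the IOMMU page shift, and pci_ats_queue_depth() reports how many invalidate requests may be in flight. PAGE_SHIFT stands in here for a real IOMMU's value.

	/* Illustrative sketch: enable ATS and query the invalidate queue depth. */
	static int example_setup_ats(struct pci_dev *pdev)
	{
		int qdep, ret;

		ret = pci_enable_ats(pdev, PAGE_SHIFT);	/* assumed IOMMU page shift */
		if (ret)
			return ret;

		qdep = pci_ats_queue_depth(pdev);	/* 1..32, or 0 for a shared queue */
		if (qdep < 0) {
			pci_disable_ats(pdev);
			return qdep;
		}

		return 0;
	}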
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 5624db8c9ad..73aef51a28f 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -13,11 +13,44 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include "pci.h"
+void pci_add_resource_offset(struct list_head *resources, struct resource *res,
+ resource_size_t offset)
+{
+ struct pci_host_bridge_window *window;
+
+ window = kzalloc(sizeof(struct pci_host_bridge_window), GFP_KERNEL);
+ if (!window) {
+ printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res);
+ return;
+ }
+
+ window->res = res;
+ window->offset = offset;
+ list_add_tail(&window->list, resources);
+}
+EXPORT_SYMBOL(pci_add_resource_offset);
+
+void pci_add_resource(struct list_head *resources, struct resource *res)
+{
+ pci_add_resource_offset(resources, res, 0);
+}
+EXPORT_SYMBOL(pci_add_resource);
+
+void pci_free_resource_list(struct list_head *resources)
+{
+ struct pci_host_bridge_window *window, *tmp;
+
+ list_for_each_entry_safe(window, tmp, resources, list) {
+ list_del(&window->list);
+ kfree(window);
+ }
+}
+EXPORT_SYMBOL(pci_free_resource_list);
+
void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
unsigned int flags)
{
@@ -52,8 +85,8 @@ EXPORT_SYMBOL_GPL(pci_bus_resource_n);
void pci_bus_remove_resources(struct pci_bus *bus)
{
- struct pci_bus_resource *bus_res, *tmp;
int i;
+ struct pci_bus_resource *bus_res, *tmp;
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
bus->resource[i] = NULL;
@@ -64,47 +97,89 @@ void pci_bus_remove_resources(struct pci_bus *bus)
}
}
+static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static struct pci_bus_region pci_64_bit = {0,
+ (dma_addr_t) 0xffffffffffffffffULL};
+static struct pci_bus_region pci_high = {(dma_addr_t) 0x100000000ULL,
+ (dma_addr_t) 0xffffffffffffffffULL};
+#endif
+
/*
- * Find the highest-address bus resource below the cursor "res". If the
- * cursor is NULL, return the highest resource.
+ * @res contains CPU addresses. Clip it so the corresponding bus addresses
+ * on @bus are entirely within @region. This is used to control the bus
+ * addresses of resources we allocate, e.g., we may need a resource that
+ * can be mapped by a 32-bit BAR.
*/
-static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus,
- unsigned int type,
- struct resource *res)
+static void pci_clip_resource_to_region(struct pci_bus *bus,
+ struct resource *res,
+ struct pci_bus_region *region)
{
- struct resource *r, *prev = NULL;
- int i;
+ struct pci_bus_region r;
+
+ pcibios_resource_to_bus(bus, &r, res);
+ if (r.start < region->start)
+ r.start = region->start;
+ if (r.end > region->end)
+ r.end = region->end;
+
+ if (r.end < r.start)
+ res->end = res->start - 1;
+ else
+ pcibios_bus_to_resource(bus, res, &r);
+}
+
+static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
+ resource_size_t size, resource_size_t align,
+ resource_size_t min, unsigned long type_mask,
+ resource_size_t (*alignf)(void *,
+ const struct resource *,
+ resource_size_t,
+ resource_size_t),
+ void *alignf_data,
+ struct pci_bus_region *region)
+{
+ int i, ret;
+ struct resource *r, avail;
+ resource_size_t max;
+
+ type_mask |= IORESOURCE_TYPE_BITS;
pci_bus_for_each_resource(bus, r, i) {
if (!r)
continue;
- if ((r->flags & IORESOURCE_TYPE_BITS) != type)
+ /* type_mask must match */
+ if ((res->flags ^ r->flags) & type_mask)
continue;
- /* If this resource is at or past the cursor, skip it */
- if (res) {
- if (r == res)
- continue;
- if (r->end > res->end)
- continue;
- if (r->end == res->end && r->start > res->start)
- continue;
- }
+ /* We cannot allocate a non-prefetching resource
+ from a pre-fetching area */
+ if ((r->flags & IORESOURCE_PREFETCH) &&
+ !(res->flags & IORESOURCE_PREFETCH))
+ continue;
- if (!prev)
- prev = r;
+ avail = *r;
+ pci_clip_resource_to_region(bus, &avail, region);
/*
- * A small resource is higher than a large one that ends at
- * the same address.
+ * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
+ * protect badly documented motherboard resources, but if
+ * this is an already-configured bridge window, its start
+ * overrides "min".
*/
- if (r->end > prev->end ||
- (r->end == prev->end && r->start > prev->start))
- prev = r;
- }
+ if (avail.start)
+ min = avail.start;
- return prev;
+ max = avail.end;
+
+ /* Ok, try it out.. */
+ ret = allocate_resource(r, res, size, min, max,
+ align, alignf, alignf_data);
+ if (ret == 0)
+ return 0;
+ }
+ return -ENOMEM;
}
/**
@@ -122,175 +197,91 @@ static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus,
* alignment and type, try to find an acceptable resource allocation
* for a specific device resource.
*/
-int
-pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
+int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
resource_size_t size, resource_size_t align,
- resource_size_t min, unsigned int type_mask,
+ resource_size_t min, unsigned long type_mask,
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
resource_size_t),
void *alignf_data)
{
- int ret = -ENOMEM;
- struct resource *r;
- resource_size_t max = -1;
- unsigned int type = res->flags & IORESOURCE_TYPE_BITS;
-
- type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
-
- /* don't allocate too high if the pref mem doesn't support 64bit*/
- if (!(res->flags & IORESOURCE_MEM_64))
- max = PCIBIOS_MAX_MEM_32;
-
- /* Look for space at highest addresses first */
- r = pci_bus_find_resource_prev(bus, type, NULL);
- for ( ; r; r = pci_bus_find_resource_prev(bus, type, r)) {
- /* type_mask must match */
- if ((res->flags ^ r->flags) & type_mask)
- continue;
-
- /* We cannot allocate a non-prefetching resource
- from a pre-fetching area */
- if ((r->flags & IORESOURCE_PREFETCH) &&
- !(res->flags & IORESOURCE_PREFETCH))
- continue;
-
- /* Ok, try it out.. */
- ret = allocate_resource(r, res, size,
- r->start ? : min,
- max, align,
- alignf, alignf_data);
- if (ret == 0)
- break;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ int rc;
+
+ if (res->flags & IORESOURCE_MEM_64) {
+ rc = pci_bus_alloc_from_region(bus, res, size, align, min,
+ type_mask, alignf, alignf_data,
+ &pci_high);
+ if (rc == 0)
+ return 0;
+
+ return pci_bus_alloc_from_region(bus, res, size, align, min,
+ type_mask, alignf, alignf_data,
+ &pci_64_bit);
}
- return ret;
+#endif
+
+ return pci_bus_alloc_from_region(bus, res, size, align, min,
+ type_mask, alignf, alignf_data,
+ &pci_32_bit);
}
+EXPORT_SYMBOL(pci_bus_alloc_resource);
+
+void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
/**
- * pci_bus_add_device - add a single device
+ * pci_bus_add_device - start driver for a single device
* @dev: device to add
*
- * This adds a single pci device to the global
- * device list and adds sysfs and procfs entries
+ * This adds sysfs entries and starts the device driver
*/
-int pci_bus_add_device(struct pci_dev *dev)
+void pci_bus_add_device(struct pci_dev *dev)
{
int retval;
- retval = device_add(&dev->dev);
- if (retval)
- return retval;
- dev->is_added = 1;
- pci_proc_attach_device(dev);
+ /*
+ * Cannot do this in pci_device_add() yet because the
+ * resources of some devices are not assigned yet.
+ */
+ pci_fixup_device(pci_fixup_final, dev);
pci_create_sysfs_dev_files(dev);
- return 0;
-}
-
-/**
- * pci_bus_add_child - add a child bus
- * @bus: bus to add
- *
- * This adds sysfs entries for a single bus
- */
-int pci_bus_add_child(struct pci_bus *bus)
-{
- int retval;
-
- if (bus->bridge)
- bus->dev.parent = bus->bridge;
-
- retval = device_register(&bus->dev);
- if (retval)
- return retval;
-
- bus->is_added = 1;
-
- retval = device_create_file(&bus->dev, &dev_attr_cpuaffinity);
- if (retval)
- return retval;
-
- retval = device_create_file(&bus->dev, &dev_attr_cpulistaffinity);
+ pci_proc_attach_device(dev);
- /* Create legacy_io and legacy_mem files for this bus */
- pci_create_legacy_files(bus);
+ dev->match_driver = true;
+ retval = device_attach(&dev->dev);
+ WARN_ON(retval < 0);
- return retval;
+ dev->is_added = 1;
}
+EXPORT_SYMBOL_GPL(pci_bus_add_device);
/**
- * pci_bus_add_devices - insert newly discovered PCI devices
+ * pci_bus_add_devices - start drivers for PCI devices
* @bus: bus to check for new devices
*
- * Add newly discovered PCI devices (which are on the bus->devices
- * list) to the global PCI device list, add the sysfs and procfs
- * entries. Where a bridge is found, add the discovered bus to
- * the parents list of child buses, and recurse (breadth-first
- * to be compatible with 2.4)
- *
- * Call hotplug for each new devices.
+ * Start drivers for PCI devices and add some sysfs entries.
*/
void pci_bus_add_devices(const struct pci_bus *bus)
{
struct pci_dev *dev;
struct pci_bus *child;
- int retval;
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Skip already-added devices */
if (dev->is_added)
continue;
- retval = pci_bus_add_device(dev);
- if (retval)
- dev_err(&dev->dev, "Error adding device, continuing\n");
+ pci_bus_add_device(dev);
}
list_for_each_entry(dev, &bus->devices, bus_list) {
BUG_ON(!dev->is_added);
-
child = dev->subordinate;
- /*
- * If there is an unattached subordinate bus, attach
- * it and then scan for unattached PCI devices.
- */
- if (!child)
- continue;
- if (list_empty(&child->node)) {
- down_write(&pci_bus_sem);
- list_add_tail(&child->node, &dev->bus->children);
- up_write(&pci_bus_sem);
- }
- pci_bus_add_devices(child);
-
- /*
- * register the bus with sysfs as the parent is now
- * properly registered.
- */
- if (child->is_added)
- continue;
- retval = pci_bus_add_child(child);
- if (retval)
- dev_err(&dev->dev, "Error adding bus, continuing\n");
- }
-}
-
-void pci_enable_bridges(struct pci_bus *bus)
-{
- struct pci_dev *dev;
- int retval;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->subordinate) {
- if (!pci_is_enabled(dev)) {
- retval = pci_enable_device(dev);
- if (retval)
- dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", retval);
- pci_set_master(dev);
- }
- pci_enable_bridges(dev->subordinate);
- }
+ if (child)
+ pci_bus_add_devices(child);
}
}
+EXPORT_SYMBOL(pci_bus_add_devices);
/** pci_walk_bus - walk devices on/under bus, calling callback.
* @top bus whose devices should be walked
@@ -333,10 +324,7 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
} else
next = dev->bus_list.next;
- /* Run device routines with the device locked */
- device_lock(&dev->dev);
retval = cb(dev, userdata);
- device_unlock(&dev->dev);
if (retval)
break;
}
@@ -344,7 +332,18 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
-EXPORT_SYMBOL(pci_bus_alloc_resource);
-EXPORT_SYMBOL_GPL(pci_bus_add_device);
-EXPORT_SYMBOL(pci_bus_add_devices);
-EXPORT_SYMBOL(pci_enable_bridges);
+struct pci_bus *pci_bus_get(struct pci_bus *bus)
+{
+ if (bus)
+ get_device(&bus->dev);
+ return bus;
+}
+EXPORT_SYMBOL(pci_bus_get);
+
+void pci_bus_put(struct pci_bus *bus)
+{
+ if (bus)
+ put_device(&bus->dev);
+}
+EXPORT_SYMBOL(pci_bus_put);
+
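The new pci_bus_get()/pci_bus_put() pair lets callers pin a bus across a sleeping operation. A minimal sketch (illustrative, not from the patch):

	/* Illustrative sketch: hold a bus reference across a sleeping section. */
	static void example_pin_bus(struct pci_dev *dev)
	{
		struct pci_bus *bus = pci_bus_get(dev->bus);	/* NULL-safe */

		/* ... sleeping work; the bus device cannot be released here ... */

		pci_bus_put(bus);	/* drop the reference, also NULL-safe */
	}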
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
deleted file mode 100644
index 0157708d474..00000000000
--- a/drivers/pci/dmar.c
+++ /dev/null
@@ -1,1461 +0,0 @@
-/*
- * Copyright (c) 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) 2006-2008 Intel Corporation
- * Author: Ashok Raj <ashok.raj@intel.com>
- * Author: Shaohua Li <shaohua.li@intel.com>
- * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- *
- * This file implements early detection/parsing of Remapping Devices
- * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
- * tables.
- *
- * These routines are used by both DMA-remapping and Interrupt-remapping
- */
-
-#include <linux/pci.h>
-#include <linux/dmar.h>
-#include <linux/iova.h>
-#include <linux/intel-iommu.h>
-#include <linux/timer.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/tboot.h>
-#include <linux/dmi.h>
-#include <linux/slab.h>
-#include <asm/iommu_table.h>
-
-#define PREFIX "DMAR: "
-
-/* No locks are needed as DMA remapping hardware unit
- * list is constructed at boot time and hotplug of
- * these units are not supported by the architecture.
- */
-LIST_HEAD(dmar_drhd_units);
-
-static struct acpi_table_header * __initdata dmar_tbl;
-static acpi_size dmar_tbl_size;
-
-static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
-{
- /*
- * add INCLUDE_ALL at the tail, so scan the list will find it at
- * the very end.
- */
- if (drhd->include_all)
- list_add_tail(&drhd->list, &dmar_drhd_units);
- else
- list_add(&drhd->list, &dmar_drhd_units);
-}
-
-static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
- struct pci_dev **dev, u16 segment)
-{
- struct pci_bus *bus;
- struct pci_dev *pdev = NULL;
- struct acpi_dmar_pci_path *path;
- int count;
-
- bus = pci_find_bus(segment, scope->bus);
- path = (struct acpi_dmar_pci_path *)(scope + 1);
- count = (scope->length - sizeof(struct acpi_dmar_device_scope))
- / sizeof(struct acpi_dmar_pci_path);
-
- while (count) {
- if (pdev)
- pci_dev_put(pdev);
- /*
- * Some BIOSes list non-exist devices in DMAR table, just
- * ignore it
- */
- if (!bus) {
- printk(KERN_WARNING
- PREFIX "Device scope bus [%d] not found\n",
- scope->bus);
- break;
- }
- pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
- if (!pdev) {
- printk(KERN_WARNING PREFIX
- "Device scope device [%04x:%02x:%02x.%02x] not found\n",
- segment, bus->number, path->dev, path->fn);
- break;
- }
- path ++;
- count --;
- bus = pdev->subordinate;
- }
- if (!pdev) {
- printk(KERN_WARNING PREFIX
- "Device scope device [%04x:%02x:%02x.%02x] not found\n",
- segment, scope->bus, path->dev, path->fn);
- *dev = NULL;
- return 0;
- }
- if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
- pdev->subordinate) || (scope->entry_type == \
- ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
- pci_dev_put(pdev);
- printk(KERN_WARNING PREFIX
- "Device scope type does not match for %s\n",
- pci_name(pdev));
- return -EINVAL;
- }
- *dev = pdev;
- return 0;
-}
-
-static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
- struct pci_dev ***devices, u16 segment)
-{
- struct acpi_dmar_device_scope *scope;
- void * tmp = start;
- int index;
- int ret;
-
- *cnt = 0;
- while (start < end) {
- scope = start;
- if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
- scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
- (*cnt)++;
- else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
- printk(KERN_WARNING PREFIX
- "Unsupported device scope\n");
- }
- start += scope->length;
- }
- if (*cnt == 0)
- return 0;
-
- *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
- if (!*devices)
- return -ENOMEM;
-
- start = tmp;
- index = 0;
- while (start < end) {
- scope = start;
- if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
- scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
- ret = dmar_parse_one_dev_scope(scope,
- &(*devices)[index], segment);
- if (ret) {
- kfree(*devices);
- return ret;
- }
- index ++;
- }
- start += scope->length;
- }
-
- return 0;
-}
-
-/**
- * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
- * structure which uniquely represent one DMA remapping hardware unit
- * present in the platform
- */
-static int __init
-dmar_parse_one_drhd(struct acpi_dmar_header *header)
-{
- struct acpi_dmar_hardware_unit *drhd;
- struct dmar_drhd_unit *dmaru;
- int ret = 0;
-
- drhd = (struct acpi_dmar_hardware_unit *)header;
- dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
- if (!dmaru)
- return -ENOMEM;
-
- dmaru->hdr = header;
- dmaru->reg_base_addr = drhd->address;
- dmaru->segment = drhd->segment;
- dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
-
- ret = alloc_iommu(dmaru);
- if (ret) {
- kfree(dmaru);
- return ret;
- }
- dmar_register_drhd_unit(dmaru);
- return 0;
-}
-
-static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
-{
- struct acpi_dmar_hardware_unit *drhd;
- int ret = 0;
-
- drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
-
- if (dmaru->include_all)
- return 0;
-
- ret = dmar_parse_dev_scope((void *)(drhd + 1),
- ((void *)drhd) + drhd->header.length,
- &dmaru->devices_cnt, &dmaru->devices,
- drhd->segment);
- if (ret) {
- list_del(&dmaru->list);
- kfree(dmaru);
- }
- return ret;
-}
-
-#ifdef CONFIG_DMAR
-LIST_HEAD(dmar_rmrr_units);
-
-static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
-{
- list_add(&rmrr->list, &dmar_rmrr_units);
-}
-
-
-static int __init
-dmar_parse_one_rmrr(struct acpi_dmar_header *header)
-{
- struct acpi_dmar_reserved_memory *rmrr;
- struct dmar_rmrr_unit *rmrru;
-
- rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
- if (!rmrru)
- return -ENOMEM;
-
- rmrru->hdr = header;
- rmrr = (struct acpi_dmar_reserved_memory *)header;
- rmrru->base_address = rmrr->base_address;
- rmrru->end_address = rmrr->end_address;
-
- dmar_register_rmrr_unit(rmrru);
- return 0;
-}
-
-static int __init
-rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
-{
- struct acpi_dmar_reserved_memory *rmrr;
- int ret;
-
- rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
- ret = dmar_parse_dev_scope((void *)(rmrr + 1),
- ((void *)rmrr) + rmrr->header.length,
- &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
-
- if (ret || (rmrru->devices_cnt == 0)) {
- list_del(&rmrru->list);
- kfree(rmrru);
- }
- return ret;
-}
-
-static LIST_HEAD(dmar_atsr_units);
-
-static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
-{
- struct acpi_dmar_atsr *atsr;
- struct dmar_atsr_unit *atsru;
-
- atsr = container_of(hdr, struct acpi_dmar_atsr, header);
- atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
- if (!atsru)
- return -ENOMEM;
-
- atsru->hdr = hdr;
- atsru->include_all = atsr->flags & 0x1;
-
- list_add(&atsru->list, &dmar_atsr_units);
-
- return 0;
-}
-
-static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
-{
- int rc;
- struct acpi_dmar_atsr *atsr;
-
- if (atsru->include_all)
- return 0;
-
- atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
- rc = dmar_parse_dev_scope((void *)(atsr + 1),
- (void *)atsr + atsr->header.length,
- &atsru->devices_cnt, &atsru->devices,
- atsr->segment);
- if (rc || !atsru->devices_cnt) {
- list_del(&atsru->list);
- kfree(atsru);
- }
-
- return rc;
-}
-
-int dmar_find_matched_atsr_unit(struct pci_dev *dev)
-{
- int i;
- struct pci_bus *bus;
- struct acpi_dmar_atsr *atsr;
- struct dmar_atsr_unit *atsru;
-
- dev = pci_physfn(dev);
-
- list_for_each_entry(atsru, &dmar_atsr_units, list) {
- atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
- if (atsr->segment == pci_domain_nr(dev->bus))
- goto found;
- }
-
- return 0;
-
-found:
- for (bus = dev->bus; bus; bus = bus->parent) {
- struct pci_dev *bridge = bus->self;
-
- if (!bridge || !pci_is_pcie(bridge) ||
- bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
-
- if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
- for (i = 0; i < atsru->devices_cnt; i++)
- if (atsru->devices[i] == bridge)
- return 1;
- break;
- }
- }
-
- if (atsru->include_all)
- return 1;
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_ACPI_NUMA
-static int __init
-dmar_parse_one_rhsa(struct acpi_dmar_header *header)
-{
- struct acpi_dmar_rhsa *rhsa;
- struct dmar_drhd_unit *drhd;
-
- rhsa = (struct acpi_dmar_rhsa *)header;
- for_each_drhd_unit(drhd) {
- if (drhd->reg_base_addr == rhsa->base_address) {
- int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
-
- if (!node_online(node))
- node = -1;
- drhd->iommu->node = node;
- return 0;
- }
- }
- WARN_TAINT(
- 1, TAINT_FIRMWARE_WORKAROUND,
- "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- drhd->reg_base_addr,
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
-
- return 0;
-}
-#endif
-
-static void __init
-dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
-{
- struct acpi_dmar_hardware_unit *drhd;
- struct acpi_dmar_reserved_memory *rmrr;
- struct acpi_dmar_atsr *atsr;
- struct acpi_dmar_rhsa *rhsa;
-
- switch (header->type) {
- case ACPI_DMAR_TYPE_HARDWARE_UNIT:
- drhd = container_of(header, struct acpi_dmar_hardware_unit,
- header);
- printk (KERN_INFO PREFIX
- "DRHD base: %#016Lx flags: %#x\n",
- (unsigned long long)drhd->address, drhd->flags);
- break;
- case ACPI_DMAR_TYPE_RESERVED_MEMORY:
- rmrr = container_of(header, struct acpi_dmar_reserved_memory,
- header);
- printk (KERN_INFO PREFIX
- "RMRR base: %#016Lx end: %#016Lx\n",
- (unsigned long long)rmrr->base_address,
- (unsigned long long)rmrr->end_address);
- break;
- case ACPI_DMAR_TYPE_ATSR:
- atsr = container_of(header, struct acpi_dmar_atsr, header);
- printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
- break;
- case ACPI_DMAR_HARDWARE_AFFINITY:
- rhsa = container_of(header, struct acpi_dmar_rhsa, header);
- printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
- (unsigned long long)rhsa->base_address,
- rhsa->proximity_domain);
- break;
- }
-}
-
-/**
- * dmar_table_detect - checks to see if the platform supports DMAR devices
- */
-static int __init dmar_table_detect(void)
-{
- acpi_status status = AE_OK;
-
- /* if we could find DMAR table, then there are DMAR devices */
- status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
- (struct acpi_table_header **)&dmar_tbl,
- &dmar_tbl_size);
-
- if (ACPI_SUCCESS(status) && !dmar_tbl) {
- printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
- status = AE_NOT_FOUND;
- }
-
- return (ACPI_SUCCESS(status) ? 1 : 0);
-}
-
-/**
- * parse_dmar_table - parses the DMA reporting table
- */
-static int __init
-parse_dmar_table(void)
-{
- struct acpi_table_dmar *dmar;
- struct acpi_dmar_header *entry_header;
- int ret = 0;
-
- /*
- * Do it again, earlier dmar_tbl mapping could be mapped with
- * fixed map.
- */
- dmar_table_detect();
-
- /*
- * ACPI tables may not be DMA protected by tboot, so use DMAR copy
- * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
- */
- dmar_tbl = tboot_get_dmar_table(dmar_tbl);
-
- dmar = (struct acpi_table_dmar *)dmar_tbl;
- if (!dmar)
- return -ENODEV;
-
- if (dmar->width < PAGE_SHIFT - 1) {
- printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
- return -EINVAL;
- }
-
- printk (KERN_INFO PREFIX "Host address width %d\n",
- dmar->width + 1);
-
- entry_header = (struct acpi_dmar_header *)(dmar + 1);
- while (((unsigned long)entry_header) <
- (((unsigned long)dmar) + dmar_tbl->length)) {
- /* Avoid looping forever on bad ACPI tables */
- if (entry_header->length == 0) {
- printk(KERN_WARNING PREFIX
- "Invalid 0-length structure\n");
- ret = -EINVAL;
- break;
- }
-
- dmar_table_print_dmar_entry(entry_header);
-
- switch (entry_header->type) {
- case ACPI_DMAR_TYPE_HARDWARE_UNIT:
- ret = dmar_parse_one_drhd(entry_header);
- break;
- case ACPI_DMAR_TYPE_RESERVED_MEMORY:
-#ifdef CONFIG_DMAR
- ret = dmar_parse_one_rmrr(entry_header);
-#endif
- break;
- case ACPI_DMAR_TYPE_ATSR:
-#ifdef CONFIG_DMAR
- ret = dmar_parse_one_atsr(entry_header);
-#endif
- break;
- case ACPI_DMAR_HARDWARE_AFFINITY:
-#ifdef CONFIG_ACPI_NUMA
- ret = dmar_parse_one_rhsa(entry_header);
-#endif
- break;
- default:
- printk(KERN_WARNING PREFIX
- "Unknown DMAR structure type %d\n",
- entry_header->type);
- ret = 0; /* for forward compatibility */
- break;
- }
- if (ret)
- break;
-
- entry_header = ((void *)entry_header + entry_header->length);
- }
- return ret;
-}
-
-static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
- struct pci_dev *dev)
-{
- int index;
-
- while (dev) {
- for (index = 0; index < cnt; index++)
- if (dev == devices[index])
- return 1;
-
- /* Check our parent */
- dev = dev->bus->self;
- }
-
- return 0;
-}
-
-struct dmar_drhd_unit *
-dmar_find_matched_drhd_unit(struct pci_dev *dev)
-{
- struct dmar_drhd_unit *dmaru = NULL;
- struct acpi_dmar_hardware_unit *drhd;
-
- dev = pci_physfn(dev);
-
- list_for_each_entry(dmaru, &dmar_drhd_units, list) {
- drhd = container_of(dmaru->hdr,
- struct acpi_dmar_hardware_unit,
- header);
-
- if (dmaru->include_all &&
- drhd->segment == pci_domain_nr(dev->bus))
- return dmaru;
-
- if (dmar_pci_device_match(dmaru->devices,
- dmaru->devices_cnt, dev))
- return dmaru;
- }
-
- return NULL;
-}
-
-int __init dmar_dev_scope_init(void)
-{
- struct dmar_drhd_unit *drhd, *drhd_n;
- int ret = -ENODEV;
-
- list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
- ret = dmar_parse_dev(drhd);
- if (ret)
- return ret;
- }
-
-#ifdef CONFIG_DMAR
- {
- struct dmar_rmrr_unit *rmrr, *rmrr_n;
- struct dmar_atsr_unit *atsr, *atsr_n;
-
- list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
- ret = rmrr_parse_dev(rmrr);
- if (ret)
- return ret;
- }
-
- list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
- ret = atsr_parse_dev(atsr);
- if (ret)
- return ret;
- }
- }
-#endif
-
- return ret;
-}
-
-int __init dmar_table_init(void)
-{
- static int dmar_table_initialized;
- int ret;
-
- if (dmar_table_initialized)
- return 0;
-
- dmar_table_initialized = 1;
-
- ret = parse_dmar_table();
- if (ret) {
- if (ret != -ENODEV)
- printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
- return ret;
- }
-
- if (list_empty(&dmar_drhd_units)) {
- printk(KERN_INFO PREFIX "No DMAR devices found\n");
- return -ENODEV;
- }
-
-#ifdef CONFIG_DMAR
- if (list_empty(&dmar_rmrr_units))
- printk(KERN_INFO PREFIX "No RMRR found\n");
-
- if (list_empty(&dmar_atsr_units))
- printk(KERN_INFO PREFIX "No ATSR found\n");
-#endif
-
- return 0;
-}
-
-static void warn_invalid_dmar(u64 addr, const char *message)
-{
- WARN_TAINT_ONCE(
- 1, TAINT_FIRMWARE_WORKAROUND,
- "Your BIOS is broken; DMAR reported at address %llx%s!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- addr, message,
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
-}
-
-int __init check_zero_address(void)
-{
- struct acpi_table_dmar *dmar;
- struct acpi_dmar_header *entry_header;
- struct acpi_dmar_hardware_unit *drhd;
-
- dmar = (struct acpi_table_dmar *)dmar_tbl;
- entry_header = (struct acpi_dmar_header *)(dmar + 1);
-
- while (((unsigned long)entry_header) <
- (((unsigned long)dmar) + dmar_tbl->length)) {
- /* Avoid looping forever on bad ACPI tables */
- if (entry_header->length == 0) {
- printk(KERN_WARNING PREFIX
- "Invalid 0-length structure\n");
- return 0;
- }
-
- if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
- void __iomem *addr;
- u64 cap, ecap;
-
- drhd = (void *)entry_header;
- if (!drhd->address) {
- warn_invalid_dmar(0, "");
- goto failed;
- }
-
- addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
- if (!addr) {
- printk(KERN_ERR "IOMMU: can't validate: %llx\n", drhd->address);
- goto failed;
- }
- cap = dmar_readq(addr + DMAR_CAP_REG);
- ecap = dmar_readq(addr + DMAR_ECAP_REG);
- early_iounmap(addr, VTD_PAGE_SIZE);
- if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
- warn_invalid_dmar(drhd->address,
- " returns all ones");
- goto failed;
- }
- }
-
- entry_header = ((void *)entry_header + entry_header->length);
- }
- return 1;
-
-failed:
-#ifdef CONFIG_DMAR
- dmar_disabled = 1;
-#endif
- return 0;
-}
-
-int __init detect_intel_iommu(void)
-{
- int ret;
-
- ret = dmar_table_detect();
- if (ret)
- ret = check_zero_address();
- {
-#ifdef CONFIG_INTR_REMAP
- struct acpi_table_dmar *dmar;
- /*
- * For now we will disable DMA remapping when interrupt
- * remapping is enabled.
- * Once queued invalidation support for IOTLB invalidation
- * is added, this will no longer be needed.
- */
- dmar = (struct acpi_table_dmar *) dmar_tbl;
- if (ret && cpu_has_x2apic && dmar->flags & 0x1)
- printk(KERN_INFO
- "Queued invalidation will be enabled to support "
- "x2apic and Intr-remapping.\n");
-#endif
-#ifdef CONFIG_DMAR
- if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
- iommu_detected = 1;
- /* Make sure ACS will be enabled */
- pci_request_acs();
- }
-#endif
-#ifdef CONFIG_X86
- if (ret)
- x86_init.iommu.iommu_init = intel_iommu_init;
-#endif
- }
- early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
- dmar_tbl = NULL;
-
- return ret ? 1 : -ENODEV;
-}
-
-int alloc_iommu(struct dmar_drhd_unit *drhd)
-{
- struct intel_iommu *iommu;
- int map_size;
- u32 ver;
- static int iommu_allocated = 0;
- int agaw = 0;
- int msagaw = 0;
-
- if (!drhd->reg_base_addr) {
- warn_invalid_dmar(0, "");
- return -EINVAL;
- }
-
- iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
- if (!iommu)
- return -ENOMEM;
-
- iommu->seq_id = iommu_allocated++;
- sprintf(iommu->name, "dmar%d", iommu->seq_id);
-
- iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
- if (!iommu->reg) {
- printk(KERN_ERR "IOMMU: can't map the region\n");
- goto error;
- }
- iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
- iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
- if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
- warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
- goto err_unmap;
- }
-
-#ifdef CONFIG_DMAR
- agaw = iommu_calculate_agaw(iommu);
- if (agaw < 0) {
- printk(KERN_ERR
- "Cannot get a valid agaw for iommu (seq_id = %d)\n",
- iommu->seq_id);
- goto err_unmap;
- }
- msagaw = iommu_calculate_max_sagaw(iommu);
- if (msagaw < 0) {
- printk(KERN_ERR
- "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
- iommu->seq_id);
- goto err_unmap;
- }
-#endif
- iommu->agaw = agaw;
- iommu->msagaw = msagaw;
-
- iommu->node = -1;
-
- /* the registers might be more than one page */
- map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
- cap_max_fault_reg_offset(iommu->cap));
- map_size = VTD_PAGE_ALIGN(map_size);
- if (map_size > VTD_PAGE_SIZE) {
- iounmap(iommu->reg);
- iommu->reg = ioremap(drhd->reg_base_addr, map_size);
- if (!iommu->reg) {
- printk(KERN_ERR "IOMMU: can't map the region\n");
- goto error;
- }
- }
-
- ver = readl(iommu->reg + DMAR_VER_REG);
- pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
- iommu->seq_id,
- (unsigned long long)drhd->reg_base_addr,
- DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
- (unsigned long long)iommu->cap,
- (unsigned long long)iommu->ecap);
-
- spin_lock_init(&iommu->register_lock);
-
- drhd->iommu = iommu;
- return 0;
-
- err_unmap:
- iounmap(iommu->reg);
- error:
- kfree(iommu);
- return -1;
-}
-
-void free_iommu(struct intel_iommu *iommu)
-{
- if (!iommu)
- return;
-
-#ifdef CONFIG_DMAR
- free_dmar_iommu(iommu);
-#endif
-
- if (iommu->reg)
- iounmap(iommu->reg);
- kfree(iommu);
-}
-
-/*
- * Reclaim all the submitted descriptors which have completed their work.
- */
-static inline void reclaim_free_desc(struct q_inval *qi)
-{
- while (qi->desc_status[qi->free_tail] == QI_DONE ||
- qi->desc_status[qi->free_tail] == QI_ABORT) {
- qi->desc_status[qi->free_tail] = QI_FREE;
- qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
- qi->free_cnt++;
- }
-}
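-
-/*
- * Illustrative sketch (hypothetical helper, not used by the driver):
- * free_head is where the next descriptor is allocated and free_tail is
- * where completed ones are reclaimed, so the in-flight count is the
- * circular distance between them. Because head == tail both when the
- * ring is empty and when it is full, free_cnt is tracked explicitly
- * rather than derived from the two indices.
- */
-static inline int qi_in_flight_sketch(struct q_inval *qi)
-{
- return (qi->free_head - qi->free_tail + QI_LENGTH) % QI_LENGTH;
-}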
-
-static int qi_check_fault(struct intel_iommu *iommu, int index)
-{
- u32 fault;
- int head, tail;
- struct q_inval *qi = iommu->qi;
- int wait_index = (index + 1) % QI_LENGTH;
-
- if (qi->desc_status[wait_index] == QI_ABORT)
- return -EAGAIN;
-
- fault = readl(iommu->reg + DMAR_FSTS_REG);
-
- /*
- * If IQE happens, the head points to the descriptor associated
- * with the error. No new descriptors are fetched until the IQE
- * is cleared.
- */
- if (fault & DMA_FSTS_IQE) {
- head = readl(iommu->reg + DMAR_IQH_REG);
- if ((head >> DMAR_IQ_SHIFT) == index) {
- printk(KERN_ERR "VT-d detected invalid descriptor: "
- "low=%llx, high=%llx\n",
- (unsigned long long)qi->desc[index].low,
- (unsigned long long)qi->desc[index].high);
- memcpy(&qi->desc[index], &qi->desc[wait_index],
- sizeof(struct qi_desc));
- __iommu_flush_cache(iommu, &qi->desc[index],
- sizeof(struct qi_desc));
- writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
- return -EINVAL;
- }
- }
-
- /*
- * If ITE happens, all pending wait_desc commands are aborted.
- * No new descriptors are fetched until the ITE is cleared.
- */
- if (fault & DMA_FSTS_ITE) {
- head = readl(iommu->reg + DMAR_IQH_REG);
- head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
- head |= 1;
- tail = readl(iommu->reg + DMAR_IQT_REG);
- tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
-
- writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
-
- do {
- if (qi->desc_status[head] == QI_IN_USE)
- qi->desc_status[head] = QI_ABORT;
- head = (head - 2 + QI_LENGTH) % QI_LENGTH;
- } while (head != tail);
-
- if (qi->desc_status[wait_index] == QI_ABORT)
- return -EAGAIN;
- }
-
- if (fault & DMA_FSTS_ICE)
- writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
-
- return 0;
-}
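-
-/*
- * Illustrative sketch (hypothetical helper): qi_submit_sync() below
- * always queues an even-indexed work descriptor immediately followed
- * by its odd-indexed wait descriptor. That pairing is why the ITE
- * recovery above forces the scan index odd ("head |= 1") and walks
- * backwards two slots at a time: only the wait descriptors carry a
- * status word to abort.
- */
-static inline int qi_wait_index_sketch(int index)
-{
- return (index + 1) % QI_LENGTH; /* wait slot paired with a work slot */
-}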
-
-/*
- * Submit the queued invalidation descriptor to the remapping
- * hardware unit and wait for its completion.
- */
-int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
-{
- int rc;
- struct q_inval *qi = iommu->qi;
- struct qi_desc *hw, wait_desc;
- int wait_index, index;
- unsigned long flags;
-
- if (!qi)
- return 0;
-
- hw = qi->desc;
-
-restart:
- rc = 0;
-
- spin_lock_irqsave(&qi->q_lock, flags);
- while (qi->free_cnt < 3) {
- spin_unlock_irqrestore(&qi->q_lock, flags);
- cpu_relax();
- spin_lock_irqsave(&qi->q_lock, flags);
- }
-
- index = qi->free_head;
- wait_index = (index + 1) % QI_LENGTH;
-
- qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
-
- hw[index] = *desc;
-
- wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
- QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
- wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
-
- hw[wait_index] = wait_desc;
-
- __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
- __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
-
- qi->free_head = (qi->free_head + 2) % QI_LENGTH;
- qi->free_cnt -= 2;
-
- /*
- * Update the HW tail register to indicate the presence of
- * new descriptors.
- */
- writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
-
- while (qi->desc_status[wait_index] != QI_DONE) {
- /*
- * We leave the interrupts disabled to prevent the interrupt
- * context from queueing another cmd while one is already
- * submitted and waiting for completion on this cpu. This avoids
- * a deadlock where the interrupt context could wait indefinitely
- * for free slots in the queue.
- */
- rc = qi_check_fault(iommu, index);
- if (rc)
- break;
-
- spin_unlock(&qi->q_lock);
- cpu_relax();
- spin_lock(&qi->q_lock);
- }
-
- qi->desc_status[index] = QI_DONE;
-
- reclaim_free_desc(qi);
- spin_unlock_irqrestore(&qi->q_lock, flags);
-
- if (rc == -EAGAIN)
- goto restart;
-
- return rc;
-}
-
-/*
- * Flush the global interrupt entry cache.
- */
-void qi_global_iec(struct intel_iommu *iommu)
-{
- struct qi_desc desc;
-
- desc.low = QI_IEC_TYPE;
- desc.high = 0;
-
- /* should never fail */
- qi_submit_sync(&desc, iommu);
-}
-
-void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
- u64 type)
-{
- struct qi_desc desc;
-
- desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
- | QI_CC_GRAN(type) | QI_CC_TYPE;
- desc.high = 0;
-
- qi_submit_sync(&desc, iommu);
-}
-
-void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type)
-{
- u8 dw = 0, dr = 0;
- struct qi_desc desc;
- int ih = 0;
-
- if (cap_write_drain(iommu->cap))
- dw = 1;
-
- if (cap_read_drain(iommu->cap))
- dr = 1;
-
- desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
- | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
- desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
- | QI_IOTLB_AM(size_order);
-
- qi_submit_sync(&desc, iommu);
-}
-
-void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
- u64 addr, unsigned mask)
-{
- struct qi_desc desc;
-
- if (mask) {
- BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
- addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
- desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
- } else
- desc.high = QI_DEV_IOTLB_ADDR(addr);
-
- if (qdep >= QI_DEV_IOTLB_MAX_INVS)
- qdep = 0;
-
- desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
- QI_DIOTLB_TYPE;
-
- qi_submit_sync(&desc, iommu);
-}
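-
-/*
- * Illustrative sketch (hypothetical helper): the device-IOTLB
- * descriptor encodes the invalidation size in the address itself. For
- * a non-zero mask the address must be aligned to 2^(VTD_PAGE_SHIFT +
- * mask) bytes and all address bits below the size bit are set, so with
- * mask == 1 (an 8KiB range, assuming 4KiB VT-d pages) address 0x2000
- * is encoded as 0x2fff before QI_DEV_IOTLB_SIZE is OR-ed in.
- */
-static inline u64 qi_dev_iotlb_addr_sketch(u64 addr, unsigned mask)
-{
- return mask ? (addr | ((1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1)) : addr;
-}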
-
-/*
- * Disable Queued Invalidation interface.
- */
-void dmar_disable_qi(struct intel_iommu *iommu)
-{
- unsigned long flags;
- u32 sts;
- cycles_t start_time = get_cycles();
-
- if (!ecap_qis(iommu->ecap))
- return;
-
- spin_lock_irqsave(&iommu->register_lock, flags);
-
- sts = readl(iommu->reg + DMAR_GSTS_REG);
- if (!(sts & DMA_GSTS_QIES))
- goto end;
-
- /*
- * Give the hardware a chance to complete the pending invalidation requests.
- */
- while ((readl(iommu->reg + DMAR_IQT_REG) !=
- readl(iommu->reg + DMAR_IQH_REG)) &&
- (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
- cpu_relax();
-
- iommu->gcmd &= ~DMA_GCMD_QIE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
- !(sts & DMA_GSTS_QIES), sts);
-end:
- spin_unlock_irqrestore(&iommu->register_lock, flags);
-}
-
-/*
- * Enable queued invalidation.
- */
-static void __dmar_enable_qi(struct intel_iommu *iommu)
-{
- u32 sts;
- unsigned long flags;
- struct q_inval *qi = iommu->qi;
-
- qi->free_head = qi->free_tail = 0;
- qi->free_cnt = QI_LENGTH;
-
- spin_lock_irqsave(&iommu->register_lock, flags);
-
- /* write zero to the tail reg */
- writel(0, iommu->reg + DMAR_IQT_REG);
-
- dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
-
- iommu->gcmd |= DMA_GCMD_QIE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
- /* Make sure hardware complete it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
-
- spin_unlock_irqrestore(&iommu->register_lock, flags);
-}
-
-/*
- * Enable the Queued Invalidation interface. This is required for
- * interrupt remapping and is also used by DMA remapping, where it
- * replaces register-based IOTLB invalidation.
- */
-int dmar_enable_qi(struct intel_iommu *iommu)
-{
- struct q_inval *qi;
- struct page *desc_page;
-
- if (!ecap_qis(iommu->ecap))
- return -ENOENT;
-
- /*
- * queued invalidation is already set up and enabled.
- */
- if (iommu->qi)
- return 0;
-
- iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
- if (!iommu->qi)
- return -ENOMEM;
-
- qi = iommu->qi;
-
- desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
- if (!desc_page) {
- kfree(qi);
- iommu->qi = NULL;
- return -ENOMEM;
- }
-
- qi->desc = page_address(desc_page);
-
- qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
- if (!qi->desc_status) {
- free_page((unsigned long) qi->desc);
- kfree(qi);
- iommu->qi = NULL;
- return -ENOMEM;
- }
-
- qi->free_head = qi->free_tail = 0;
- qi->free_cnt = QI_LENGTH;
-
- spin_lock_init(&qi->q_lock);
-
- __dmar_enable_qi(iommu);
-
- return 0;
-}
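-
-/*
- * Illustrative sketch (hypothetical helper, never called): the single
- * zeroed page allocated above holds the whole ring, assuming
- * QI_LENGTH == 256 descriptors of 16 bytes each (256 * 16 == 4096).
- */
-static inline void qi_ring_fits_in_page_sketch(void)
-{
- BUILD_BUG_ON(QI_LENGTH * sizeof(struct qi_desc) > PAGE_SIZE);
-}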
-
-/* iommu interrupt handling. Most of it is MSI-like. */
-
-enum faulttype {
- DMA_REMAP,
- INTR_REMAP,
- UNKNOWN,
-};
-
-static const char *dma_remap_fault_reasons[] =
-{
- "Software",
- "Present bit in root entry is clear",
- "Present bit in context entry is clear",
- "Invalid context entry",
- "Access beyond MGAW",
- "PTE Write access is not set",
- "PTE Read access is not set",
- "Next page table ptr is invalid",
- "Root table address invalid",
- "Context table ptr is invalid",
- "non-zero reserved fields in RTP",
- "non-zero reserved fields in CTP",
- "non-zero reserved fields in PTE",
-};
-
-static const char *intr_remap_fault_reasons[] =
-{
- "Detected reserved fields in the decoded interrupt-remapped request",
- "Interrupt index exceeded the interrupt-remapping table size",
- "Present field in the IRTE entry is clear",
- "Error accessing interrupt-remapping table pointed by IRTA_REG",
- "Detected reserved fields in the IRTE entry",
- "Blocked a compatibility format interrupt request",
- "Blocked an interrupt request due to source-id verification failure",
-};
-
-#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
-{
- if (fault_reason >= 0x20 && (fault_reason < 0x20 +
- ARRAY_SIZE(intr_remap_fault_reasons))) {
- *fault_type = INTR_REMAP;
- return intr_remap_fault_reasons[fault_reason - 0x20];
- } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
- *fault_type = DMA_REMAP;
- return dma_remap_fault_reasons[fault_reason];
- } else {
- *fault_type = UNKNOWN;
- return "Unknown";
- }
-}
-
-void dmar_msi_unmask(struct irq_data *data)
-{
- struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
- unsigned long flag;
-
- /* unmask it */
- spin_lock_irqsave(&iommu->register_lock, flag);
- writel(0, iommu->reg + DMAR_FECTL_REG);
- /* Read a reg to force flush the post write */
- readl(iommu->reg + DMAR_FECTL_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_mask(struct irq_data *data)
-{
- unsigned long flag;
- struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
-
- /* mask it */
- spin_lock_irqsave(&iommu->register_lock, flag);
- writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
- /* Read a reg to force flush the post write */
- readl(iommu->reg + DMAR_FECTL_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_write(int irq, struct msi_msg *msg)
-{
- struct intel_iommu *iommu = get_irq_data(irq);
- unsigned long flag;
-
- spin_lock_irqsave(&iommu->register_lock, flag);
- writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
- writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
- writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_read(int irq, struct msi_msg *msg)
-{
- struct intel_iommu *iommu = get_irq_data(irq);
- unsigned long flag;
-
- spin_lock_irqsave(&iommu->register_lock, flag);
- msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
- msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
- msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
- u8 fault_reason, u16 source_id, unsigned long long addr)
-{
- const char *reason;
- int fault_type;
-
- reason = dmar_get_fault_reason(fault_reason, &fault_type);
-
- if (fault_type == INTR_REMAP)
- printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
- "fault index %llx\n"
- "INTR-REMAP:[fault reason %02d] %s\n",
- (source_id >> 8), PCI_SLOT(source_id & 0xFF),
- PCI_FUNC(source_id & 0xFF), addr >> 48,
- fault_reason, reason);
- else
- printk(KERN_ERR
- "DMAR:[%s] Request device [%02x:%02x.%d] "
- "fault addr %llx \n"
- "DMAR:[fault reason %02d] %s\n",
- (type ? "DMA Read" : "DMA Write"),
- (source_id >> 8), PCI_SLOT(source_id & 0xFF),
- PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
- return 0;
-}
-
-#define PRIMARY_FAULT_REG_LEN (16)
-irqreturn_t dmar_fault(int irq, void *dev_id)
-{
- struct intel_iommu *iommu = dev_id;
- int reg, fault_index;
- u32 fault_status;
- unsigned long flag;
-
- spin_lock_irqsave(&iommu->register_lock, flag);
- fault_status = readl(iommu->reg + DMAR_FSTS_REG);
- if (fault_status)
- printk(KERN_ERR "DRHD: handling fault status reg %x\n",
- fault_status);
-
- /* TBD: ignore advanced fault log currently */
- if (!(fault_status & DMA_FSTS_PPF))
- goto clear_rest;
-
- fault_index = dma_fsts_fault_record_index(fault_status);
- reg = cap_fault_reg_offset(iommu->cap);
- while (1) {
- u8 fault_reason;
- u16 source_id;
- u64 guest_addr;
- int type;
- u32 data;
-
- /* highest 32 bits */
- data = readl(iommu->reg + reg +
- fault_index * PRIMARY_FAULT_REG_LEN + 12);
- if (!(data & DMA_FRCD_F))
- break;
-
- fault_reason = dma_frcd_fault_reason(data);
- type = dma_frcd_type(data);
-
- data = readl(iommu->reg + reg +
- fault_index * PRIMARY_FAULT_REG_LEN + 8);
- source_id = dma_frcd_source_id(data);
-
- guest_addr = dmar_readq(iommu->reg + reg +
- fault_index * PRIMARY_FAULT_REG_LEN);
- guest_addr = dma_frcd_page_addr(guest_addr);
- /* clear the fault */
- writel(DMA_FRCD_F, iommu->reg + reg +
- fault_index * PRIMARY_FAULT_REG_LEN + 12);
-
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-
- dmar_fault_do_one(iommu, type, fault_reason,
- source_id, guest_addr);
-
- fault_index++;
- if (fault_index >= cap_num_fault_regs(iommu->cap))
- fault_index = 0;
- spin_lock_irqsave(&iommu->register_lock, flag);
- }
-clear_rest:
- /* clear all the other faults */
- fault_status = readl(iommu->reg + DMAR_FSTS_REG);
- writel(fault_status, iommu->reg + DMAR_FSTS_REG);
-
- spin_unlock_irqrestore(&iommu->register_lock, flag);
- return IRQ_HANDLED;
-}
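-
-/*
- * Illustrative sketch (hypothetical type, for reference only): the
- * 16-byte primary fault record layout that the +0/+8/+12 reads above
- * walk through, assuming the register layout in the VT-d spec.
- */
-struct dmar_fault_record_sketch {
- u64 addr; /* offset 0: faulting page address */
- u32 source_id; /* offset 8: source id in the low 16 bits */
- u32 status; /* offset 12: fault reason, type and the F bit */
-};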
-
-int dmar_set_interrupt(struct intel_iommu *iommu)
-{
- int irq, ret;
-
- /*
- * Check if the fault interrupt is already initialized.
- */
- if (iommu->irq)
- return 0;
-
- irq = create_irq();
- if (!irq) {
- printk(KERN_ERR "IOMMU: no free vectors\n");
- return -EINVAL;
- }
-
- set_irq_data(irq, iommu);
- iommu->irq = irq;
-
- ret = arch_setup_dmar_msi(irq);
- if (ret) {
- set_irq_data(irq, NULL);
- iommu->irq = 0;
- destroy_irq(irq);
- return ret;
- }
-
- ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
- if (ret)
- printk(KERN_ERR "IOMMU: can't request irq\n");
- return ret;
-}
-
-int __init enable_drhd_fault_handling(void)
-{
- struct dmar_drhd_unit *drhd;
-
- /*
- * Enable fault control interrupt.
- */
- for_each_drhd_unit(drhd) {
- int ret;
- struct intel_iommu *iommu = drhd->iommu;
- ret = dmar_set_interrupt(iommu);
-
- if (ret) {
- printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
- " interrupt, ret %d\n",
- (unsigned long long)drhd->reg_base_addr, ret);
- return -1;
- }
- }
-
- return 0;
-}
-
-/*
- * Re-enable Queued Invalidation interface.
- */
-int dmar_reenable_qi(struct intel_iommu *iommu)
-{
- if (!ecap_qis(iommu->ecap))
- return -ENOENT;
-
- if (!iommu->qi)
- return -ENOENT;
-
- /*
- * First disable queued invalidation.
- */
- dmar_disable_qi(iommu);
- /*
- * Then enable queued invalidation again. Since there is no pending
- * invalidation requests now, it's safe to re-enable queued
- * invalidation.
- */
- __dmar_enable_qi(iommu);
-
- return 0;
-}
-
-/*
- * Check interrupt remapping support in DMAR table description.
- */
-int __init dmar_ir_support(void)
-{
- struct acpi_table_dmar *dmar;
- dmar = (struct acpi_table_dmar *)dmar_tbl;
- if (!dmar)
- return 0;
- return dmar->flags & 0x1;
-}
-IOMMU_INIT_POST(detect_intel_iommu);
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
new file mode 100644
index 00000000000..0e5f3c95af5
--- /dev/null
+++ b/drivers/pci/host-bridge.c
@@ -0,0 +1,84 @@
+/*
+ * host bridge related code
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+
+#include "pci.h"
+
+static struct pci_bus *find_pci_root_bus(struct pci_bus *bus)
+{
+ while (bus->parent)
+ bus = bus->parent;
+
+ return bus;
+}
+
+static struct pci_host_bridge *find_pci_host_bridge(struct pci_bus *bus)
+{
+ struct pci_bus *root_bus = find_pci_root_bus(bus);
+
+ return to_pci_host_bridge(root_bus->bridge);
+}
+
+void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
+ void (*release_fn)(struct pci_host_bridge *),
+ void *release_data)
+{
+ bridge->release_fn = release_fn;
+ bridge->release_data = release_data;
+}
+
+void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
+ struct resource *res)
+{
+ struct pci_host_bridge *bridge = find_pci_host_bridge(bus);
+ struct pci_host_bridge_window *window;
+ resource_size_t offset = 0;
+
+ list_for_each_entry(window, &bridge->windows, list) {
+ if (resource_contains(window->res, res)) {
+ offset = window->offset;
+ break;
+ }
+ }
+
+ region->start = res->start - offset;
+ region->end = res->end - offset;
+}
+EXPORT_SYMBOL(pcibios_resource_to_bus);
+
+static bool region_contains(struct pci_bus_region *region1,
+ struct pci_bus_region *region2)
+{
+ return region1->start <= region2->start && region1->end >= region2->end;
+}
+
+void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
+ struct pci_bus_region *region)
+{
+ struct pci_host_bridge *bridge = find_pci_host_bridge(bus);
+ struct pci_host_bridge_window *window;
+ resource_size_t offset = 0;
+
+ list_for_each_entry(window, &bridge->windows, list) {
+ struct pci_bus_region bus_region;
+
+ if (resource_type(res) != resource_type(window->res))
+ continue;
+
+ bus_region.start = window->res->start - window->offset;
+ bus_region.end = window->res->end - window->offset;
+
+ if (region_contains(&bus_region, region)) {
+ offset = window->offset;
+ break;
+ }
+ }
+
+ res->start = region->start + offset;
+ res->end = region->end + offset;
+}
+EXPORT_SYMBOL(pcibios_bus_to_resource);
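+
+/*
+ * Illustrative sketch (hypothetical usage): the two helpers above are
+ * inverses of each other. For a host bridge window with, say, a
+ * 0x80000000 CPU-to-bus offset, a resource at 0x80001000 translates to
+ * bus address 0x1000 and back again.
+ */
+static inline void pci_round_trip_sketch(struct pci_bus *bus,
+ struct resource *res)
+{
+ struct pci_bus_region region;
+
+ pcibios_resource_to_bus(bus, &region, res); /* CPU resource -> bus address */
+ pcibios_bus_to_resource(bus, res, &region); /* bus address -> CPU resource */
+}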
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
new file mode 100644
index 00000000000..21df477be0c
--- /dev/null
+++ b/drivers/pci/host/Kconfig
@@ -0,0 +1,49 @@
+menu "PCI host controller drivers"
+ depends on PCI
+
+config PCI_MVEBU
+ bool "Marvell EBU PCIe controller"
+ depends on ARCH_MVEBU || ARCH_DOVE || ARCH_KIRKWOOD
+ depends on OF
+
+config PCIE_DW
+ bool
+
+config PCI_EXYNOS
+ bool "Samsung Exynos PCIe controller"
+ depends on SOC_EXYNOS5440
+ select PCIEPORTBUS
+ select PCIE_DW
+
+config PCI_IMX6
+ bool "Freescale i.MX6 PCIe controller"
+ depends on SOC_IMX6Q
+ select PCIEPORTBUS
+ select PCIE_DW
+
+config PCI_TEGRA
+ bool "NVIDIA Tegra PCIe controller"
+ depends on ARCH_TEGRA
+
+config PCI_RCAR_GEN2
+ bool "Renesas R-Car Gen2 Internal PCI controller"
+ depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
+ help
+ Say Y here if you want internal PCI support on R-Car Gen2 SoCs.
+ There are three internal PCI controllers, each with a single
+ built-in EHCI/OHCI host controller.
+
+config PCI_RCAR_GEN2_PCIE
+ bool "Renesas R-Car PCIe controller"
+ depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
+ help
+ Say Y here if you want PCIe controller support on R-Car Gen2 SoCs.
+
+config PCI_HOST_GENERIC
+ bool "Generic PCI host controller"
+ depends on ARM && OF
+ help
+ Say Y here if you want to support a simple generic PCI host
+ controller, such as the one emulated by kvmtool.
+
+endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
new file mode 100644
index 00000000000..611ba4b48c9
--- /dev/null
+++ b/drivers/pci/host/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
+obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
+obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
+obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
+obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
+obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
new file mode 100644
index 00000000000..c5d0ca38450
--- /dev/null
+++ b/drivers/pci/host/pci-exynos.c
@@ -0,0 +1,667 @@
+/*
+ * PCIe host controller driver for Samsung EXYNOS SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp)
+
+struct exynos_pcie {
+ void __iomem *elbi_base;
+ void __iomem *phy_base;
+ void __iomem *block_base;
+ int reset_gpio;
+ struct clk *clk;
+ struct clk *bus_clk;
+ struct pcie_port pp;
+};
+
+/* PCIe ELBI registers */
+#define PCIE_IRQ_PULSE 0x000
+#define IRQ_INTA_ASSERT (0x1 << 0)
+#define IRQ_INTB_ASSERT (0x1 << 2)
+#define IRQ_INTC_ASSERT (0x1 << 4)
+#define IRQ_INTD_ASSERT (0x1 << 6)
+#define PCIE_IRQ_LEVEL 0x004
+#define PCIE_IRQ_SPECIAL 0x008
+#define PCIE_IRQ_EN_PULSE 0x00c
+#define PCIE_IRQ_EN_LEVEL 0x010
+#define IRQ_MSI_ENABLE (0x1 << 2)
+#define PCIE_IRQ_EN_SPECIAL 0x014
+#define PCIE_PWR_RESET 0x018
+#define PCIE_CORE_RESET 0x01c
+#define PCIE_CORE_RESET_ENABLE (0x1 << 0)
+#define PCIE_STICKY_RESET 0x020
+#define PCIE_NONSTICKY_RESET 0x024
+#define PCIE_APP_INIT_RESET 0x028
+#define PCIE_APP_LTSSM_ENABLE 0x02c
+#define PCIE_ELBI_RDLH_LINKUP 0x064
+#define PCIE_ELBI_LTSSM_ENABLE 0x1
+#define PCIE_ELBI_SLV_AWMISC 0x11c
+#define PCIE_ELBI_SLV_ARMISC 0x120
+#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
+
+/* PCIe Purple registers */
+#define PCIE_PHY_GLOBAL_RESET 0x000
+#define PCIE_PHY_COMMON_RESET 0x004
+#define PCIE_PHY_CMN_REG 0x008
+#define PCIE_PHY_MAC_RESET 0x00c
+#define PCIE_PHY_PLL_LOCKED 0x010
+#define PCIE_PHY_TRSVREG_RESET 0x020
+#define PCIE_PHY_TRSV_RESET 0x024
+
+/* PCIe PHY registers */
+#define PCIE_PHY_IMPEDANCE 0x004
+#define PCIE_PHY_PLL_DIV_0 0x008
+#define PCIE_PHY_PLL_BIAS 0x00c
+#define PCIE_PHY_DCC_FEEDBACK 0x014
+#define PCIE_PHY_PLL_DIV_1 0x05c
+#define PCIE_PHY_COMMON_POWER 0x064
+#define PCIE_PHY_COMMON_PD_CMN (0x1 << 3)
+#define PCIE_PHY_TRSV0_EMP_LVL 0x084
+#define PCIE_PHY_TRSV0_DRV_LVL 0x088
+#define PCIE_PHY_TRSV0_RXCDR 0x0ac
+#define PCIE_PHY_TRSV0_POWER 0x0c4
+#define PCIE_PHY_TRSV0_PD_TSV (0x1 << 7)
+#define PCIE_PHY_TRSV0_LVCC 0x0dc
+#define PCIE_PHY_TRSV1_EMP_LVL 0x144
+#define PCIE_PHY_TRSV1_RXCDR 0x16c
+#define PCIE_PHY_TRSV1_POWER 0x184
+#define PCIE_PHY_TRSV1_PD_TSV (0x1 << 7)
+#define PCIE_PHY_TRSV1_LVCC 0x19c
+#define PCIE_PHY_TRSV2_EMP_LVL 0x204
+#define PCIE_PHY_TRSV2_RXCDR 0x22c
+#define PCIE_PHY_TRSV2_POWER 0x244
+#define PCIE_PHY_TRSV2_PD_TSV (0x1 << 7)
+#define PCIE_PHY_TRSV2_LVCC 0x25c
+#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
+#define PCIE_PHY_TRSV3_RXCDR 0x2ec
+#define PCIE_PHY_TRSV3_POWER 0x304
+#define PCIE_PHY_TRSV3_PD_TSV (0x1 << 7)
+#define PCIE_PHY_TRSV3_LVCC 0x31c
+
+static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->elbi_base + reg);
+}
+
+static inline u32 exynos_elb_readl(struct exynos_pcie *pcie, u32 reg)
+{
+ return readl(pcie->elbi_base + reg);
+}
+
+static inline void exynos_phy_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->phy_base + reg);
+}
+
+static inline u32 exynos_phy_readl(struct exynos_pcie *pcie, u32 reg)
+{
+ return readl(pcie->phy_base + reg);
+}
+
+static inline void exynos_blk_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->block_base + reg);
+}
+
+static inline u32 exynos_blk_readl(struct exynos_pcie *pcie, u32 reg)
+{
+ return readl(pcie->block_base + reg);
+}
+
+static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (on) {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_AWMISC);
+ } else {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_AWMISC);
+ }
+}
+
+static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (on) {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_ARMISC);
+ } else {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_ARMISC);
+ }
+}
+
+static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
+ val &= ~PCIE_CORE_RESET_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_CORE_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_PWR_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_STICKY_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_NONSTICKY_RESET);
+}
+
+static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
+ val |= PCIE_CORE_RESET_ENABLE;
+
+ exynos_elb_writel(exynos_pcie, val, PCIE_CORE_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_STICKY_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_NONSTICKY_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_APP_INIT_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_APP_INIT_RESET);
+ exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_MAC_RESET);
+}
+
+static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_MAC_RESET);
+ exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_GLOBAL_RESET);
+}
+
+static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_GLOBAL_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_PWR_RESET);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_CMN_REG);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSVREG_RESET);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
+}
+
+static void exynos_pcie_power_on_phy(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+ val &= ~PCIE_PHY_COMMON_PD_CMN;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+ val &= ~PCIE_PHY_TRSV0_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+ val &= ~PCIE_PHY_TRSV1_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+ val &= ~PCIE_PHY_TRSV2_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+ val &= ~PCIE_PHY_TRSV3_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
+static void exynos_pcie_power_off_phy(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+ val |= PCIE_PHY_COMMON_PD_CMN;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+ val |= PCIE_PHY_TRSV0_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+ val |= PCIE_PHY_TRSV1_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+ val |= PCIE_PHY_TRSV2_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+ val |= PCIE_PHY_TRSV3_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
+static void exynos_pcie_init_phy(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ /* DCC feedback control off */
+ exynos_phy_writel(exynos_pcie, 0x29, PCIE_PHY_DCC_FEEDBACK);
+
+ /* set TX/RX impedance */
+ exynos_phy_writel(exynos_pcie, 0xd5, PCIE_PHY_IMPEDANCE);
+
+ /* set 50MHz PHY clock */
+ exynos_phy_writel(exynos_pcie, 0x14, PCIE_PHY_PLL_DIV_0);
+ exynos_phy_writel(exynos_pcie, 0x12, PCIE_PHY_PLL_DIV_1);
+
+ /* set TX Differential output for lane 0 */
+ exynos_phy_writel(exynos_pcie, 0x7f, PCIE_PHY_TRSV0_DRV_LVL);
+
+ /* set TX Pre-emphasis Level Control for lane 0 to minimum */
+ exynos_phy_writel(exynos_pcie, 0x0, PCIE_PHY_TRSV0_EMP_LVL);
+
+ /* set RX clock and data recovery bandwidth */
+ exynos_phy_writel(exynos_pcie, 0xe7, PCIE_PHY_PLL_BIAS);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV0_RXCDR);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV1_RXCDR);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV2_RXCDR);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV3_RXCDR);
+
+ /* change TX Pre-emphasis Level Control for lanes */
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV0_EMP_LVL);
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV1_EMP_LVL);
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV2_EMP_LVL);
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV3_EMP_LVL);
+
+ /* set LVCC */
+ exynos_phy_writel(exynos_pcie, 0x20, PCIE_PHY_TRSV0_LVCC);
+ exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV1_LVCC);
+ exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV2_LVCC);
+ exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV3_LVCC);
+}
+
+static void exynos_pcie_assert_reset(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (exynos_pcie->reset_gpio >= 0)
+ devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio,
+ GPIOF_OUT_INIT_HIGH, "RESET");
+}
+
+static int exynos_pcie_establish_link(struct pcie_port *pp)
+{
+ u32 val;
+ int count = 0;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (dw_pcie_link_up(pp)) {
+ dev_err(pp->dev, "Link already up\n");
+ return 0;
+ }
+
+ /* assert reset signals */
+ exynos_pcie_assert_core_reset(pp);
+ exynos_pcie_assert_phy_reset(pp);
+
+ /* de-assert phy reset */
+ exynos_pcie_deassert_phy_reset(pp);
+
+ /* power on phy */
+ exynos_pcie_power_on_phy(pp);
+
+ /* initialize phy */
+ exynos_pcie_init_phy(pp);
+
+ /* pulse for common reset */
+ exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_COMMON_RESET);
+ udelay(500);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
+
+ /* de-assert core reset */
+ exynos_pcie_deassert_core_reset(pp);
+
+ /* setup root complex */
+ dw_pcie_setup_rc(pp);
+
+ /* assert reset signal */
+ exynos_pcie_assert_reset(pp);
+
+ /* assert LTSSM enable */
+ exynos_elb_writel(exynos_pcie, PCIE_ELBI_LTSSM_ENABLE,
+ PCIE_APP_LTSSM_ENABLE);
+
+ /* check if the link is up or not */
+ while (!dw_pcie_link_up(pp)) {
+ mdelay(100);
+ count++;
+ if (count == 10) {
+ while (exynos_phy_readl(exynos_pcie,
+ PCIE_PHY_PLL_LOCKED) == 0) {
+ val = exynos_blk_readl(exynos_pcie,
+ PCIE_PHY_PLL_LOCKED);
+ dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
+ }
+ /* power off phy */
+ exynos_pcie_power_off_phy(pp);
+
+ dev_err(pp->dev, "PCIe Link Fail\n");
+ return -EINVAL;
+ }
+ }
+
+ dev_info(pp->dev, "Link up\n");
+
+ return 0;
+}
+
+static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE);
+ exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE);
+}
+
+static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ /* enable INTX interrupt */
+ val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+ IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
+ exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_PULSE);
+}
+
+static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ exynos_pcie_clear_irq_pulse(pp);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ return dw_handle_msi_irq(pp);
+}
+
+static void exynos_pcie_msi_init(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ dw_pcie_msi_init(pp);
+
+ /* enable MSI interrupt */
+ val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_EN_LEVEL);
+ val |= IRQ_MSI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
+}
+
+static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
+{
+ exynos_pcie_enable_irq_pulse(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ exynos_pcie_msi_init(pp);
+}
+
+static inline void exynos_pcie_readl_rc(struct pcie_port *pp,
+ void __iomem *dbi_base, u32 *val)
+{
+ exynos_pcie_sideband_dbi_r_mode(pp, true);
+ *val = readl(dbi_base);
+ exynos_pcie_sideband_dbi_r_mode(pp, false);
+}
+
+static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
+ u32 val, void __iomem *dbi_base)
+{
+ exynos_pcie_sideband_dbi_w_mode(pp, true);
+ writel(val, dbi_base);
+ exynos_pcie_sideband_dbi_w_mode(pp, false);
+}
+
+static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ int ret;
+
+ exynos_pcie_sideband_dbi_r_mode(pp, true);
+ ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
+ exynos_pcie_sideband_dbi_r_mode(pp, false);
+ return ret;
+}
+
+static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+ u32 val)
+{
+ int ret;
+
+ exynos_pcie_sideband_dbi_w_mode(pp, true);
+ ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3),
+ where, size, val);
+ exynos_pcie_sideband_dbi_w_mode(pp, false);
+ return ret;
+}
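+
+/*
+ * Illustrative sketch (hypothetical helper): dw_pcie_cfg_read() and
+ * dw_pcie_cfg_write() operate on the naturally aligned dword that
+ * contains the target register, which is why the accessors above mask
+ * off the low two bits of "where"; a 2-byte access at 0x3e, for
+ * example, goes through the dword at 0x3c.
+ */
+static inline void __iomem *exynos_dbi_dword_sketch(struct pcie_port *pp,
+ int where)
+{
+ return pp->dbi_base + (where & ~0x3);
+}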
+
+static int exynos_pcie_link_up(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+ u32 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
+
+ if (val == PCIE_ELBI_LTSSM_ENABLE)
+ return 1;
+
+ return 0;
+}
+
+static void exynos_pcie_host_init(struct pcie_port *pp)
+{
+ exynos_pcie_establish_link(pp);
+ exynos_pcie_enable_interrupts(pp);
+}
+
+static struct pcie_host_ops exynos_pcie_host_ops = {
+ .readl_rc = exynos_pcie_readl_rc,
+ .writel_rc = exynos_pcie_writel_rc,
+ .rd_own_conf = exynos_pcie_rd_own_conf,
+ .wr_own_conf = exynos_pcie_wr_own_conf,
+ .link_up = exynos_pcie_link_up,
+ .host_init = exynos_pcie_host_init,
+};
+
+static int __init add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 1);
+ if (pp->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -ENODEV;
+ }
+ ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
+ IRQF_SHARED, "exynos-pcie", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return ret;
+ }
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq(pdev, 0);
+ if (pp->msi_irq < 0) {
+ dev_err(&pdev->dev, "failed to get msi irq\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ exynos_pcie_msi_irq_handler,
+ IRQF_SHARED, "exynos-pcie", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request msi irq\n");
+ return ret;
+ }
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &exynos_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init exynos_pcie_probe(struct platform_device *pdev)
+{
+ struct exynos_pcie *exynos_pcie;
+ struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *elbi_base;
+ struct resource *phy_base;
+ struct resource *block_base;
+ int ret;
+
+ exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie),
+ GFP_KERNEL);
+ if (!exynos_pcie)
+ return -ENOMEM;
+
+ pp = &exynos_pcie->pp;
+
+ pp->dev = &pdev->dev;
+
+ exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+
+ exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+ if (IS_ERR(exynos_pcie->clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
+ return PTR_ERR(exynos_pcie->clk);
+ }
+ ret = clk_prepare_enable(exynos_pcie->clk);
+ if (ret)
+ return ret;
+
+ exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+ if (IS_ERR(exynos_pcie->bus_clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
+ ret = PTR_ERR(exynos_pcie->bus_clk);
+ goto fail_clk;
+ }
+ ret = clk_prepare_enable(exynos_pcie->bus_clk);
+ if (ret)
+ goto fail_clk;
+
+ elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
+ if (IS_ERR(exynos_pcie->elbi_base)) {
+ ret = PTR_ERR(exynos_pcie->elbi_base);
+ goto fail_bus_clk;
+ }
+
+ phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+ if (IS_ERR(exynos_pcie->phy_base)) {
+ ret = PTR_ERR(exynos_pcie->phy_base);
+ goto fail_bus_clk;
+ }
+
+ block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
+ if (IS_ERR(exynos_pcie->block_base)) {
+ ret = PTR_ERR(exynos_pcie->block_base);
+ goto fail_bus_clk;
+ }
+
+ ret = add_pcie_port(pp, pdev);
+ if (ret < 0)
+ goto fail_bus_clk;
+
+ platform_set_drvdata(pdev, exynos_pcie);
+ return 0;
+
+fail_bus_clk:
+ clk_disable_unprepare(exynos_pcie->bus_clk);
+fail_clk:
+ clk_disable_unprepare(exynos_pcie->clk);
+ return ret;
+}
+
+static int __exit exynos_pcie_remove(struct platform_device *pdev)
+{
+ struct exynos_pcie *exynos_pcie = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(exynos_pcie->bus_clk);
+ clk_disable_unprepare(exynos_pcie->clk);
+
+ return 0;
+}
+
+static const struct of_device_id exynos_pcie_of_match[] = {
+ { .compatible = "samsung,exynos5440-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
+
+static struct platform_driver exynos_pcie_driver = {
+ .remove = __exit_p(exynos_pcie_remove),
+ .driver = {
+ .name = "exynos-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = exynos_pcie_of_match,
+ },
+};
+
+/* Exynos PCIe driver does not allow module unload */
+
+static int __init pcie_init(void)
+{
+ return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
+}
+subsys_initcall(pcie_init);
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("Samsung PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
new file mode 100644
index 00000000000..44fe6aa6a43
--- /dev/null
+++ b/drivers/pci/host/pci-host-generic.c
@@ -0,0 +1,388 @@
+/*
+ * Simple, generic PCI host controller driver targeting firmware-initialised
+ * systems and virtual machines (e.g. the PCI emulation provided by kvmtool).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+
+struct gen_pci_cfg_bus_ops {
+ u32 bus_shift;
+ void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
+};
+
+struct gen_pci_cfg_windows {
+ struct resource res;
+ struct resource bus_range;
+ void __iomem **win;
+
+ const struct gen_pci_cfg_bus_ops *ops;
+};
+
+struct gen_pci {
+ struct pci_host_bridge host;
+ struct gen_pci_cfg_windows cfg;
+ struct list_head resources;
+};
+
+static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ struct pci_sys_data *sys = bus->sysdata;
+ struct gen_pci *pci = sys->private_data;
+ resource_size_t idx = bus->number - pci->cfg.bus_range.start;
+
+ return pci->cfg.win[idx] + ((devfn << 8) | where);
+}
+
+static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
+ .bus_shift = 16,
+ .map_bus = gen_pci_map_cfg_bus_cam,
+};
+
+static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ struct pci_sys_data *sys = bus->sysdata;
+ struct gen_pci *pci = sys->private_data;
+ resource_size_t idx = bus->number - pci->cfg.bus_range.start;
+
+ return pci->cfg.win[idx] + ((devfn << 12) | where);
+}
+
+static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
+ .bus_shift = 20,
+ .map_bus = gen_pci_map_cfg_bus_ecam,
+};
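+
+/*
+ * Illustrative sketch (hypothetical helper): in ECAM every PCI function
+ * gets a 4KiB config window, so the byte offset of (bus, devfn, where)
+ * from the start of the config region is
+ *
+ * (bus - bus_start) << 20 | devfn << 12 | where
+ *
+ * The driver folds the bus term into the per-bus cfg.win[] mappings and
+ * computes only the low 20 bits in map_bus(); CAM above is the same
+ * scheme with a 16-bit bus shift and 8-bit function stride.
+ */
+static inline resource_size_t gen_pci_ecam_offset_sketch(u8 bus, u8 bus_start,
+ unsigned int devfn, int where)
+{
+ return ((resource_size_t)(bus - bus_start) << 20) |
+ (devfn << 12) | where;
+}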
+
+static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr;
+ struct pci_sys_data *sys = bus->sysdata;
+ struct gen_pci *pci = sys->private_data;
+
+ addr = pci->cfg.ops->map_bus(bus, devfn, where);
+
+ switch (size) {
+ case 1:
+ *val = readb(addr);
+ break;
+ case 2:
+ *val = readw(addr);
+ break;
+ default:
+ *val = readl(addr);
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ void __iomem *addr;
+ struct pci_sys_data *sys = bus->sysdata;
+ struct gen_pci *pci = sys->private_data;
+
+ addr = pci->cfg.ops->map_bus(bus, devfn, where);
+
+ switch (size) {
+ case 1:
+ writeb(val, addr);
+ break;
+ case 2:
+ writew(val, addr);
+ break;
+ default:
+ writel(val, addr);
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops gen_pci_ops = {
+ .read = gen_pci_config_read,
+ .write = gen_pci_config_write,
+};
+
+static const struct of_device_id gen_pci_of_match[] = {
+ { .compatible = "pci-host-cam-generic",
+ .data = &gen_pci_cfg_cam_bus_ops },
+
+ { .compatible = "pci-host-ecam-generic",
+ .data = &gen_pci_cfg_ecam_bus_ops },
+
+ { },
+};
+MODULE_DEVICE_TABLE(of, gen_pci_of_match);
+
+static int gen_pci_calc_io_offset(struct device *dev,
+ struct of_pci_range *range,
+ struct resource *res,
+ resource_size_t *offset)
+{
+ static atomic_t wins = ATOMIC_INIT(0);
+ int err, idx, max_win;
+ unsigned int window;
+
+ if (!PAGE_ALIGNED(range->cpu_addr))
+ return -EINVAL;
+
+ max_win = (IO_SPACE_LIMIT + 1) / SZ_64K;
+ idx = atomic_inc_return(&wins);
+ if (idx > max_win)
+ return -ENOSPC;
+
+ window = (idx - 1) * SZ_64K;
+ err = pci_ioremap_io(window, range->cpu_addr);
+ if (err)
+ return err;
+
+ of_pci_range_to_resource(range, dev->of_node, res);
+ res->start = window;
+ res->end = res->start + range->size - 1;
+ *offset = window - range->pci_addr;
+ return 0;
+}
+
+static int gen_pci_calc_mem_offset(struct device *dev,
+ struct of_pci_range *range,
+ struct resource *res,
+ resource_size_t *offset)
+{
+ of_pci_range_to_resource(range, dev->of_node, res);
+ *offset = range->cpu_addr - range->pci_addr;
+ return 0;
+}
+
+static void gen_pci_release_of_pci_ranges(struct gen_pci *pci)
+{
+ struct pci_host_bridge_window *win;
+
+ list_for_each_entry(win, &pci->resources, list)
+ release_resource(win->res);
+
+ pci_free_resource_list(&pci->resources);
+}
+
+static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int err, res_valid = 0;
+ struct device *dev = pci->host.dev.parent;
+ struct device_node *np = dev->of_node;
+
+ if (of_pci_range_parser_init(&parser, np)) {
+ dev_err(dev, "missing \"ranges\" property\n");
+ return -EINVAL;
+ }
+
+ for_each_of_pci_range(&parser, &range) {
+ struct resource *parent, *res;
+ resource_size_t offset;
+ u32 restype = range.flags & IORESOURCE_TYPE_BITS;
+
+ res = devm_kmalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ err = -ENOMEM;
+ goto out_release_res;
+ }
+
+ switch (restype) {
+ case IORESOURCE_IO:
+ parent = &ioport_resource;
+ err = gen_pci_calc_io_offset(dev, &range, res, &offset);
+ break;
+ case IORESOURCE_MEM:
+ parent = &iomem_resource;
+ err = gen_pci_calc_mem_offset(dev, &range, res, &offset);
+ res_valid |= !(res->flags & IORESOURCE_PREFETCH || err);
+ break;
+ default:
+ err = -EINVAL;
+ continue;
+ }
+
+ if (err) {
+ dev_warn(dev,
+ "error %d: failed to add resource [type 0x%x, %lld bytes]\n",
+ err, restype, range.size);
+ continue;
+ }
+
+ err = request_resource(parent, res);
+ if (err)
+ goto out_release_res;
+
+ pci_add_resource_offset(&pci->resources, res, offset);
+ }
+
+ if (!res_valid) {
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
+ goto out_release_res;
+ }
+
+ return 0;
+
+out_release_res:
+ gen_pci_release_of_pci_ranges(pci);
+ return err;
+}
+
+static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
+{
+ int err;
+ u8 bus_max;
+ resource_size_t busn;
+ struct resource *bus_range;
+ struct device *dev = pci->host.dev.parent;
+ struct device_node *np = dev->of_node;
+
+ if (of_pci_parse_bus_range(np, &pci->cfg.bus_range))
+ pci->cfg.bus_range = (struct resource) {
+ .name = np->name,
+ .start = 0,
+ .end = 0xff,
+ .flags = IORESOURCE_BUS,
+ };
+
+ err = of_address_to_resource(np, 0, &pci->cfg.res);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+ return err;
+ }
+
+ pci->cfg.win = devm_kcalloc(dev, resource_size(&pci->cfg.bus_range),
+ sizeof(*pci->cfg.win), GFP_KERNEL);
+ if (!pci->cfg.win)
+ return -ENOMEM;
+
+ /* Limit the bus-range to fit within reg */
+ bus_max = pci->cfg.bus_range.start +
+ (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1;
+ pci->cfg.bus_range.end = min_t(resource_size_t, pci->cfg.bus_range.end,
+ bus_max);
+
+ /* Map our Configuration Space windows */
+ if (!devm_request_mem_region(dev, pci->cfg.res.start,
+ resource_size(&pci->cfg.res),
+ "Configuration Space"))
+ return -ENOMEM;
+
+ bus_range = &pci->cfg.bus_range;
+ for (busn = bus_range->start; busn <= bus_range->end; ++busn) {
+ u32 idx = busn - bus_range->start;
+ u32 sz = 1 << pci->cfg.ops->bus_shift;
+
+ pci->cfg.win[idx] = devm_ioremap(dev,
+ pci->cfg.res.start + busn * sz,
+ sz);
+ if (!pci->cfg.win[idx])
+ return -ENOMEM;
+ }
+
+ /* Register bus resource */
+ pci_add_resource(&pci->resources, bus_range);
+ return 0;
+}
+
+static int gen_pci_setup(int nr, struct pci_sys_data *sys)
+{
+ struct gen_pci *pci = sys->private_data;
+ list_splice_init(&pci->resources, &sys->resources);
+ return 1;
+}
+
+static int gen_pci_probe(struct platform_device *pdev)
+{
+ int err;
+ const char *type;
+ const struct of_device_id *of_id;
+ const int *prop;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ struct hw_pci hw = {
+ .nr_controllers = 1,
+ .private_data = (void **)&pci,
+ .setup = gen_pci_setup,
+ .map_irq = of_irq_parse_and_map_pci,
+ .ops = &gen_pci_ops,
+ };
+
+ if (!pci)
+ return -ENOMEM;
+
+ type = of_get_property(np, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ prop = of_get_property(of_chosen, "linux,pci-probe-only", NULL);
+ if (prop) {
+ if (*prop)
+ pci_add_flags(PCI_PROBE_ONLY);
+ else
+ pci_clear_flags(PCI_PROBE_ONLY);
+ }
+
+ of_id = of_match_node(gen_pci_of_match, np);
+ pci->cfg.ops = of_id->data;
+ pci->host.dev.parent = dev;
+ INIT_LIST_HEAD(&pci->host.windows);
+ INIT_LIST_HEAD(&pci->resources);
+
+ /* Parse our PCI ranges and request their resources */
+ err = gen_pci_parse_request_of_pci_ranges(pci);
+ if (err)
+ return err;
+
+ /* Parse and map our Configuration Space windows */
+ err = gen_pci_parse_map_cfg_windows(pci);
+ if (err) {
+ gen_pci_release_of_pci_ranges(pci);
+ return err;
+ }
+
+ pci_common_init_dev(dev, &hw);
+ return 0;
+}
+
+static struct platform_driver gen_pci_driver = {
+ .driver = {
+ .name = "pci-host-generic",
+ .owner = THIS_MODULE,
+ .of_match_table = gen_pci_of_match,
+ },
+ .probe = gen_pci_probe,
+};
+module_platform_driver(gen_pci_driver);
+
+MODULE_DESCRIPTION("Generic PCI host driver");
+MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
+MODULE_LICENSE("GPLv2");
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
new file mode 100644
index 00000000000..a568efaa331
--- /dev/null
+++ b/drivers/pci/host/pci-imx6.c
@@ -0,0 +1,616 @@
+/*
+ * PCIe host controller driver for Freescale i.MX6 SoCs
+ *
+ * Copyright (C) 2013 Kosagi
+ * http://www.kosagi.com
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+
+#include "pcie-designware.h"
+
+#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
+
+struct imx6_pcie {
+ int reset_gpio;
+ struct clk *pcie_bus;
+ struct clk *pcie_phy;
+ struct clk *pcie;
+ struct pcie_port pp;
+ struct regmap *iomuxc_gpr;
+ void __iomem *mem_base;
+};
+
+/* PCIe Root Complex registers (memory-mapped) */
+#define PCIE_RC_LCR 0x7c
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET 0x700
+#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
+#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
+#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)
+
+#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
+#define PCIE_PHY_CTRL_DATA_LOC 0
+#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
+#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
+#define PCIE_PHY_CTRL_WR_LOC 18
+#define PCIE_PHY_CTRL_RD_LOC 19
+
+#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
+#define PCIE_PHY_STAT_ACK_LOC 16
+
+#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
+#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
+
+/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_RX_ASIC_OUT 0x100D
+
+#define PHY_RX_OVRD_IN_LO 0x1005
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
+
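+/*
+ * The PHY registers are reached indirectly through PCIE_PHY_CTRL: the
+ * address or data word is placed in the low bits, latched by pulsing
+ * the relevant capture/read/write bit, and every step is confirmed by
+ * polling the ack bit in PCIE_PHY_STAT.
+ */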
+static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
+{
+ u32 val;
+ u32 max_iterations = 10;
+ u32 wait_counter = 0;
+
+ do {
+ val = readl(dbi_base + PCIE_PHY_STAT);
+ val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
+ wait_counter++;
+
+ if (val == exp_val)
+ return 0;
+
+ udelay(1);
+ } while (wait_counter < max_iterations);
+
+ return -ETIMEDOUT;
+}
+
+static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
+{
+ u32 val;
+ int ret;
+
+ val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ writel(val, dbi_base + PCIE_PHY_CTRL);
+
+ val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
+ writel(val, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ writel(val, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
+static int pcie_phy_read(void __iomem *dbi_base, int addr, u32 *data)
+{
+ u32 val, phy_ctl;
+ int ret;
+
+ ret = pcie_phy_wait_ack(dbi_base, addr);
+ if (ret)
+ return ret;
+
+ /* assert Read signal */
+ phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
+ writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ val = readl(dbi_base + PCIE_PHY_STAT);
+ *data = val & 0xffff;
+
+ /* deassert Read signal */
+ writel(0x00, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
+{
+ u32 var;
+ int ret;
+
+	/* write and latch the address */
+ ret = pcie_phy_wait_ack(dbi_base, addr);
+ if (ret)
+ return ret;
+
+ var = data << PCIE_PHY_CTRL_DATA_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* capture data */
+ var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ /* deassert cap data */
+ var = data << PCIE_PHY_CTRL_DATA_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* wait for ack de-assertion */
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ /* assert wr signal */
+ var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* wait for ack */
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ /* deassert wr signal */
+ var = data << PCIE_PHY_CTRL_DATA_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* wait for ack de-assertion */
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ writel(0x0, dbi_base + PCIE_PHY_CTRL);
+
+ return 0;
+}
+
+/* Added for PCI abort handling */
+static int imx6q_pcie_abort_handler(unsigned long addr,
+ unsigned int fsr, struct pt_regs *regs)
+{
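+	/*
+	 * Imprecise external aborts (typically raised by accesses made
+	 * while the link is down) are reported as handled, so that the
+	 * offending access fails gracefully instead of taking the kernel
+	 * down.
+	 */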
+ return 0;
+}
+
+static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+
+ return 0;
+}
+
+static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ int ret;
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_phy);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie_phy clock\n");
+ goto err_pcie_phy;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_bus);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie_bus clock\n");
+ goto err_pcie_bus;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie clock\n");
+ goto err_pcie;
+ }
+
+ /* allow the clocks to stabilize */
+ usleep_range(200, 500);
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ gpio_set_value(imx6_pcie->reset_gpio, 0);
+ msleep(100);
+ gpio_set_value(imx6_pcie->reset_gpio, 1);
+ }
+ return 0;
+
+err_pcie:
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+err_pcie_bus:
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+err_pcie_phy:
+ return ret;
+}
+
+static void imx6_pcie_init_phy(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
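+	/* PHY TX de-emphasis and signal-swing tuning */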
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
+}
+
+static int imx6_pcie_wait_for_link(struct pcie_port *pp)
+{
+ int count = 200;
+
+ while (!dw_pcie_link_up(pp)) {
+ usleep_range(100, 1000);
+ if (--count)
+ continue;
+
+ dev_err(pp->dev, "phy link never came up\n");
+ dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ return dw_handle_msi_irq(pp);
+}
+
+static int imx6_pcie_start_link(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+	u32 tmp;
+ int ret, count;
+
+ /*
+ * Force Gen1 operation when starting the link. In case the link is
+ * started in Gen2 mode, there is a possibility the devices on the
+ * bus will not be detected at all. This happens with PCIe switches.
+ */
+ tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+ tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
+ tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
+ writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+
+ /* Start LTSSM. */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+
+ ret = imx6_pcie_wait_for_link(pp);
+ if (ret)
+ return ret;
+
+ /* Allow Gen2 mode after the link is up. */
+ tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+ tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
+ tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
+ writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+
+ /*
+ * Start Directed Speed Change so the best possible speed both link
+ * partners support can be negotiated.
+ */
+ tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp |= PORT_LOGIC_SPEED_CHANGE;
+ writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+
+ count = 200;
+ while (count--) {
+ tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ /* Test if the speed change finished. */
+ if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
+ break;
+ usleep_range(100, 1000);
+ }
+
+ /* Make sure link training is finished as well! */
+ if (count)
+ ret = imx6_pcie_wait_for_link(pp);
+ else
+ ret = -EINVAL;
+
+ if (ret) {
+ dev_err(pp->dev, "Failed to bring link up!\n");
+ } else {
+ tmp = readl(pp->dbi_base + 0x80);
+ dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
+ }
+
+ return ret;
+}
+
+static void imx6_pcie_host_init(struct pcie_port *pp)
+{
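+	/*
+	 * Bring-up sequence: hold the core in reset, program the PHY,
+	 * release the reset, set up the root complex and finally train
+	 * the link.
+	 */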
+ imx6_pcie_assert_core_reset(pp);
+
+ imx6_pcie_init_phy(pp);
+
+ imx6_pcie_deassert_core_reset(pp);
+
+ dw_pcie_setup_rc(pp);
+
+ imx6_pcie_start_link(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+}
+
+static void imx6_pcie_reset_phy(struct pcie_port *pp)
+{
+	u32 temp;
+
+ pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
+ temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
+
+ usleep_range(2000, 3000);
+
+ pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
+ temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
+}
+
+static int imx6_pcie_link_up(struct pcie_port *pp)
+{
+ u32 rc, debug_r0, rx_valid;
+ int count = 5;
+
+ /*
+ * Test if the PHY reports that the link is up and also that the LTSSM
+ * training finished. There are three possible states of the link when
+ * this code is called:
+ * 1) The link is DOWN (unlikely)
+ * The link didn't come up yet for some reason. This usually means
+ * we have a real problem somewhere. Reset the PHY and exit. This
+ * state calls for inspection of the DEBUG registers.
+ * 2) The link is UP, but still in LTSSM training
+ * Wait for the training to finish, which should take a very short
+ * time. If the training does not finish, we have a problem and we
+ * need to inspect the DEBUG registers. If the training does finish,
+ * the link is up and operating correctly.
+ * 3) The link is UP and no longer in LTSSM training
+ * The link is up and operating correctly.
+ */
+ while (1) {
+ rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
+ if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP))
+ break;
+ if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
+ return 1;
+ if (!count--)
+ break;
+ dev_dbg(pp->dev, "Link is up, but still in training\n");
+ /*
+ * Wait a little bit, then re-check if the link finished
+ * the training.
+ */
+ usleep_range(1000, 2000);
+ }
+ /*
+ * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
+ * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
+ * If (MAC/LTSSM.state == Recovery.RcvrLock)
+ * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
+ * to gen2 is stuck
+ */
+ pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
+ debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);
+
+ if (rx_valid & 0x01)
+ return 0;
+
+ if ((debug_r0 & 0x3f) != 0x0d)
+ return 0;
+
+ dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
+ dev_dbg(pp->dev, "debug_r0=%08x debug_r1=%08x\n", debug_r0, rc);
+
+ imx6_pcie_reset_phy(pp);
+
+ return 0;
+}
+
+static struct pcie_host_ops imx6_pcie_host_ops = {
+ .link_up = imx6_pcie_link_up,
+ .host_init = imx6_pcie_host_init,
+};
+
+static int __init imx6_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq <= 0) {
+ dev_err(&pdev->dev, "failed to get MSI irq\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ imx6_pcie_msi_handler,
+ IRQF_SHARED, "mx6-pcie-msi", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request MSI irq\n");
+ return -ENODEV;
+ }
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &imx6_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init imx6_pcie_probe(struct platform_device *pdev)
+{
+ struct imx6_pcie *imx6_pcie;
+ struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *dbi_base;
+ int ret;
+
+ imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
+ if (!imx6_pcie)
+ return -ENOMEM;
+
+ pp = &imx6_pcie->pp;
+ pp->dev = &pdev->dev;
+
+ /* Added for PCI abort handling */
+ hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+
+ dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+ if (IS_ERR(pp->dbi_base))
+ return PTR_ERR(pp->dbi_base);
+
+ /* Fetch GPIOs */
+ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+ GPIOF_OUT_INIT_LOW, "PCIe reset");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get reset gpio\n");
+ return ret;
+ }
+ }
+
+ /* Fetch clocks */
+ imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
+ if (IS_ERR(imx6_pcie->pcie_phy)) {
+ dev_err(&pdev->dev,
+ "pcie_phy clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie_phy);
+ }
+
+ imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
+ if (IS_ERR(imx6_pcie->pcie_bus)) {
+ dev_err(&pdev->dev,
+ "pcie_bus clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie_bus);
+ }
+
+ imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
+ if (IS_ERR(imx6_pcie->pcie)) {
+ dev_err(&pdev->dev,
+ "pcie clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie);
+ }
+
+ /* Grab GPR config register range */
+ imx6_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+ dev_err(&pdev->dev, "unable to find iomuxc registers\n");
+ return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ }
+
+ ret = imx6_add_pcie_port(pp, pdev);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, imx6_pcie);
+ return 0;
+}
+
+static const struct of_device_id imx6_pcie_of_match[] = {
+ { .compatible = "fsl,imx6q-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
+
+static struct platform_driver imx6_pcie_driver = {
+ .driver = {
+ .name = "imx6q-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = imx6_pcie_of_match,
+ },
+};
+
+/* Freescale PCIe driver does not allow module unload */
+
+static int __init imx6_pcie_init(void)
+{
+ return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
+}
+fs_initcall(imx6_pcie_init);
+
+MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
+MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
new file mode 100644
index 00000000000..ce23e0f076b
--- /dev/null
+++ b/drivers/pci/host/pci-mvebu.c
@@ -0,0 +1,1097 @@
+/*
+ * PCIe driver for Marvell Armada 370 and Armada XP SoCs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/mbus.h>
+#include <linux/msi.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+
+/*
+ * PCIe unit register offsets.
+ */
+#define PCIE_DEV_ID_OFF 0x0000
+#define PCIE_CMD_OFF 0x0004
+#define PCIE_DEV_REV_OFF 0x0008
+#define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
+#define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
+#define PCIE_HEADER_LOG_4_OFF 0x0128
+#define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4))
+#define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
+#define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4))
+#define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4))
+#define PCIE_WIN5_CTRL_OFF 0x1880
+#define PCIE_WIN5_BASE_OFF 0x1884
+#define PCIE_WIN5_REMAP_OFF 0x188c
+#define PCIE_CONF_ADDR_OFF 0x18f8
+#define PCIE_CONF_ADDR_EN 0x80000000
+#define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc))
+#define PCIE_CONF_BUS(b) (((b) & 0xff) << 16)
+#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11)
+#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8)
+#define PCIE_CONF_ADDR(bus, devfn, where) \
+ (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
+ PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
+ PCIE_CONF_ADDR_EN)
+#define PCIE_CONF_DATA_OFF 0x18fc
+#define PCIE_MASK_OFF 0x1910
+#define PCIE_MASK_ENABLE_INTS 0x0f000000
+#define PCIE_CTRL_OFF 0x1a00
+#define PCIE_CTRL_X1_MODE 0x0001
+#define PCIE_STAT_OFF 0x1a04
+#define PCIE_STAT_BUS 0xff00
+#define PCIE_STAT_DEV 0x1f0000
+#define PCIE_STAT_LINK_DOWN BIT(0)
+#define PCIE_DEBUG_CTRL 0x1a60
+#define PCIE_DEBUG_SOFT_RESET BIT(20)
+
+/* PCI configuration space of a PCI-to-PCI bridge */
+struct mvebu_sw_pci_bridge {
+ u16 vendor;
+ u16 device;
+ u16 command;
+ u16 class;
+ u8 interface;
+ u8 revision;
+ u8 bist;
+ u8 header_type;
+ u8 latency_timer;
+ u8 cache_line_size;
+ u32 bar[2];
+ u8 primary_bus;
+ u8 secondary_bus;
+ u8 subordinate_bus;
+ u8 secondary_latency_timer;
+ u8 iobase;
+ u8 iolimit;
+ u16 secondary_status;
+ u16 membase;
+ u16 memlimit;
+ u16 iobaseupper;
+ u16 iolimitupper;
+ u8 cappointer;
+ u8 reserved1;
+ u16 reserved2;
+ u32 romaddr;
+ u8 intline;
+ u8 intpin;
+ u16 bridgectrl;
+};
+
+struct mvebu_pcie_port;
+
+/* Structure representing all PCIe interfaces */
+struct mvebu_pcie {
+ struct platform_device *pdev;
+ struct mvebu_pcie_port *ports;
+ struct msi_chip *msi;
+ struct resource io;
+ char io_name[30];
+ struct resource realio;
+ char mem_name[30];
+ struct resource mem;
+ struct resource busn;
+ int nports;
+};
+
+/* Structure representing one PCIe interface */
+struct mvebu_pcie_port {
+ char *name;
+ void __iomem *base;
+ u32 port;
+ u32 lane;
+ int devfn;
+ unsigned int mem_target;
+ unsigned int mem_attr;
+ unsigned int io_target;
+ unsigned int io_attr;
+ struct clk *clk;
+ int reset_gpio;
+ int reset_active_low;
+ char *reset_name;
+ struct mvebu_sw_pci_bridge bridge;
+ struct device_node *dn;
+ struct mvebu_pcie *pcie;
+ phys_addr_t memwin_base;
+ size_t memwin_size;
+ phys_addr_t iowin_base;
+ size_t iowin_size;
+};
+
+static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
+{
+ writel(val, port->base + reg);
+}
+
+static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
+{
+ return readl(port->base + reg);
+}
+
+static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
+{
+ return port->io_target != -1 && port->io_attr != -1;
+}
+
+static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
+{
+ return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
+}
+
+static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
+{
+ u32 stat;
+
+ stat = mvebu_readl(port, PCIE_STAT_OFF);
+ stat &= ~PCIE_STAT_BUS;
+ stat |= nr << 8;
+ mvebu_writel(port, stat, PCIE_STAT_OFF);
+}
+
+static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
+{
+ u32 stat;
+
+ stat = mvebu_readl(port, PCIE_STAT_OFF);
+ stat &= ~PCIE_STAT_DEV;
+ stat |= nr << 16;
+ mvebu_writel(port, stat, PCIE_STAT_OFF);
+}
+
+/*
+ * Setup PCIE BARs and Address Decode Wins:
+ * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks
+ * WIN[0-3] -> DRAM bank[0-3]
+ */
+static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
+{
+ const struct mbus_dram_target_info *dram;
+ u32 size;
+ int i;
+
+ dram = mv_mbus_dram_info();
+
+ /* First, disable and clear BARs and windows. */
+ for (i = 1; i < 3; i++) {
+ mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
+ mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
+ }
+
+ for (i = 0; i < 5; i++) {
+ mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
+ }
+
+ mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
+ mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
+ mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
+
+ /* Setup windows for DDR banks. Count total DDR size on the fly. */
+ size = 0;
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ mvebu_writel(port, cs->base & 0xffff0000,
+ PCIE_WIN04_BASE_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
+ mvebu_writel(port,
+ ((cs->size - 1) & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ (dram->mbus_dram_target_id << 4) | 1,
+ PCIE_WIN04_CTRL_OFF(i));
+
+ size += cs->size;
+ }
+
+ /* Round up 'size' to the nearest power of two. */
+ if ((size & (size - 1)) != 0)
+ size = 1 << fls(size);
+
+ /* Setup BAR[1] to all DRAM banks. */
+ mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
+ mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
+ PCIE_BAR_CTRL_OFF(1));
+}
+
+static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
+{
+ u32 cmd, mask;
+
+ /* Point PCIe unit MBUS decode windows to DRAM space. */
+ mvebu_pcie_setup_wins(port);
+
+ /* Master + slave enable. */
+ cmd = mvebu_readl(port, PCIE_CMD_OFF);
+ cmd |= PCI_COMMAND_IO;
+ cmd |= PCI_COMMAND_MEMORY;
+ cmd |= PCI_COMMAND_MASTER;
+ mvebu_writel(port, cmd, PCIE_CMD_OFF);
+
+ /* Enable interrupt lines A-D. */
+ mask = mvebu_readl(port, PCIE_MASK_OFF);
+ mask |= PCIE_MASK_ENABLE_INTS;
+ mvebu_writel(port, mask, PCIE_MASK_OFF);
+}
+
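+/*
+ * Indirect configuration access: latch the target bus/devfn/register in
+ * PCIE_CONF_ADDR, then transfer the data through PCIE_CONF_DATA.
+ */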
+static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
+ struct pci_bus *bus,
+ u32 devfn, int where, int size, u32 *val)
+{
+ mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
+ PCIE_CONF_ADDR_OFF);
+
+ *val = mvebu_readl(port, PCIE_CONF_DATA_OFF);
+
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 3))) & 0xffff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
+ struct pci_bus *bus,
+ u32 devfn, int where, int size, u32 val)
+{
+ u32 _val, shift = 8 * (where & 3);
+
+ mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
+ PCIE_CONF_ADDR_OFF);
+ _val = mvebu_readl(port, PCIE_CONF_DATA_OFF);
+
+ if (size == 4)
+ _val = val;
+ else if (size == 2)
+ _val = (_val & ~(0xffff << shift)) | ((val & 0xffff) << shift);
+ else if (size == 1)
+ _val = (_val & ~(0xff << shift)) | ((val & 0xff) << shift);
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ mvebu_writel(port, _val, PCIE_CONF_DATA_OFF);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Remove windows, starting from the largest ones to the smallest
+ * ones.
+ */
+static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
+ phys_addr_t base, size_t size)
+{
+ while (size) {
+ size_t sz = 1 << (fls(size) - 1);
+
+ mvebu_mbus_del_window(base, sz);
+ base += sz;
+ size -= sz;
+ }
+}
+
+/*
+ * MBus windows can only have a power of two size, but PCI BARs do not
+ * have this constraint. Therefore, we have to split the PCI BAR into
+ * areas each having a power of two size. We start from the largest
+ * one (i.e highest order bit set in the size).
+ */
+static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
+ unsigned int target, unsigned int attribute,
+ phys_addr_t base, size_t size,
+ phys_addr_t remap)
+{
+ size_t size_mapped = 0;
+
+ while (size) {
+ size_t sz = 1 << (fls(size) - 1);
+ int ret;
+
+ ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
+ sz, remap);
+ if (ret) {
+ phys_addr_t end = base + sz - 1;
+
+ dev_err(&port->pcie->pdev->dev,
+ "Could not create MBus window at [mem %pa-%pa]: %d\n",
+ &base, &end, ret);
+ mvebu_pcie_del_windows(port, base - size_mapped,
+ size_mapped);
+ return;
+ }
+
+ size -= sz;
+ size_mapped += sz;
+ base += sz;
+ if (remap != MVEBU_MBUS_NO_REMAP)
+ remap += sz;
+ }
+}
+
+static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+{
+ phys_addr_t iobase;
+
+ /* Are the new iobase/iolimit values invalid? */
+ if (port->bridge.iolimit < port->bridge.iobase ||
+ port->bridge.iolimitupper < port->bridge.iobaseupper ||
+ !(port->bridge.command & PCI_COMMAND_IO)) {
+
+ /* If a window was configured, remove it */
+ if (port->iowin_base) {
+ mvebu_pcie_del_windows(port, port->iowin_base,
+ port->iowin_size);
+ port->iowin_base = 0;
+ port->iowin_size = 0;
+ }
+
+ return;
+ }
+
+ if (!mvebu_has_ioport(port)) {
+ dev_WARN(&port->pcie->pdev->dev,
+ "Attempt to set IO when IO is disabled\n");
+ return;
+ }
+
+ /*
+ * We read the PCI-to-PCI bridge emulated registers, and
+ * calculate the base address and size of the address decoding
+ * window to setup, according to the PCI-to-PCI bridge
+ * specifications. iobase is the bus address, port->iowin_base
+ * is the CPU address.
+ */
+ iobase = ((port->bridge.iobase & 0xF0) << 8) |
+ (port->bridge.iobaseupper << 16);
+ port->iowin_base = port->pcie->io.start + iobase;
+ port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
+ (port->bridge.iolimitupper << 16)) -
+ iobase) + 1;
+
+ mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
+ port->iowin_base, port->iowin_size,
+ iobase);
+}
+
+static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+{
+ /* Are the new membase/memlimit values invalid? */
+ if (port->bridge.memlimit < port->bridge.membase ||
+ !(port->bridge.command & PCI_COMMAND_MEMORY)) {
+
+ /* If a window was configured, remove it */
+ if (port->memwin_base) {
+ mvebu_pcie_del_windows(port, port->memwin_base,
+ port->memwin_size);
+ port->memwin_base = 0;
+ port->memwin_size = 0;
+ }
+
+ return;
+ }
+
+ /*
+ * We read the PCI-to-PCI bridge emulated registers, and
+ * calculate the base address and size of the address decoding
+ * window to setup, according to the PCI-to-PCI bridge
+ * specifications.
+ */
+ port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16);
+ port->memwin_size =
+ (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+ port->memwin_base + 1;
+
+ mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
+ port->memwin_base, port->memwin_size,
+ MVEBU_MBUS_NO_REMAP);
+}
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
+{
+ struct mvebu_sw_pci_bridge *bridge = &port->bridge;
+
+ memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge));
+
+ bridge->class = PCI_CLASS_BRIDGE_PCI;
+ bridge->vendor = PCI_VENDOR_ID_MARVELL;
+ bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
+ bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
+ bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
+ bridge->cache_line_size = 0x10;
+
+ /* We support 32 bits I/O addressing */
+ bridge->iobase = PCI_IO_RANGE_TYPE_32;
+ bridge->iolimit = PCI_IO_RANGE_TYPE_32;
+}
+
+/*
+ * Read the configuration space of the PCI-to-PCI bridge associated with
+ * the given PCIe interface.
+ */
+static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
+ unsigned int where, int size, u32 *value)
+{
+ struct mvebu_sw_pci_bridge *bridge = &port->bridge;
+
+ switch (where & ~3) {
+ case PCI_VENDOR_ID:
+ *value = bridge->device << 16 | bridge->vendor;
+ break;
+
+ case PCI_COMMAND:
+ *value = bridge->command;
+ break;
+
+ case PCI_CLASS_REVISION:
+ *value = bridge->class << 16 | bridge->interface << 8 |
+ bridge->revision;
+ break;
+
+ case PCI_CACHE_LINE_SIZE:
+ *value = bridge->bist << 24 | bridge->header_type << 16 |
+ bridge->latency_timer << 8 | bridge->cache_line_size;
+ break;
+
+ case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
+ *value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4];
+ break;
+
+ case PCI_PRIMARY_BUS:
+ *value = (bridge->secondary_latency_timer << 24 |
+ bridge->subordinate_bus << 16 |
+ bridge->secondary_bus << 8 |
+ bridge->primary_bus);
+ break;
+
+ case PCI_IO_BASE:
+ if (!mvebu_has_ioport(port))
+ *value = bridge->secondary_status << 16;
+ else
+ *value = (bridge->secondary_status << 16 |
+ bridge->iolimit << 8 |
+ bridge->iobase);
+ break;
+
+ case PCI_MEMORY_BASE:
+ *value = (bridge->memlimit << 16 | bridge->membase);
+ break;
+
+ case PCI_PREF_MEMORY_BASE:
+ *value = 0;
+ break;
+
+ case PCI_IO_BASE_UPPER16:
+ *value = (bridge->iolimitupper << 16 | bridge->iobaseupper);
+ break;
+
+ case PCI_ROM_ADDRESS1:
+ *value = 0;
+ break;
+
+ case PCI_INTERRUPT_LINE:
+ /* LINE PIN MIN_GNT MAX_LAT */
+ *value = 0;
+ break;
+
+ default:
+ *value = 0xffffffff;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (size == 2)
+ *value = (*value >> (8 * (where & 3))) & 0xffff;
+ else if (size == 1)
+ *value = (*value >> (8 * (where & 3))) & 0xff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* Write to the PCI-to-PCI bridge configuration space */
+static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
+ unsigned int where, int size, u32 value)
+{
+ struct mvebu_sw_pci_bridge *bridge = &port->bridge;
+ u32 mask, reg;
+ int err;
+
+ if (size == 4)
+ mask = 0x0;
+ else if (size == 2)
+ mask = ~(0xffff << ((where & 3) * 8));
+ else if (size == 1)
+ mask = ~(0xff << ((where & 3) * 8));
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, &reg);
+ if (err)
+ return err;
+
+ value = (reg & mask) | value << ((where & 3) * 8);
+
+ switch (where & ~3) {
+ case PCI_COMMAND:
+ {
+ u32 old = bridge->command;
+
+ if (!mvebu_has_ioport(port))
+ value &= ~PCI_COMMAND_IO;
+
+ bridge->command = value & 0xffff;
+ if ((old ^ bridge->command) & PCI_COMMAND_IO)
+ mvebu_pcie_handle_iobase_change(port);
+ if ((old ^ bridge->command) & PCI_COMMAND_MEMORY)
+ mvebu_pcie_handle_membase_change(port);
+ break;
+ }
+
+ case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
+ bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value;
+ break;
+
+ case PCI_IO_BASE:
+ /*
+ * We also keep bit 1 set, it is a read-only bit that
+ * indicates we support 32 bits addressing for the
+ * I/O
+ */
+ bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32;
+ bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32;
+ mvebu_pcie_handle_iobase_change(port);
+ break;
+
+ case PCI_MEMORY_BASE:
+ bridge->membase = value & 0xffff;
+ bridge->memlimit = value >> 16;
+ mvebu_pcie_handle_membase_change(port);
+ break;
+
+ case PCI_IO_BASE_UPPER16:
+ bridge->iobaseupper = value & 0xffff;
+ bridge->iolimitupper = value >> 16;
+ mvebu_pcie_handle_iobase_change(port);
+ break;
+
+ case PCI_PRIMARY_BUS:
+ bridge->primary_bus = value & 0xff;
+ bridge->secondary_bus = (value >> 8) & 0xff;
+ bridge->subordinate_bus = (value >> 16) & 0xff;
+ bridge->secondary_latency_timer = (value >> 24) & 0xff;
+ mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus);
+ break;
+
+ default:
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
+ struct pci_bus *bus,
+ int devfn)
+{
+ int i;
+
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
+ if (bus->number == 0 && port->devfn == devfn)
+ return port;
+ if (bus->number != 0 &&
+ bus->number >= port->bridge.secondary_bus &&
+ bus->number <= port->bridge.subordinate_bus)
+ return port;
+ }
+
+ return NULL;
+}
+
+/* PCI configuration space write function */
+static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct mvebu_pcie_port *port;
+ int ret;
+
+ port = mvebu_pcie_find_port(pcie, bus, devfn);
+ if (!port)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Access the emulated PCI-to-PCI bridge */
+ if (bus->number == 0)
+ return mvebu_sw_pci_bridge_write(port, where, size, val);
+
+ if (!mvebu_pcie_link_up(port))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * On the secondary bus, we don't want to expose any other
+ * device than the device physically connected in the PCIe
+ * slot, visible in slot 0. In slot 1, there's a special
+ * Marvell device that only makes sense when the Armada is
+ * used as a PCIe endpoint.
+ */
+ if (bus->number == port->bridge.secondary_bus &&
+ PCI_SLOT(devfn) != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Access the real PCIe interface */
+ ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
+ where, size, val);
+
+ return ret;
+}
+
+/* PCI configuration space read function */
+static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct mvebu_pcie_port *port;
+ int ret;
+
+ port = mvebu_pcie_find_port(pcie, bus, devfn);
+ if (!port) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ /* Access the emulated PCI-to-PCI bridge */
+ if (bus->number == 0)
+ return mvebu_sw_pci_bridge_read(port, where, size, val);
+
+ if (!mvebu_pcie_link_up(port)) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ /*
+ * On the secondary bus, we don't want to expose any other
+ * device than the device physically connected in the PCIe
+ * slot, visible in slot 0. In slot 1, there's a special
+ * Marvell device that only makes sense when the Armada is
+ * used as a PCIe endpoint.
+ */
+ if (bus->number == port->bridge.secondary_bus &&
+ PCI_SLOT(devfn) != 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ /* Access the real PCIe interface */
+ ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
+ where, size, val);
+
+ return ret;
+}
+
+static struct pci_ops mvebu_pcie_ops = {
+ .read = mvebu_pcie_rd_conf,
+ .write = mvebu_pcie_wr_conf,
+};
+
+static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct mvebu_pcie *pcie = sys_to_pcie(sys);
+ int i;
+ int domain = 0;
+
+#ifdef CONFIG_PCI_DOMAINS
+ domain = sys->domain;
+#endif
+
+ snprintf(pcie->mem_name, sizeof(pcie->mem_name), "PCI MEM %04x",
+ domain);
+ pcie->mem.name = pcie->mem_name;
+
+ snprintf(pcie->io_name, sizeof(pcie->io_name), "PCI I/O %04x", domain);
+ pcie->realio.name = pcie->io_name;
+
+ if (request_resource(&iomem_resource, &pcie->mem))
+ return 0;
+
+ if (resource_size(&pcie->realio) != 0) {
+ if (request_resource(&ioport_resource, &pcie->realio)) {
+ release_resource(&pcie->mem);
+ return 0;
+ }
+ pci_add_resource_offset(&sys->resources, &pcie->realio,
+ sys->io_offset);
+ }
+ pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
+ pci_add_resource(&sys->resources, &pcie->busn);
+
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
+ if (!port->base)
+ continue;
+ mvebu_pcie_setup_hw(port);
+ }
+
+ return 1;
+}
+
+static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+{
+ struct mvebu_pcie *pcie = sys_to_pcie(sys);
+ struct pci_bus *bus;
+
+ bus = pci_create_root_bus(&pcie->pdev->dev, sys->busnr,
+ &mvebu_pcie_ops, sys, &sys->resources);
+ if (!bus)
+ return NULL;
+
+ pci_scan_child_bus(bus);
+
+ return bus;
+}
+
+static void mvebu_pcie_add_bus(struct pci_bus *bus)
+{
+ struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+ bus->msi = pcie->msi;
+}
+
+static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
+ const struct resource *res,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align)
+{
+ if (dev->bus->number != 0)
+ return start;
+
+ /*
+ * On the PCI-to-PCI bridge side, the I/O windows must have at
+ * least a 64 KB size and the memory windows must have at
+ * least a 1 MB size. Moreover, MBus windows need to have a
+ * base address aligned on their size, and their size must be
+ * a power of two. This means that if the BAR doesn't have a
+ * power of two size, several MBus windows will actually be
+ * created. We need to ensure that the biggest MBus window
+ * (which will be the first one) is aligned on its size, which
+ * explains the rounddown_pow_of_two() being done here.
+ */
+ if (res->flags & IORESOURCE_IO)
+ return round_up(start, max_t(resource_size_t, SZ_64K,
+ rounddown_pow_of_two(size)));
+ else if (res->flags & IORESOURCE_MEM)
+ return round_up(start, max_t(resource_size_t, SZ_1M,
+ rounddown_pow_of_two(size)));
+ else
+ return start;
+}
+
+static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
+{
+ struct hw_pci hw;
+
+ memset(&hw, 0, sizeof(hw));
+
+ hw.nr_controllers = 1;
+ hw.private_data = (void **)&pcie;
+ hw.setup = mvebu_pcie_setup;
+ hw.scan = mvebu_pcie_scan_bus;
+ hw.map_irq = of_irq_parse_and_map_pci;
+ hw.ops = &mvebu_pcie_ops;
+ hw.align_resource = mvebu_pcie_align_resource;
+ hw.add_bus = mvebu_pcie_add_bus;
+
+ pci_common_init(&hw);
+}
+
+/*
+ * Looks up the list of register addresses encoded into the reg =
+ * <...> property for one that matches the given port/lane. Once
+ * found, maps it.
+ */
+static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
+ struct device_node *np,
+ struct mvebu_pcie_port *port)
+{
+ struct resource regs;
+ int ret = 0;
+
+ ret = of_address_to_resource(np, 0, &regs);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return devm_ioremap_resource(&pdev->dev, &regs);
+}
+
+#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
+#define DT_TYPE_IO 0x1
+#define DT_TYPE_MEM32 0x2
+#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
+#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
+
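+/*
+ * Look up the "ranges" entry matching the given slot and resource type.
+ * Each entry consists of three PCI address cells (a flags cell plus a
+ * 64-bit PCI address), a parent CPU address whose two top bytes carry
+ * the MBus target and attribute, and two size cells.
+ */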
+static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+ unsigned long type,
+ unsigned int *tgt,
+ unsigned int *attr)
+{
+ const int na = 3, ns = 2;
+ const __be32 *range;
+ int rlen, nranges, rangesz, pna, i;
+
+ *tgt = -1;
+ *attr = -1;
+
+ range = of_get_property(np, "ranges", &rlen);
+ if (!range)
+ return -EINVAL;
+
+ pna = of_n_addr_cells(np);
+ rangesz = pna + na + ns;
+ nranges = rlen / sizeof(__be32) / rangesz;
+
+ for (i = 0; i < nranges; i++) {
+ u32 flags = of_read_number(range, 1);
+ u32 slot = of_read_number(range + 1, 1);
+ u64 cpuaddr = of_read_number(range + na, pna);
+		unsigned long rtype = 0;
+
+ if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
+ rtype = IORESOURCE_IO;
+ else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
+ rtype = IORESOURCE_MEM;
+
+ if (slot == PCI_SLOT(devfn) && type == rtype) {
+ *tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
+ *attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+ return 0;
+ }
+
+ range += rangesz;
+ }
+
+ return -ENOENT;
+}
+
+static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
+{
+ struct device_node *msi_node;
+
+ msi_node = of_parse_phandle(pcie->pdev->dev.of_node,
+ "msi-parent", 0);
+ if (!msi_node)
+ return;
+
+ pcie->msi = of_pci_find_msi_chip_by_node(msi_node);
+
+ if (pcie->msi)
+ pcie->msi->dev = &pcie->pdev->dev;
+}
+
+static int mvebu_pcie_probe(struct platform_device *pdev)
+{
+ struct mvebu_pcie *pcie;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ int i, ret;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie),
+ GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
+
+ /* Get the PCIe memory and I/O aperture */
+ mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
+ if (resource_size(&pcie->mem) == 0) {
+ dev_err(&pdev->dev, "invalid memory aperture size\n");
+ return -EINVAL;
+ }
+
+ mvebu_mbus_get_pcie_io_aperture(&pcie->io);
+
+ if (resource_size(&pcie->io) != 0) {
+ pcie->realio.flags = pcie->io.flags;
+ pcie->realio.start = PCIBIOS_MIN_IO;
+ pcie->realio.end = min_t(resource_size_t,
+ IO_SPACE_LIMIT,
+ resource_size(&pcie->io));
+	} else {
+		pcie->realio = pcie->io;
+	}
+
+ /* Get the bus range */
+ ret = of_pci_parse_bus_range(np, &pcie->busn);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse bus-range property: %d\n",
+ ret);
+ return ret;
+ }
+
+ i = 0;
+ for_each_child_of_node(pdev->dev.of_node, child) {
+ if (!of_device_is_available(child))
+ continue;
+ i++;
+ }
+
+ pcie->ports = devm_kzalloc(&pdev->dev, i *
+ sizeof(struct mvebu_pcie_port),
+ GFP_KERNEL);
+ if (!pcie->ports)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_child_of_node(pdev->dev.of_node, child) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
+ enum of_gpio_flags flags;
+
+ if (!of_device_is_available(child))
+ continue;
+
+ port->pcie = pcie;
+
+ if (of_property_read_u32(child, "marvell,pcie-port",
+ &port->port)) {
+ dev_warn(&pdev->dev,
+ "ignoring PCIe DT node, missing pcie-port property\n");
+ continue;
+ }
+
+ if (of_property_read_u32(child, "marvell,pcie-lane",
+ &port->lane))
+ port->lane = 0;
+
+ port->name = kasprintf(GFP_KERNEL, "pcie%d.%d",
+ port->port, port->lane);
+
+ port->devfn = of_pci_get_devfn(child);
+ if (port->devfn < 0)
+ continue;
+
+ ret = mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_MEM,
+ &port->mem_target, &port->mem_attr);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "PCIe%d.%d: cannot get tgt/attr for mem window\n",
+ port->port, port->lane);
+ continue;
+ }
+
+		if (resource_size(&pcie->io) != 0) {
+			mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO,
+					   &port->io_target, &port->io_attr);
+		} else {
+			port->io_target = -1;
+			port->io_attr = -1;
+		}
+
+ port->reset_gpio = of_get_named_gpio_flags(child,
+ "reset-gpios", 0, &flags);
+ if (gpio_is_valid(port->reset_gpio)) {
+ u32 reset_udelay = 20000;
+
+ port->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
+ port->reset_name = kasprintf(GFP_KERNEL,
+ "pcie%d.%d-reset", port->port, port->lane);
+ of_property_read_u32(child, "reset-delay-us",
+ &reset_udelay);
+
+ ret = devm_gpio_request_one(&pdev->dev,
+ port->reset_gpio, GPIOF_DIR_OUT, port->reset_name);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ continue;
+ }
+
+ gpio_set_value(port->reset_gpio,
+ (port->reset_active_low) ? 1 : 0);
+			msleep(reset_udelay / 1000);
+ }
+
+ port->clk = of_clk_get_by_name(child, NULL);
+ if (IS_ERR(port->clk)) {
+ dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
+ port->port, port->lane);
+ continue;
+ }
+
+ ret = clk_prepare_enable(port->clk);
+ if (ret)
+ continue;
+
+ port->base = mvebu_pcie_map_registers(pdev, child, port);
+ if (IS_ERR(port->base)) {
+ dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
+ port->port, port->lane);
+ port->base = NULL;
+ clk_disable_unprepare(port->clk);
+ continue;
+ }
+
+ mvebu_pcie_set_local_dev_nr(port, 1);
+
+ port->dn = child;
+ mvebu_sw_pci_bridge_init(port);
+ i++;
+ }
+
+ pcie->nports = i;
+
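+	/*
+	 * Map the PCI I/O aperture into the fixed ARM PCI I/O virtual
+	 * area, one 64K window at a time.
+	 */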
+ for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K)
+ pci_ioremap_io(i, pcie->io.start + i);
+
+ mvebu_pcie_msi_enable(pcie);
+ mvebu_pcie_enable(pcie);
+
+ return 0;
+}
+
+static const struct of_device_id mvebu_pcie_of_match_table[] = {
+ { .compatible = "marvell,armada-xp-pcie", },
+ { .compatible = "marvell,armada-370-pcie", },
+ { .compatible = "marvell,dove-pcie", },
+ { .compatible = "marvell,kirkwood-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
+
+static struct platform_driver mvebu_pcie_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mvebu-pcie",
+ .of_match_table = mvebu_pcie_of_match_table,
+ /* driver unloading/unbinding currently not supported */
+ .suppress_bind_attrs = true,
+ },
+ .probe = mvebu_pcie_probe,
+};
+module_platform_driver(mvebu_pcie_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell EBU PCIe driver");
+MODULE_LICENSE("GPLv2");
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
new file mode 100644
index 00000000000..3ef854f5a5b
--- /dev/null
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -0,0 +1,426 @@
+/*
+ * pci-rcar-gen2: internal PCI bus support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+/* AHB-PCI Bridge PCI communication registers */
+#define RCAR_AHBPCI_PCICOM_OFFSET 0x800
+
+#define RCAR_PCIAHB_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x00)
+#define RCAR_PCIAHB_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x04)
+#define RCAR_PCIAHB_PREFETCH0 0x0
+#define RCAR_PCIAHB_PREFETCH4 0x1
+#define RCAR_PCIAHB_PREFETCH8 0x2
+#define RCAR_PCIAHB_PREFETCH16 0x3
+
+#define RCAR_AHBPCI_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x10)
+#define RCAR_AHBPCI_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x14)
+#define RCAR_AHBPCI_WIN_CTR_MEM (3 << 1)
+#define RCAR_AHBPCI_WIN_CTR_CFG (5 << 1)
+#define RCAR_AHBPCI_WIN1_HOST (1 << 30)
+#define RCAR_AHBPCI_WIN1_DEVICE (1 << 31)
+
+#define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20)
+#define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24)
+#define RCAR_PCI_INT_SIGTABORT (1 << 0)
+#define RCAR_PCI_INT_SIGRETABORT (1 << 1)
+#define RCAR_PCI_INT_REMABORT (1 << 2)
+#define RCAR_PCI_INT_PERR (1 << 3)
+#define RCAR_PCI_INT_SIGSERR (1 << 4)
+#define RCAR_PCI_INT_RESERR (1 << 5)
+#define RCAR_PCI_INT_WIN1ERR (1 << 12)
+#define RCAR_PCI_INT_WIN2ERR (1 << 13)
+#define RCAR_PCI_INT_A (1 << 16)
+#define RCAR_PCI_INT_B (1 << 17)
+#define RCAR_PCI_INT_PME (1 << 19)
+#define RCAR_PCI_INT_ALLERRORS (RCAR_PCI_INT_SIGTABORT | \
+ RCAR_PCI_INT_SIGRETABORT | \
+ RCAR_PCI_INT_REMABORT | \
+ RCAR_PCI_INT_PERR | \
+ RCAR_PCI_INT_SIGSERR | \
+ RCAR_PCI_INT_RESERR | \
+ RCAR_PCI_INT_WIN1ERR | \
+ RCAR_PCI_INT_WIN2ERR)
+
+#define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30)
+#define RCAR_AHB_BUS_MMODE_HTRANS (1 << 0)
+#define RCAR_AHB_BUS_MMODE_BYTE_BURST (1 << 1)
+#define RCAR_AHB_BUS_MMODE_WR_INCR (1 << 2)
+#define RCAR_AHB_BUS_MMODE_HBUS_REQ (1 << 7)
+#define RCAR_AHB_BUS_SMODE_READYCTR (1 << 17)
+#define RCAR_AHB_BUS_MODE (RCAR_AHB_BUS_MMODE_HTRANS | \
+ RCAR_AHB_BUS_MMODE_BYTE_BURST | \
+ RCAR_AHB_BUS_MMODE_WR_INCR | \
+ RCAR_AHB_BUS_MMODE_HBUS_REQ | \
+ RCAR_AHB_BUS_SMODE_READYCTR)
+
+#define RCAR_USBCTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x34)
+#define RCAR_USBCTR_USBH_RST (1 << 0)
+#define RCAR_USBCTR_PCICLK_MASK (1 << 1)
+#define RCAR_USBCTR_PLL_RST (1 << 2)
+#define RCAR_USBCTR_DIRPD (1 << 8)
+#define RCAR_USBCTR_PCIAHB_WIN2_EN (1 << 9)
+#define RCAR_USBCTR_PCIAHB_WIN1_256M (0 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_512M (1 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_1G (2 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_2G (3 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_MASK (3 << 10)
+
+#define RCAR_PCI_ARBITER_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x40)
+#define RCAR_PCI_ARBITER_PCIREQ0 (1 << 0)
+#define RCAR_PCI_ARBITER_PCIREQ1 (1 << 1)
+#define RCAR_PCI_ARBITER_PCIBP_MODE (1 << 12)
+
+#define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48)
+
+struct rcar_pci_priv {
+ struct device *dev;
+ void __iomem *reg;
+ struct resource io_res;
+ struct resource mem_res;
+ struct resource *cfg_res;
+ unsigned busnr;
+ int irq;
+ unsigned long window_size;
+};
+
+/* PCI configuration space operations */
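+/*
+ * Slot 0 addresses the AHB-PCI bridge itself, while slots 1 and 2 reach
+ * the internal OHCI/EHCI controllers; WIN1 is switched into configuration
+ * mode for the duration of each access.
+ */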
+static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_sys_data *sys = bus->sysdata;
+ struct rcar_pci_priv *priv = sys->private_data;
+ int slot, val;
+
+ if (sys->busnr != bus->number || PCI_FUNC(devfn))
+ return NULL;
+
+ /* Only one EHCI/OHCI device built-in */
+ slot = PCI_SLOT(devfn);
+ if (slot > 2)
+ return NULL;
+
+ /* bridge logic only has registers to 0x40 */
+ if (slot == 0x0 && where >= 0x40)
+ return NULL;
+
+ val = slot ? RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG :
+ RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG;
+
+ iowrite32(val, priv->reg + RCAR_AHBPCI_WIN1_CTR_REG);
+ return priv->reg + (slot >> 1) * 0x100 + where;
+}
+
+static int rcar_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
+
+ if (!reg)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ *val = ioread8(reg);
+ break;
+ case 2:
+ *val = ioread16(reg);
+ break;
+ default:
+ *val = ioread32(reg);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rcar_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
+
+ if (!reg)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ iowrite8(val, reg);
+ break;
+ case 2:
+ iowrite16(val, reg);
+ break;
+ default:
+ iowrite32(val, reg);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* PCI interrupt mapping */
+static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct pci_sys_data *sys = dev->bus->sysdata;
+ struct rcar_pci_priv *priv = sys->private_data;
+ int irq;
+
+ irq = of_irq_parse_and_map_pci(dev, slot, pin);
+ if (!irq)
+ irq = priv->irq;
+
+ return irq;
+}
+
+#ifdef CONFIG_PCI_DEBUG
+/* if debug enabled, then attach an error handler irq to the bridge */
+
+static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
+{
+ struct rcar_pci_priv *priv = pw;
+ u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
+
+ if (status & RCAR_PCI_INT_ALLERRORS) {
+ dev_err(priv->dev, "error irq: status %08x\n", status);
+
+ /* clear the error(s) */
+ iowrite32(status & RCAR_PCI_INT_ALLERRORS,
+ priv->reg + RCAR_PCI_INT_STATUS_REG);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ ret = devm_request_irq(priv->dev, priv->irq, rcar_pci_err_irq,
+ IRQF_SHARED, "error irq", priv);
+ if (ret) {
+ dev_err(priv->dev, "cannot claim IRQ for error handling\n");
+ return;
+ }
+
+ val = ioread32(priv->reg + RCAR_PCI_INT_ENABLE_REG);
+ val |= RCAR_PCI_INT_ALLERRORS;
+ iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG);
+}
+#else
+static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { }
+#endif
+
+/* PCI host controller setup */
+static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
+{
+ struct rcar_pci_priv *priv = sys->private_data;
+ void __iomem *reg = priv->reg;
+ u32 val;
+
+ pm_runtime_enable(priv->dev);
+ pm_runtime_get_sync(priv->dev);
+
+ val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
+ dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val);
+
+ /* Disable Direct Power Down State and assert reset */
+ val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
+ val |= RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST;
+ iowrite32(val, reg + RCAR_USBCTR_REG);
+ udelay(4);
+
+ /* De-assert reset and reset PCIAHB window1 size */
+ val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK |
+ RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST);
+
+ /* Setup PCIAHB window1 size */
+ switch (priv->window_size) {
+ case SZ_2G:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_2G;
+ break;
+ case SZ_1G:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_1G;
+ break;
+ case SZ_512M:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_512M;
+ break;
+ default:
+		dev_warn(priv->dev, "unknown window size %lu - defaulting to 256M\n",
+			 priv->window_size);
+ priv->window_size = SZ_256M;
+ /* fall-through */
+ case SZ_256M:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_256M;
+ break;
+ }
+ iowrite32(val, reg + RCAR_USBCTR_REG);
+
+ /* Configure AHB master and slave modes */
+ iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG);
+
+ /* Configure PCI arbiter */
+ val = ioread32(reg + RCAR_PCI_ARBITER_CTR_REG);
+ val |= RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 |
+ RCAR_PCI_ARBITER_PCIBP_MODE;
+ iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG);
+
+ /* PCI-AHB mapping: 0x40000000 base */
+ iowrite32(0x40000000 | RCAR_PCIAHB_PREFETCH16,
+ reg + RCAR_PCIAHB_WIN1_CTR_REG);
+
+ /* AHB-PCI mapping: OHCI/EHCI registers */
+ val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM;
+ iowrite32(val, reg + RCAR_AHBPCI_WIN2_CTR_REG);
+
+ /* Enable AHB-PCI bridge PCI configuration access */
+ iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG,
+ reg + RCAR_AHBPCI_WIN1_CTR_REG);
+ /* Set PCI-AHB Window1 address */
+ iowrite32(0x40000000 | PCI_BASE_ADDRESS_MEM_PREFETCH,
+ reg + PCI_BASE_ADDRESS_1);
+ /* Set AHB-PCI bridge PCI communication area address */
+ val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET;
+ iowrite32(val, reg + PCI_BASE_ADDRESS_0);
+
+ val = ioread32(reg + PCI_COMMAND);
+ val |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ iowrite32(val, reg + PCI_COMMAND);
+
+ /* Enable PCI interrupts */
+ iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME,
+ reg + RCAR_PCI_INT_ENABLE_REG);
+
+ if (priv->irq > 0)
+ rcar_pci_setup_errirq(priv);
+
+ /* Add PCI resources */
+ pci_add_resource(&sys->resources, &priv->io_res);
+ pci_add_resource(&sys->resources, &priv->mem_res);
+
+ /* Setup bus number based on platform device id / of bus-range */
+ sys->busnr = priv->busnr;
+ return 1;
+}
+
+static struct pci_ops rcar_pci_ops = {
+ .read = rcar_pci_read_config,
+ .write = rcar_pci_write_config,
+};
+
+static int rcar_pci_probe(struct platform_device *pdev)
+{
+ struct resource *cfg_res, *mem_res;
+ struct rcar_pci_priv *priv;
+ void __iomem *reg;
+ struct hw_pci hw;
+ void *hw_private[1];
+
+ cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, cfg_res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem_res || !mem_res->start)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof(struct rcar_pci_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mem_res = *mem_res;
+ /*
+ * The controller does not support/use port I/O,
+ * so setup a dummy port I/O region here.
+ */
+ priv->io_res.start = priv->mem_res.start;
+ priv->io_res.end = priv->mem_res.end;
+ priv->io_res.flags = IORESOURCE_IO;
+
+ priv->cfg_res = cfg_res;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ priv->reg = reg;
+ priv->dev = &pdev->dev;
+
+ if (priv->irq < 0) {
+ dev_err(&pdev->dev, "no valid irq found\n");
+ return priv->irq;
+ }
+
+ priv->window_size = SZ_1G;
+
+ if (pdev->dev.of_node) {
+ struct resource busnr;
+ int ret;
+
+ ret = of_pci_parse_bus_range(pdev->dev.of_node, &busnr);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse bus-range\n");
+ return ret;
+ }
+
+ priv->busnr = busnr.start;
+ if (busnr.end != busnr.start)
+ dev_warn(&pdev->dev, "only one bus number supported\n");
+ } else {
+ priv->busnr = pdev->id;
+ }
+
+ hw_private[0] = priv;
+ memset(&hw, 0, sizeof(hw));
+ hw.nr_controllers = ARRAY_SIZE(hw_private);
+ hw.private_data = hw_private;
+ hw.map_irq = rcar_pci_map_irq;
+ hw.ops = &rcar_pci_ops;
+ hw.setup = rcar_pci_setup;
+ pci_common_init_dev(&pdev->dev, &hw);
+ return 0;
+}
+
+static const struct of_device_id rcar_pci_of_match[] = {
+ { .compatible = "renesas,pci-r8a7790", },
+ { .compatible = "renesas,pci-r8a7791", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, rcar_pci_of_match);
+
+static struct platform_driver rcar_pci_driver = {
+ .driver = {
+ .name = "pci-rcar-gen2",
+ .owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
+ .of_match_table = rcar_pci_of_match,
+ },
+ .probe = rcar_pci_probe,
+};
+
+module_platform_driver(rcar_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Renesas R-Car Gen2 internal PCI");
+MODULE_AUTHOR("Valentine Barshak <valentine.barshak@cogentembedded.com>");
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
new file mode 100644
index 00000000000..083cf37ca04
--- /dev/null
+++ b/drivers/pci/host/pci-tegra.c
@@ -0,0 +1,1719 @@
+/*
+ * PCIe host controller driver for Tegra SoCs
+ *
+ * Copyright (c) 2010, CompuLab, Ltd.
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Based on NVIDIA PCIe driver
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * Bits taken from arch/arm/mach-dove/pcie.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/tegra-cpuidle.h>
+#include <linux/tegra-powergate.h>
+#include <linux/vmalloc.h>
+#include <linux/regulator/consumer.h>
+
+#include <asm/mach/irq.h>
+#include <asm/mach/map.h>
+#include <asm/mach/pci.h>
+
+#define INT_PCI_MSI_NR (8 * 32)
+
+/* register definitions */
+
+#define AFI_AXI_BAR0_SZ 0x00
+#define AFI_AXI_BAR1_SZ 0x04
+#define AFI_AXI_BAR2_SZ 0x08
+#define AFI_AXI_BAR3_SZ 0x0c
+#define AFI_AXI_BAR4_SZ 0x10
+#define AFI_AXI_BAR5_SZ 0x14
+
+#define AFI_AXI_BAR0_START 0x18
+#define AFI_AXI_BAR1_START 0x1c
+#define AFI_AXI_BAR2_START 0x20
+#define AFI_AXI_BAR3_START 0x24
+#define AFI_AXI_BAR4_START 0x28
+#define AFI_AXI_BAR5_START 0x2c
+
+#define AFI_FPCI_BAR0 0x30
+#define AFI_FPCI_BAR1 0x34
+#define AFI_FPCI_BAR2 0x38
+#define AFI_FPCI_BAR3 0x3c
+#define AFI_FPCI_BAR4 0x40
+#define AFI_FPCI_BAR5 0x44
+
+#define AFI_CACHE_BAR0_SZ 0x48
+#define AFI_CACHE_BAR0_ST 0x4c
+#define AFI_CACHE_BAR1_SZ 0x50
+#define AFI_CACHE_BAR1_ST 0x54
+
+#define AFI_MSI_BAR_SZ 0x60
+#define AFI_MSI_FPCI_BAR_ST 0x64
+#define AFI_MSI_AXI_BAR_ST 0x68
+
+#define AFI_MSI_VEC0 0x6c
+#define AFI_MSI_VEC1 0x70
+#define AFI_MSI_VEC2 0x74
+#define AFI_MSI_VEC3 0x78
+#define AFI_MSI_VEC4 0x7c
+#define AFI_MSI_VEC5 0x80
+#define AFI_MSI_VEC6 0x84
+#define AFI_MSI_VEC7 0x88
+
+#define AFI_MSI_EN_VEC0 0x8c
+#define AFI_MSI_EN_VEC1 0x90
+#define AFI_MSI_EN_VEC2 0x94
+#define AFI_MSI_EN_VEC3 0x98
+#define AFI_MSI_EN_VEC4 0x9c
+#define AFI_MSI_EN_VEC5 0xa0
+#define AFI_MSI_EN_VEC6 0xa4
+#define AFI_MSI_EN_VEC7 0xa8
+
+#define AFI_CONFIGURATION 0xac
+#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
+
+#define AFI_FPCI_ERROR_MASKS 0xb0
+
+#define AFI_INTR_MASK 0xb4
+#define AFI_INTR_MASK_INT_MASK (1 << 0)
+#define AFI_INTR_MASK_MSI_MASK (1 << 8)
+
+#define AFI_INTR_CODE 0xb8
+#define AFI_INTR_CODE_MASK 0xf
+#define AFI_INTR_AXI_SLAVE_ERROR 1
+#define AFI_INTR_AXI_DECODE_ERROR 2
+#define AFI_INTR_TARGET_ABORT 3
+#define AFI_INTR_MASTER_ABORT 4
+#define AFI_INTR_INVALID_WRITE 5
+#define AFI_INTR_LEGACY 6
+#define AFI_INTR_FPCI_DECODE_ERROR 7
+
+#define AFI_INTR_SIGNATURE 0xbc
+#define AFI_UPPER_FPCI_ADDRESS 0xc0
+#define AFI_SM_INTR_ENABLE 0xc4
+#define AFI_SM_INTR_INTA_ASSERT (1 << 0)
+#define AFI_SM_INTR_INTB_ASSERT (1 << 1)
+#define AFI_SM_INTR_INTC_ASSERT (1 << 2)
+#define AFI_SM_INTR_INTD_ASSERT (1 << 3)
+#define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
+#define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
+#define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
+#define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
+
+#define AFI_AFI_INTR_ENABLE 0xc8
+#define AFI_INTR_EN_INI_SLVERR (1 << 0)
+#define AFI_INTR_EN_INI_DECERR (1 << 1)
+#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
+#define AFI_INTR_EN_TGT_DECERR (1 << 3)
+#define AFI_INTR_EN_TGT_WRERR (1 << 4)
+#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
+#define AFI_INTR_EN_AXI_DECERR (1 << 6)
+#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
+#define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
+
+#define AFI_PCIE_CONFIG 0x0f8
+#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
+#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
+
+#define AFI_FUSE 0x104
+#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
+
+#define AFI_PEX0_CTRL 0x110
+#define AFI_PEX1_CTRL 0x118
+#define AFI_PEX2_CTRL 0x128
+#define AFI_PEX_CTRL_RST (1 << 0)
+#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
+#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
+
+#define AFI_PEXBIAS_CTRL_0 0x168
+
+#define RP_VEND_XP 0x00000F00
+#define RP_VEND_XP_DL_UP (1 << 30)
+
+#define RP_LINK_CONTROL_STATUS 0x00000090
+#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
+#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
+
+#define PADS_CTL_SEL 0x0000009C
+
+#define PADS_CTL 0x000000A0
+#define PADS_CTL_IDDQ_1L (1 << 0)
+#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
+#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
+
+#define PADS_PLL_CTL_TEGRA20 0x000000B8
+#define PADS_PLL_CTL_TEGRA30 0x000000B4
+#define PADS_PLL_CTL_RST_B4SM (1 << 1)
+#define PADS_PLL_CTL_LOCKDET (1 << 8)
+#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
+#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
+#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
+#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
+#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
+#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
+#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
+#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
+
+#define PADS_REFCLK_CFG0 0x000000C8
+#define PADS_REFCLK_CFG1 0x000000CC
+
+/*
+ * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
+ * entries, one entry per PCIe port. These field definitions and desired
+ * values aren't in the TRM, but do come from NVIDIA.
+ */
+#define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
+#define PADS_REFCLK_CFG_E_TERM_SHIFT 7
+#define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
+#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
+
+/* Default value provided by HW engineering is 0xfa5c */
+#define PADS_REFCLK_CFG_VALUE \
+ ( \
+ (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
+ (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
+ (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
+ (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
+ )
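+
+/*
+ * Worked example (not part of the original patch): with the shifts above,
+ * TERM = 0x17, E_TERM = 0, PREDI = 0xa and DRVI = 0xf pack as
+ *
+ *   (0x17 << 2) | (0x0 << 7) | (0xa << 8) | (0xf << 12)
+ *   = 0x005c | 0x0a00 | 0xf000 = 0xfa5c
+ *
+ * which matches the default value quoted from HW engineering above.
+ */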
+
+struct tegra_msi {
+ struct msi_chip chip;
+ DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ struct irq_domain *domain;
+ unsigned long pages;
+ struct mutex lock;
+ int irq;
+};
+
+/* used to differentiate between Tegra SoC generations */
+struct tegra_pcie_soc_data {
+ unsigned int num_ports;
+ unsigned int msi_base_shift;
+ u32 pads_pll_ctl;
+ u32 tx_ref_sel;
+ bool has_pex_clkreq_en;
+ bool has_pex_bias_ctrl;
+ bool has_intr_prsnt_sense;
+ bool has_avdd_supply;
+ bool has_cml_clk;
+};
+
+static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
+{
+ return container_of(chip, struct tegra_msi, chip);
+}
+
+struct tegra_pcie {
+ struct device *dev;
+
+ void __iomem *pads;
+ void __iomem *afi;
+ int irq;
+
+ struct list_head buses;
+ struct resource *cs;
+
+ struct resource io;
+ struct resource mem;
+ struct resource prefetch;
+ struct resource busn;
+
+ struct clk *pex_clk;
+ struct clk *afi_clk;
+ struct clk *pll_e;
+ struct clk *cml_clk;
+
+ struct reset_control *pex_rst;
+ struct reset_control *afi_rst;
+ struct reset_control *pcie_xrst;
+
+ struct tegra_msi msi;
+
+ struct list_head ports;
+ unsigned int num_ports;
+ u32 xbar_config;
+
+ struct regulator *pex_clk_supply;
+ struct regulator *vdd_supply;
+ struct regulator *avdd_supply;
+
+ const struct tegra_pcie_soc_data *soc_data;
+};
+
+struct tegra_pcie_port {
+ struct tegra_pcie *pcie;
+ struct list_head list;
+ struct resource regs;
+ void __iomem *base;
+ unsigned int index;
+ unsigned int lanes;
+};
+
+struct tegra_pcie_bus {
+ struct vm_struct *area;
+ struct list_head list;
+ unsigned int nr;
+};
+
+static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
+ unsigned long offset)
+{
+ writel(value, pcie->afi + offset);
+}
+
+static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
+{
+ return readl(pcie->afi + offset);
+}
+
+static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
+ unsigned long offset)
+{
+ writel(value, pcie->pads + offset);
+}
+
+static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
+{
+ return readl(pcie->pads + offset);
+}
+
+/*
+ * The configuration space mapping on Tegra is somewhat similar to the ECAM
+ * defined by PCIe. However it deviates a bit in how the 4 bits for extended
+ * register accesses are mapped:
+ *
+ * [27:24] extended register number
+ * [23:16] bus number
+ * [15:11] device number
+ * [10: 8] function number
+ * [ 7: 0] register number
+ *
+ * Mapping the whole extended configuration space would require 256 MiB of
+ * virtual address space, only a small part of which will actually be used.
+ * To work around this, 1 MiB of virtual address space is allocated per bus
+ * when the bus is first accessed. When the physical range is mapped, the
+ * bus number bits are hidden so that the extended register number bits
+ * appear as bits [19:16]. Therefore the virtual mapping looks like this:
+ *
+ * [19:16] extended register number
+ * [15:11] device number
+ * [10: 8] function number
+ * [ 7: 0] register number
+ *
+ * This is achieved by stitching together 16 chunks of 64 KiB of physical
+ * address space via the MMU.
+ */
+static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
+{
+ return ((where & 0xf00) << 8) | (PCI_SLOT(devfn) << 11) |
+ (PCI_FUNC(devfn) << 8) | (where & 0xfc);
+}
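+
+/*
+ * Worked example (a sketch, not part of the original code): for device 2,
+ * function 1, register 0x104, devfn = (2 << 3) | 1 and the helper above
+ * computes
+ *
+ *   ((0x104 & 0xf00) << 8) | (2 << 11) | (1 << 8) | (0x104 & 0xfc)
+ *   = 0x10000 | 0x1000 | 0x100 | 0x4 = 0x11104
+ *
+ * so the extended register bits land in [19:16] as described above, while
+ * the bus number is selected by the per-bus virtual mapping instead of
+ * being encoded in the offset.
+ */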
+
+static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
+ unsigned int busnr)
+{
+ pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
+ L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
+ phys_addr_t cs = pcie->cs->start;
+ struct tegra_pcie_bus *bus;
+ unsigned int i;
+ int err;
+
+ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&bus->list);
+ bus->nr = busnr;
+
+ /* allocate 1 MiB of virtual addresses */
+ bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
+ if (!bus->area) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ /* map each of the 16 chunks of 64 KiB each */
+ for (i = 0; i < 16; i++) {
+ unsigned long virt = (unsigned long)bus->area->addr +
+ i * SZ_64K;
+ phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;
+
+ err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
+ if (err < 0) {
+ dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
+ err);
+ goto unmap;
+ }
+ }
+
+ return bus;
+
+unmap:
+ vunmap(bus->area->addr);
+free:
+ kfree(bus);
+ return ERR_PTR(err);
+}
+
+/*
+ * Look up a virtual address mapping for the specified bus number. If no such
+ * mapping exists, try to create one.
+ */
+static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
+ unsigned int busnr)
+{
+ struct tegra_pcie_bus *bus;
+
+ list_for_each_entry(bus, &pcie->buses, list)
+ if (bus->nr == busnr)
+ return (void __iomem *)bus->area->addr;
+
+ bus = tegra_pcie_bus_alloc(pcie, busnr);
+ if (IS_ERR(bus))
+ return NULL;
+
+ list_add_tail(&bus->list, &pcie->buses);
+
+ return (void __iomem *)bus->area->addr;
+}
+
+static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+ void __iomem *addr = NULL;
+
+ if (bus->number == 0) {
+ unsigned int slot = PCI_SLOT(devfn);
+ struct tegra_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ if (port->index + 1 == slot) {
+ addr = port->base + (where & ~3);
+ break;
+ }
+ }
+ } else {
+ addr = tegra_pcie_bus_map(pcie, bus->number);
+ if (!addr) {
+ dev_err(pcie->dev,
+ "failed to map cfg. space for bus %u\n",
+ bus->number);
+ return NULL;
+ }
+
+ addr += tegra_pcie_conf_offset(devfn, where);
+ }
+
+ return addr;
+}
+
+static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ void __iomem *addr;
+
+ addr = tegra_pcie_conf_address(bus, devfn, where);
+ if (!addr) {
+ *value = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ *value = readl(addr);
+
+ if (size == 1)
+ *value = (*value >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *value = (*value >> (8 * (where & 3))) & 0xffff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ void __iomem *addr;
+ u32 mask, tmp;
+
+ addr = tegra_pcie_conf_address(bus, devfn, where);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (size == 4) {
+ writel(value, addr);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (size == 2)
+ mask = ~(0xffff << ((where & 0x3) * 8));
+ else if (size == 1)
+ mask = ~(0xff << ((where & 0x3) * 8));
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ tmp = readl(addr) & mask;
+ tmp |= value << ((where & 0x3) * 8);
+ writel(tmp, addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
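+
+/*
+ * Example of the sub-word write path above (illustrative, not part of the
+ * original code): a 16-bit write at where = 0x06 addresses the aligned
+ * word at 0x04, uses mask = ~(0xffff << 16) = 0x0000ffff to preserve the
+ * low half-word, and merges in value << 16 before writing the word back.
+ */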
+
+static struct pci_ops tegra_pcie_ops = {
+ .read = tegra_pcie_read_conf,
+ .write = tegra_pcie_write_conf,
+};
+
+static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
+{
+ unsigned long ret = 0;
+
+ switch (port->index) {
+ case 0:
+ ret = AFI_PEX0_CTRL;
+ break;
+
+ case 1:
+ ret = AFI_PEX1_CTRL;
+ break;
+
+ case 2:
+ ret = AFI_PEX2_CTRL;
+ break;
+ }
+
+ return ret;
+}
+
+static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
+{
+ unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ unsigned long value;
+
+ /* pulse reset signal */
+ value = afi_readl(port->pcie, ctrl);
+ value &= ~AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+
+ usleep_range(1000, 2000);
+
+ value = afi_readl(port->pcie, ctrl);
+ value |= AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+}
+
+static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
+{
+ const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
+ unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ unsigned long value;
+
+ /* enable reference clock */
+ value = afi_readl(port->pcie, ctrl);
+ value |= AFI_PEX_CTRL_REFCLK_EN;
+
+ if (soc->has_pex_clkreq_en)
+ value |= AFI_PEX_CTRL_CLKREQ_EN;
+
+ afi_writel(port->pcie, value, ctrl);
+
+ tegra_pcie_port_reset(port);
+}
+
+static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
+{
+ unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ unsigned long value;
+
+ /* assert port reset */
+ value = afi_readl(port->pcie, ctrl);
+ value &= ~AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+
+ /* disable reference clock */
+ value = afi_readl(port->pcie, ctrl);
+ value &= ~AFI_PEX_CTRL_REFCLK_EN;
+ afi_writel(port->pcie, value, ctrl);
+}
+
+static void tegra_pcie_port_free(struct tegra_pcie_port *port)
+{
+ struct tegra_pcie *pcie = port->pcie;
+
+ devm_iounmap(pcie->dev, port->base);
+ devm_release_mem_region(pcie->dev, port->regs.start,
+ resource_size(&port->regs));
+ list_del(&port->list);
+ devm_kfree(pcie->dev, port);
+}
+
+static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
+{
+ u16 reg;
+
+ if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
+ pci_read_config_word(dev, PCI_COMMAND, &reg);
+ reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
+ pci_write_config_word(dev, PCI_COMMAND, reg);
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
+
+/* Tegra PCIE root complex wrongly reports device class */
+static void tegra_pcie_fixup_class(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
+
+/* Tegra PCIE requires relaxed ordering */
+static void tegra_pcie_relax_enable(struct pci_dev *dev)
+{
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
+
+static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct tegra_pcie *pcie = sys_to_pcie(sys);
+
+ pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
+ pci_add_resource_offset(&sys->resources, &pcie->prefetch,
+ sys->mem_offset);
+ pci_add_resource(&sys->resources, &pcie->busn);
+
+ pci_ioremap_io(nr * SZ_64K, pcie->io.start);
+
+ return 1;
+}
+
+static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
+{
+ struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
+ int irq;
+
+ tegra_cpuidle_pcie_irqs_in_use();
+
+ irq = of_irq_parse_and_map_pci(pdev, slot, pin);
+ if (!irq)
+ irq = pcie->irq;
+
+ return irq;
+}
+
+static void tegra_pcie_add_bus(struct pci_bus *bus)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+
+ bus->msi = &pcie->msi.chip;
+ }
+}
+
+static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+{
+ struct tegra_pcie *pcie = sys_to_pcie(sys);
+ struct pci_bus *bus;
+
+ bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
+ &sys->resources);
+ if (!bus)
+ return NULL;
+
+ pci_scan_child_bus(bus);
+
+ return bus;
+}
+
+static irqreturn_t tegra_pcie_isr(int irq, void *arg)
+{
+ static const char * const err_msg[] = {
+ "Unknown",
+ "AXI slave error",
+ "AXI decode error",
+ "Target abort",
+ "Master abort",
+ "Invalid write",
+ "Response decoding error",
+ "AXI response decoding error",
+ "Transaction timeout",
+ };
+ struct tegra_pcie *pcie = arg;
+ u32 code, signature;
+
+ code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
+ signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
+ afi_writel(pcie, 0, AFI_INTR_CODE);
+
+ if (code == AFI_INTR_LEGACY)
+ return IRQ_NONE;
+
+ if (code >= ARRAY_SIZE(err_msg))
+ code = 0;
+
+ /*
+ * do not pollute kernel log with master abort reports since they
+ * happen a lot during enumeration
+ */
+ if (code == AFI_INTR_MASTER_ABORT)
+ dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
+ signature);
+ else
+ dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
+ signature);
+
+ if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
+ code == AFI_INTR_FPCI_DECODE_ERROR) {
+ u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
+ u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
+
+ if (code == AFI_INTR_MASTER_ABORT)
+ dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
+ else
+ dev_err(pcie->dev, " FPCI address: %10llx\n", address);
+ }
+
+ return IRQ_HANDLED;
+}
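+
+/*
+ * For example (illustrative numbers): fpci = 0x02 and signature =
+ * 0x12345678 reconstruct the faulting FPCI address as
+ * (0x02ULL << 32) | (0x12345678 & 0xfffffffc) = 0x0212345678.
+ */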
+
+/*
+ * FPCI map is as follows:
+ * - 0xfdfc000000: I/O space
+ * - 0xfdfe000000: type 0 configuration space
+ * - 0xfdff000000: type 1 configuration space
+ * - 0xfe00000000: type 0 extended configuration space
+ * - 0xfe10000000: type 1 extended configuration space
+ */
+static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
+{
+ u32 fpci_bar, size, axi_address;
+
+ /* Bar 0: type 1 extended configuration space */
+ fpci_bar = 0xfe100000;
+ size = resource_size(pcie->cs);
+ axi_address = pcie->cs->start;
+ afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);
+
+ /* Bar 1: downstream IO bar */
+ fpci_bar = 0xfdfc0000;
+ size = resource_size(&pcie->io);
+ axi_address = pcie->io.start;
+ afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
+
+ /* Bar 2: prefetchable memory BAR */
+ fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
+ size = resource_size(&pcie->prefetch);
+ axi_address = pcie->prefetch.start;
+ afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
+
+ /* Bar 3: non-prefetchable memory BAR */
+ fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
+ size = resource_size(&pcie->mem);
+ axi_address = pcie->mem.start;
+ afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
+
+ /* NULL out the remaining BARs as they are not used */
+ afi_writel(pcie, 0, AFI_AXI_BAR4_START);
+ afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
+ afi_writel(pcie, 0, AFI_FPCI_BAR4);
+
+ afi_writel(pcie, 0, AFI_AXI_BAR5_START);
+ afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
+ afi_writel(pcie, 0, AFI_FPCI_BAR5);
+
+ /* map all upstream transactions as uncached */
+ afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
+ afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
+ afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
+ afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
+
+ /* MSI translations are setup only when needed */
+ afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
+ afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
+ afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
+ afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
+}
+
+static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct tegra_pcie_port *port;
+ unsigned int timeout;
+ unsigned long value;
+
+ /* power down PCIe slot clock bias pad */
+ if (soc->has_pex_bias_ctrl)
+ afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
+
+ /* configure mode and disable all ports */
+ value = afi_readl(pcie, AFI_PCIE_CONFIG);
+ value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
+ value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+
+ afi_writel(pcie, value, AFI_PCIE_CONFIG);
+
+ value = afi_readl(pcie, AFI_FUSE);
+ value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
+ afi_writel(pcie, value, AFI_FUSE);
+
+ /* initialize internal PHY, enable up to 16 PCIE lanes */
+ pads_writel(pcie, 0x0, PADS_CTL_SEL);
+
+ /* override IDDQ to 1 on all 4 lanes */
+ value = pads_readl(pcie, PADS_CTL);
+ value |= PADS_CTL_IDDQ_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ /*
+ * Set up PHY PLL inputs: select PLLE output as refclock and
+ * set TX ref sel to div10 (not div5).
+ */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
+ value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ /* take PLL out of reset */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value |= PADS_PLL_CTL_RST_B4SM;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ /* Configure the reference clock driver */
+ value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
+ pads_writel(pcie, value, PADS_REFCLK_CFG0);
+ if (soc->num_ports > 2)
+ pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
+
+ /* wait for the PLL to lock */
+ timeout = 300;
+ do {
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ usleep_range(1000, 2000);
+ if (--timeout == 0) {
+ pr_err("Tegra PCIe error: timeout waiting for PLL\n");
+ return -EBUSY;
+ }
+ } while (!(value & PADS_PLL_CTL_LOCKDET));
+
+ /* turn off IDDQ override */
+ value = pads_readl(pcie, PADS_CTL);
+ value &= ~PADS_CTL_IDDQ_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ /* enable TX/RX data */
+ value = pads_readl(pcie, PADS_CTL);
+ value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ /* take the PCIe interface module out of reset */
+ reset_control_deassert(pcie->pcie_xrst);
+
+ /* finally enable PCIe */
+ value = afi_readl(pcie, AFI_CONFIGURATION);
+ value |= AFI_CONFIGURATION_EN_FPCI;
+ afi_writel(pcie, value, AFI_CONFIGURATION);
+
+ value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
+ AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
+ AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
+
+ if (soc->has_intr_prsnt_sense)
+ value |= AFI_INTR_EN_PRSNT_SENSE;
+
+ afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
+ afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
+
+ /* don't enable MSI for now, only when needed */
+ afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
+
+ /* disable all exceptions */
+ afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
+
+ return 0;
+}
+
+static void tegra_pcie_power_off(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ int err;
+
+ /* TODO: disable and unprepare clocks? */
+
+ reset_control_assert(pcie->pcie_xrst);
+ reset_control_assert(pcie->afi_rst);
+ reset_control_assert(pcie->pex_rst);
+
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+
+ if (soc->has_avdd_supply) {
+ err = regulator_disable(pcie->avdd_supply);
+ if (err < 0)
+ dev_warn(pcie->dev,
+ "failed to disable AVDD regulator: %d\n",
+ err);
+ }
+
+ err = regulator_disable(pcie->pex_clk_supply);
+ if (err < 0)
+ dev_warn(pcie->dev, "failed to disable pex-clk regulator: %d\n",
+ err);
+
+ err = regulator_disable(pcie->vdd_supply);
+ if (err < 0)
+ dev_warn(pcie->dev, "failed to disable VDD regulator: %d\n",
+ err);
+}
+
+static int tegra_pcie_power_on(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ int err;
+
+ reset_control_assert(pcie->pcie_xrst);
+ reset_control_assert(pcie->afi_rst);
+ reset_control_assert(pcie->pex_rst);
+
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+
+ /* enable regulators */
+ err = regulator_enable(pcie->vdd_supply);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to enable VDD regulator: %d\n", err);
+ return err;
+ }
+
+ err = regulator_enable(pcie->pex_clk_supply);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to enable pex-clk regulator: %d\n",
+ err);
+ return err;
+ }
+
+ if (soc->has_avdd_supply) {
+ err = regulator_enable(pcie->avdd_supply);
+ if (err < 0) {
+ dev_err(pcie->dev,
+ "failed to enable AVDD regulator: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
+ pcie->pex_clk,
+ pcie->pex_rst);
+ if (err) {
+ dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
+ return err;
+ }
+
+ reset_control_deassert(pcie->afi_rst);
+
+ err = clk_prepare_enable(pcie->afi_clk);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
+ return err;
+ }
+
+ if (soc->has_cml_clk) {
+ err = clk_prepare_enable(pcie->cml_clk);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to enable CML clock: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ err = clk_prepare_enable(pcie->pll_e);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+
+ pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
+ if (IS_ERR(pcie->pex_clk))
+ return PTR_ERR(pcie->pex_clk);
+
+ pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
+ if (IS_ERR(pcie->afi_clk))
+ return PTR_ERR(pcie->afi_clk);
+
+ pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
+ if (IS_ERR(pcie->pll_e))
+ return PTR_ERR(pcie->pll_e);
+
+ if (soc->has_cml_clk) {
+ pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
+ if (IS_ERR(pcie->cml_clk))
+ return PTR_ERR(pcie->cml_clk);
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
+{
+ pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
+ if (IS_ERR(pcie->pex_rst))
+ return PTR_ERR(pcie->pex_rst);
+
+ pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
+ if (IS_ERR(pcie->afi_rst))
+ return PTR_ERR(pcie->afi_rst);
+
+ pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
+ if (IS_ERR(pcie->pcie_xrst))
+ return PTR_ERR(pcie->pcie_xrst);
+
+ return 0;
+}
+
+static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct resource *pads, *afi, *res;
+ int err;
+
+ err = tegra_pcie_clocks_get(pcie);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
+ return err;
+ }
+
+ err = tegra_pcie_resets_get(pcie);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get resets: %d\n", err);
+ return err;
+ }
+
+ err = tegra_pcie_power_on(pcie);
+ if (err) {
+ dev_err(&pdev->dev, "failed to power up: %d\n", err);
+ return err;
+ }
+
+ pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
+ pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
+ if (IS_ERR(pcie->pads)) {
+ err = PTR_ERR(pcie->pads);
+ goto poweroff;
+ }
+
+ afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
+ pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
+ if (IS_ERR(pcie->afi)) {
+ err = PTR_ERR(pcie->afi);
+ goto poweroff;
+ }
+
+ /* request configuration space, but remap later, on demand */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
+ if (!res) {
+ err = -EADDRNOTAVAIL;
+ goto poweroff;
+ }
+
+ pcie->cs = devm_request_mem_region(pcie->dev, res->start,
+ resource_size(res), res->name);
+ if (!pcie->cs) {
+ err = -EADDRNOTAVAIL;
+ goto poweroff;
+ }
+
+ /* request interrupt */
+ err = platform_get_irq_byname(pdev, "intr");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ goto poweroff;
+ }
+
+ pcie->irq = err;
+
+ err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
+ goto poweroff;
+ }
+
+ return 0;
+
+poweroff:
+ tegra_pcie_power_off(pcie);
+ return err;
+}
+
+static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
+{
+ if (pcie->irq > 0)
+ free_irq(pcie->irq, pcie);
+
+ tegra_pcie_power_off(pcie);
+ return 0;
+}
+
+static int tegra_msi_alloc(struct tegra_msi *chip)
+{
+ int msi;
+
+ mutex_lock(&chip->lock);
+
+ msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
+ if (msi < INT_PCI_MSI_NR)
+ set_bit(msi, chip->used);
+ else
+ msi = -ENOSPC;
+
+ mutex_unlock(&chip->lock);
+
+ return msi;
+}
+
+static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
+{
+ struct device *dev = chip->chip.dev;
+
+ mutex_lock(&chip->lock);
+
+ if (!test_bit(irq, chip->used))
+ dev_err(dev, "trying to free unused MSI#%lu\n", irq);
+ else
+ clear_bit(irq, chip->used);
+
+ mutex_unlock(&chip->lock);
+}
+
+static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
+{
+ struct tegra_pcie *pcie = data;
+ struct tegra_msi *msi = &pcie->msi;
+ unsigned int i, processed = 0;
+
+ for (i = 0; i < 8; i++) {
+ unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
+
+ while (reg) {
+ unsigned int offset = find_first_bit(&reg, 32);
+ unsigned int index = i * 32 + offset;
+ unsigned int irq;
+
+ /* clear the interrupt */
+ afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
+
+ irq = irq_find_mapping(msi->domain, index);
+ if (irq) {
+ if (test_bit(index, msi->used))
+ generic_handle_irq(irq);
+ else
+ dev_info(pcie->dev, "unhandled MSI\n");
+ } else {
+ /*
+ * That's weird: who triggered this?
+ * Just clear it.
+ */
+ dev_info(pcie->dev, "unexpected MSI\n");
+ }
+
+ /* see if there's any more pending in this vector */
+ reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
+
+ processed++;
+ }
+ }
+
+ return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
+}
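+
+/*
+ * (The decoding above is also why INT_PCI_MSI_NR is 8 * 32: there are
+ * eight AFI_MSI_VEC registers of 32 status bits each, and the MSI number
+ * is recovered as index = i * 32 + offset.)
+ */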
+
+static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct tegra_msi *msi = to_tegra_msi(chip);
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+
+ hwirq = tegra_msi_alloc(msi);
+ if (hwirq < 0)
+ return hwirq;
+
+ irq = irq_create_mapping(msi->domain, hwirq);
+ if (!irq)
+ return -EINVAL;
+
+ irq_set_msi_desc(irq, desc);
+
+ msg.address_lo = virt_to_phys((void *)msi->pages);
+ /* 32 bit address only */
+ msg.address_hi = 0;
+ msg.data = hwirq;
+
+ write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
+{
+ struct tegra_msi *msi = to_tegra_msi(chip);
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ tegra_msi_free(msi, d->hwirq);
+}
+
+static struct irq_chip tegra_msi_irq_chip = {
+ .name = "Tegra PCIe MSI",
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ tegra_cpuidle_pcie_irqs_in_use();
+
+ return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = tegra_msi_map,
+};
+
+static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct tegra_msi *msi = &pcie->msi;
+ unsigned long base;
+ int err;
+ u32 reg;
+
+ mutex_init(&msi->lock);
+
+ msi->chip.dev = pcie->dev;
+ msi->chip.setup_irq = tegra_msi_setup_irq;
+ msi->chip.teardown_irq = tegra_msi_teardown_irq;
+
+ msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
+ &msi_domain_ops, &msi->chip);
+ if (!msi->domain) {
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ err = platform_get_irq_byname(pdev, "msi");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ goto err;
+ }
+
+ msi->irq = err;
+
+ err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
+ tegra_msi_irq_chip.name, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ /* set up the AFI/FPCI range */
+ msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ base = virt_to_phys((void *)msi->pages);
+
+ afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
+ afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
+ /* this register is in 4K increments */
+ afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
+
+ /* enable all MSI vectors */
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
+
+ /* and unmask the MSI interrupt */
+ reg = afi_readl(pcie, AFI_INTR_MASK);
+ reg |= AFI_INTR_MASK_MSI_MASK;
+ afi_writel(pcie, reg, AFI_INTR_MASK);
+
+ return 0;
+
+err:
+ irq_domain_remove(msi->domain);
+ return err;
+}
+
+static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
+{
+ struct tegra_msi *msi = &pcie->msi;
+ unsigned int i, irq;
+ u32 value;
+
+ /* mask the MSI interrupt */
+ value = afi_readl(pcie, AFI_INTR_MASK);
+ value &= ~AFI_INTR_MASK_MSI_MASK;
+ afi_writel(pcie, value, AFI_INTR_MASK);
+
+ /* disable all MSI vectors */
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
+
+ free_pages(msi->pages, 0);
+
+ if (msi->irq > 0)
+ free_irq(msi->irq, pcie);
+
+ for (i = 0; i < INT_PCI_MSI_NR; i++) {
+ irq = irq_find_mapping(msi->domain, i);
+ if (irq > 0)
+ irq_dispose_mapping(irq);
+ }
+
+ irq_domain_remove(msi->domain);
+
+ return 0;
+}
+
+static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
+ u32 *xbar)
+{
+ struct device_node *np = pcie->dev->of_node;
+
+ if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+ switch (lanes) {
+ case 0x00000204:
+ dev_info(pcie->dev, "4x1, 2x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
+ return 0;
+
+ case 0x00020202:
+ dev_info(pcie->dev, "2x3 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
+ return 0;
+
+ case 0x00010104:
+ dev_info(pcie->dev, "4x1, 1x2 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
+ return 0;
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
+ switch (lanes) {
+ case 0x00000004:
+ dev_info(pcie->dev, "single-mode configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
+ return 0;
+
+ case 0x00000202:
+ dev_info(pcie->dev, "dual-mode configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
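+
+/*
+ * The lane configurations matched above are built by tegra_pcie_parse_dt()
+ * below, which packs the nvidia,num-lanes value of root port N into byte N
+ * of "lanes" (lanes |= value << (index << 3)). As a worked example (not
+ * from the original code), a Tegra30 board whose three root ports use 4, 1
+ * and 1 lanes respectively yields
+ *
+ *   lanes = (1 << 16) | (1 << 8) | 4 = 0x00010104
+ *
+ * which selects the "4x1, 1x2" crossbar configuration.
+ */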
+
+static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct device_node *np = pcie->dev->of_node, *port;
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ struct resource res;
+ u32 lanes = 0;
+ int err;
+
+ if (of_pci_range_parser_init(&parser, np)) {
+ dev_err(pcie->dev, "missing \"ranges\" property\n");
+ return -EINVAL;
+ }
+
+ pcie->vdd_supply = devm_regulator_get(pcie->dev, "vdd");
+ if (IS_ERR(pcie->vdd_supply))
+ return PTR_ERR(pcie->vdd_supply);
+
+ pcie->pex_clk_supply = devm_regulator_get(pcie->dev, "pex-clk");
+ if (IS_ERR(pcie->pex_clk_supply))
+ return PTR_ERR(pcie->pex_clk_supply);
+
+ if (soc->has_avdd_supply) {
+ pcie->avdd_supply = devm_regulator_get(pcie->dev, "avdd");
+ if (IS_ERR(pcie->avdd_supply))
+ return PTR_ERR(pcie->avdd_supply);
+ }
+
+ for_each_of_pci_range(&parser, &range) {
+ of_pci_range_to_resource(&range, np, &res);
+
+ switch (res.flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_IO:
+ memcpy(&pcie->io, &res, sizeof(res));
+ pcie->io.name = "I/O";
+ break;
+
+ case IORESOURCE_MEM:
+ if (res.flags & IORESOURCE_PREFETCH) {
+ memcpy(&pcie->prefetch, &res, sizeof(res));
+ pcie->prefetch.name = "PREFETCH";
+ } else {
+ memcpy(&pcie->mem, &res, sizeof(res));
+ pcie->mem.name = "MEM";
+ }
+ break;
+ }
+ }
+
+ err = of_pci_parse_bus_range(np, &pcie->busn);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to parse ranges property: %d\n",
+ err);
+ pcie->busn.name = np->name;
+ pcie->busn.start = 0;
+ pcie->busn.end = 0xff;
+ pcie->busn.flags = IORESOURCE_BUS;
+ }
+
+ /* parse root ports */
+ for_each_child_of_node(np, port) {
+ struct tegra_pcie_port *rp;
+ unsigned int index;
+ u32 value;
+
+ err = of_pci_get_devfn(port);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to parse address: %d\n",
+ err);
+ return err;
+ }
+
+ index = PCI_SLOT(err);
+
+ if (index < 1 || index > soc->num_ports) {
+ dev_err(pcie->dev, "invalid port number: %d\n", index);
+ return -EINVAL;
+ }
+
+ index--;
+
+ err = of_property_read_u32(port, "nvidia,num-lanes", &value);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
+ err);
+ return err;
+ }
+
+ if (value > 16) {
+ dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
+ return -EINVAL;
+ }
+
+ lanes |= value << (index << 3);
+
+ if (!of_device_is_available(port))
+ continue;
+
+ rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ err = of_address_to_resource(port, 0, &rp->regs);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to parse address: %d\n",
+ err);
+ return err;
+ }
+
+ INIT_LIST_HEAD(&rp->list);
+ rp->index = index;
+ rp->lanes = value;
+ rp->pcie = pcie;
+
+ rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
+ if (IS_ERR(rp->base))
+ return PTR_ERR(rp->base);
+
+ list_add_tail(&rp->list, &pcie->ports);
+ }
+
+ err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
+ if (err < 0) {
+ dev_err(pcie->dev, "invalid lane configuration\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * FIXME: If no PCIe cards are attached, calling this function can
+ * noticeably increase boot time, because the link-up polling below runs
+ * through long timeout loops.
+ */
+#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
+static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
+{
+ unsigned int retries = 3;
+ unsigned long value;
+
+ do {
+ unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
+
+ do {
+ value = readl(port->base + RP_VEND_XP);
+
+ if (value & RP_VEND_XP_DL_UP)
+ break;
+
+ usleep_range(1000, 2000);
+ } while (--timeout);
+
+ if (!timeout) {
+ dev_err(port->pcie->dev, "link %u down, retrying\n",
+ port->index);
+ goto retry;
+ }
+
+ timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
+
+ do {
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+
+ if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
+ return true;
+
+ usleep_range(1000, 2000);
+ } while (--timeout);
+
+retry:
+ tegra_pcie_port_reset(port);
+ } while (--retries);
+
+ return false;
+}
+
+static int tegra_pcie_enable(struct tegra_pcie *pcie)
+{
+ struct tegra_pcie_port *port, *tmp;
+ struct hw_pci hw;
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ dev_info(pcie->dev, "probing port %u, using %u lanes\n",
+ port->index, port->lanes);
+
+ tegra_pcie_port_enable(port);
+
+ if (tegra_pcie_port_check_link(port))
+ continue;
+
+ dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
+
+ tegra_pcie_port_disable(port);
+ tegra_pcie_port_free(port);
+ }
+
+ memset(&hw, 0, sizeof(hw));
+
+ hw.nr_controllers = 1;
+ hw.private_data = (void **)&pcie;
+ hw.setup = tegra_pcie_setup;
+ hw.map_irq = tegra_pcie_map_irq;
+ hw.add_bus = tegra_pcie_add_bus;
+ hw.scan = tegra_pcie_scan_bus;
+ hw.ops = &tegra_pcie_ops;
+
+ pci_common_init_dev(pcie->dev, &hw);
+
+ return 0;
+}
+
+static const struct tegra_pcie_soc_data tegra20_pcie_data = {
+ .num_ports = 2,
+ .msi_base_shift = 0,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
+ .has_pex_clkreq_en = false,
+ .has_pex_bias_ctrl = false,
+ .has_intr_prsnt_sense = false,
+ .has_avdd_supply = false,
+ .has_cml_clk = false,
+};
+
+static const struct tegra_pcie_soc_data tegra30_pcie_data = {
+ .num_ports = 3,
+ .msi_base_shift = 8,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_avdd_supply = true,
+ .has_cml_clk = true,
+};
+
+static const struct of_device_id tegra_pcie_of_match[] = {
+ { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
+ { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
+
+static int tegra_pcie_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct tegra_pcie *pcie;
+ int err;
+
+ match = of_match_device(tegra_pcie_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pcie->buses);
+ INIT_LIST_HEAD(&pcie->ports);
+ pcie->soc_data = match->data;
+ pcie->dev = &pdev->dev;
+
+ err = tegra_pcie_parse_dt(pcie);
+ if (err < 0)
+ return err;
+
+ pcibios_min_mem = 0;
+
+ err = tegra_pcie_get_resources(pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request resources: %d\n", err);
+ return err;
+ }
+
+ err = tegra_pcie_enable_controller(pcie);
+ if (err)
+ goto put_resources;
+
+ /* set up the AFI address translations */
+ tegra_pcie_setup_translations(pcie);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ err = tegra_pcie_enable_msi(pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to enable MSI support: %d\n",
+ err);
+ goto put_resources;
+ }
+ }
+
+ err = tegra_pcie_enable(pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
+ goto disable_msi;
+ }
+
+ platform_set_drvdata(pdev, pcie);
+ return 0;
+
+disable_msi:
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_pcie_disable_msi(pcie);
+put_resources:
+ tegra_pcie_put_resources(pcie);
+ return err;
+}
+
+static struct platform_driver tegra_pcie_driver = {
+ .driver = {
+ .name = "tegra-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra_pcie_probe,
+};
+module_platform_driver(tegra_pcie_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
+MODULE_LICENSE("GPLv2");
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
new file mode 100644
index 00000000000..1eaf4df3618
--- /dev/null
+++ b/drivers/pci/host/pcie-designware.c
@@ -0,0 +1,835 @@
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_LINK_CONTROL 0x710
+#define PORT_LINK_MODE_MASK (0x3f << 16)
+#define PORT_LINK_MODE_1_LANES (0x1 << 16)
+#define PORT_LINK_MODE_2_LANES (0x3 << 16)
+#define PORT_LINK_MODE_4_LANES (0x7 << 16)
+
+#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
+#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
+#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8)
+#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
+#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
+#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
+
+#define PCIE_MSI_ADDR_LO 0x820
+#define PCIE_MSI_ADDR_HI 0x824
+#define PCIE_MSI_INTR0_ENABLE 0x828
+#define PCIE_MSI_INTR0_MASK 0x82C
+#define PCIE_MSI_INTR0_STATUS 0x830
+
+#define PCIE_ATU_VIEWPORT 0x900
+#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
+#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
+#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
+#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
+#define PCIE_ATU_CR1 0x904
+#define PCIE_ATU_TYPE_MEM (0x0 << 0)
+#define PCIE_ATU_TYPE_IO (0x2 << 0)
+#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
+#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
+#define PCIE_ATU_CR2 0x908
+#define PCIE_ATU_ENABLE (0x1 << 31)
+#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
+#define PCIE_ATU_LOWER_BASE 0x90C
+#define PCIE_ATU_UPPER_BASE 0x910
+#define PCIE_ATU_LIMIT 0x914
+#define PCIE_ATU_LOWER_TARGET 0x918
+#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
+#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
+#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
+#define PCIE_ATU_UPPER_TARGET 0x91C
+
+static struct hw_pci dw_pci;
+
+static unsigned long global_io_offset;
+
+static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val)
+{
+ *val = readl(addr);
+
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 3))) & 0xffff;
+ else if (size != 4)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val)
+{
+ if (size == 4)
+ writel(val, addr);
+ else if (size == 2)
+ writew(val, addr + (where & 2));
+ else if (size == 1)
+ writeb(val, addr + (where & 3));
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
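+
+/*
+ * Note the asymmetry with dw_pcie_cfg_read() above (an illustrative
+ * observation): sub-word writes do not read-modify-write the aligned word
+ * but rely on byte enables instead. For instance, a 16-bit write at
+ * where = 0x06 is issued directly as writew(val, addr + 2) against the
+ * word-aligned base passed in by the callers.
+ */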
+
+static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val)
+{
+ if (pp->ops->readl_rc)
+ pp->ops->readl_rc(pp, pp->dbi_base + reg, val);
+ else
+ *val = readl(pp->dbi_base + reg);
+}
+
+static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
+{
+ if (pp->ops->writel_rc)
+ pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
+ else
+ writel(val, pp->dbi_base + reg);
+}
+
+static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ int ret;
+
+ if (pp->ops->rd_own_conf)
+ ret = pp->ops->rd_own_conf(pp, where, size, val);
+ else
+ ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where,
+ size, val);
+
+ return ret;
+}
+
+static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+ u32 val)
+{
+ int ret;
+
+ if (pp->ops->wr_own_conf)
+ ret = pp->ops->wr_own_conf(pp, where, size, val);
+ else
+ ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), where,
+ size, val);
+
+ return ret;
+}
+
+static struct irq_chip dw_msi_irq_chip = {
+ .name = "PCI-MSI",
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+/* MSI int handler */
+irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+{
+ unsigned long val;
+ int i, pos, irq;
+ irqreturn_t ret = IRQ_NONE;
+
+ for (i = 0; i < MAX_MSI_CTRLS; i++) {
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
+ (u32 *)&val);
+ if (val) {
+ ret = IRQ_HANDLED;
+ pos = 0;
+ while ((pos = find_next_bit(&val, 32, pos)) != 32) {
+ irq = irq_find_mapping(pp->irq_domain,
+ i * 32 + pos);
+ dw_pcie_wr_own_conf(pp,
+ PCIE_MSI_INTR0_STATUS + i * 12,
+ 4, 1 << pos);
+ generic_handle_irq(irq);
+ pos++;
+ }
+ }
+ }
+
+ return ret;
+}
+
+void dw_pcie_msi_init(struct pcie_port *pp)
+{
+ pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
+
+ /* program the msi_data */
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
+ virt_to_phys((void *)pp->msi_data));
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
+}
+
+static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
+{
+ int flag = 1;
+
+ do {
+ pos = find_next_zero_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS, pos);
+ /* if the end of the bitmap is reached, there is nothing to allocate */
+ if (pos == MAX_MSI_IRQS)
+ return -ENOSPC;
+ /*
+ * Check if this position is at the correct offset: the vector
+ * count is always a power of two, and pos0 must be aligned to it.
+ */
+ if (pos % msgvec)
+ pos += msgvec - (pos % msgvec);
+ else
+ flag = 0;
+ } while (flag);
+
+ *pos0 = pos;
+ return 0;
+}
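+
+/*
+ * Worked example (illustrative): for msgvec = 4 and the first free bit at
+ * pos = 6, 6 % 4 != 0 bumps the search to pos = 8; if bit 8 is free,
+ * *pos0 = 8 is returned, giving the 2^n alignment that MSI requires for a
+ * block of 2^n vectors (the caller still verifies the block is contiguous).
+ */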
+
+static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
+ unsigned int nvec, unsigned int pos)
+{
+ unsigned int i, res, bit, val;
+
+ for (i = 0; i < nvec; i++) {
+ irq_set_msi_desc_off(irq_base, i, NULL);
+ clear_bit(pos + i, pp->msi_irq_in_use);
+ /* Disable corresponding interrupt on MSI controller */
+ res = ((pos + i) / 32) * 12;
+ bit = (pos + i) % 32;
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val &= ~(1 << bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+ }
+}
+
+static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
+{
+ int res, bit, irq, pos0, pos1, i;
+ u32 val;
+ struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
+
+ if (!pp) {
+ BUG();
+ return -EINVAL;
+ }
+
+ pos0 = find_first_zero_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS);
+ if (pos0 % no_irqs) {
+ if (find_valid_pos0(pp, no_irqs, pos0, &pos0))
+ goto no_valid_irq;
+ }
+ if (no_irqs > 1) {
+ pos1 = find_next_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS, pos0);
+ /* there must be nvec number of consecutive free bits */
+ while ((pos1 - pos0) < no_irqs) {
+ if (find_valid_pos0(pp, no_irqs, pos1, &pos0))
+ goto no_valid_irq;
+ pos1 = find_next_bit(pp->msi_irq_in_use,
+ MAX_MSI_IRQS, pos0);
+ }
+ }
+
+ irq = irq_find_mapping(pp->irq_domain, pos0);
+ if (!irq)
+ goto no_valid_irq;
+
+ /*
+ * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
+ * descs so there is no need to allocate descs here. We can therefore
+ * assume that if irq_find_mapping above returns non-zero, then the
+ * descs are also successfully allocated.
+ */
+
+ for (i = 0; i < no_irqs; i++) {
+ if (irq_set_msi_desc_off(irq, i, desc) != 0) {
+ clear_irq_range(pp, irq, i, pos0);
+ goto no_valid_irq;
+ }
+ set_bit(pos0 + i, pp->msi_irq_in_use);
+ /*Enable corresponding interrupt in MSI interrupt controller */
+ res = ((pos0 + i) / 32) * 12;
+ bit = (pos0 + i) % 32;
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val |= 1 << bit;
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+ }
+
+ *pos = pos0;
+ return irq;
+
+no_valid_irq:
+ *pos = pos0;
+ return -ENOSPC;
+}
+
+static void clear_irq(unsigned int irq)
+{
+ unsigned int pos, nvec;
+ struct msi_desc *msi;
+ struct pcie_port *pp;
+ struct irq_data *data = irq_get_irq_data(irq);
+
+ /* get the port structure */
+ msi = irq_data_get_msi(data);
+ pp = sys_to_pcie(msi->dev->bus->sysdata);
+ if (!pp) {
+ BUG();
+ return;
+ }
+
+ /* undo what was done in assign_irq */
+ pos = data->hwirq;
+ nvec = 1 << msi->msi_attrib.multiple;
+
+ clear_irq_range(pp, irq, nvec, pos);
+
+ /* all irqs cleared; reset attributes */
+ msi->irq = 0;
+ msi->msi_attrib.multiple = 0;
+}
+
+static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ int irq, pos, msgvec;
+ u16 msg_ctr;
+ struct msi_msg msg;
+ struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
+
+ if (!pp) {
+ BUG();
+ return -EINVAL;
+ }
+
+ pci_read_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
+ &msg_ctr);
+ msgvec = (msg_ctr & PCI_MSI_FLAGS_QSIZE) >> 4;
+ if (msgvec == 0)
+ msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
+ if (msgvec > 5)
+ msgvec = 0;
+
+ irq = assign_irq((1 << msgvec), desc, &pos);
+ if (irq < 0)
+ return irq;
+
+ /*
+ * write_msi_msg() will update PCI_MSI_FLAGS so there is
+ * no need to explicitly call pci_write_config_word().
+ */
+ desc->msi_attrib.multiple = msgvec;
+
+ msg.address_lo = virt_to_phys((void *)pp->msi_data);
+ msg.address_hi = 0x0;
+ msg.data = pos;
+ write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
+{
+ clear_irq(irq);
+}
+
+static struct msi_chip dw_pcie_msi_chip = {
+ .setup_irq = dw_msi_setup_irq,
+ .teardown_irq = dw_msi_teardown_irq,
+};
+
+int dw_pcie_link_up(struct pcie_port *pp)
+{
+ if (pp->ops->link_up)
+ return pp->ops->link_up(pp);
+ else
+ return 0;
+}
+
+static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = dw_pcie_msi_map,
+};
+
+int __init dw_pcie_host_init(struct pcie_port *pp)
+{
+ struct device_node *np = pp->dev->of_node;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ u32 val;
+ int i;
+
+ if (of_pci_range_parser_init(&parser, np)) {
+ dev_err(pp->dev, "missing ranges property\n");
+ return -EINVAL;
+ }
+
+ /* Get the I/O and memory ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
+ if (restype == IORESOURCE_IO) {
+ of_pci_range_to_resource(&range, np, &pp->io);
+ pp->io.name = "I/O";
+ pp->io.start = max_t(resource_size_t,
+ PCIBIOS_MIN_IO,
+ range.pci_addr + global_io_offset);
+ pp->io.end = min_t(resource_size_t,
+ IO_SPACE_LIMIT,
+ range.pci_addr + range.size
+ + global_io_offset);
+ pp->config.io_size = resource_size(&pp->io);
+ pp->config.io_bus_addr = range.pci_addr;
+ pp->io_base = range.cpu_addr;
+ }
+ if (restype == IORESOURCE_MEM) {
+ of_pci_range_to_resource(&range, np, &pp->mem);
+ pp->mem.name = "MEM";
+ pp->config.mem_size = resource_size(&pp->mem);
+ pp->config.mem_bus_addr = range.pci_addr;
+ }
+ if (restype == 0) {
+ of_pci_range_to_resource(&range, np, &pp->cfg);
+ pp->config.cfg0_size = resource_size(&pp->cfg)/2;
+ pp->config.cfg1_size = resource_size(&pp->cfg)/2;
+ }
+ }
+
+ if (!pp->dbi_base) {
+ pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
+ resource_size(&pp->cfg));
+ if (!pp->dbi_base) {
+ dev_err(pp->dev, "error with ioremap\n");
+ return -ENOMEM;
+ }
+ }
+
+ pp->cfg0_base = pp->cfg.start;
+ pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
+ pp->mem_base = pp->mem.start;
+
+ pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
+ pp->config.cfg0_size);
+ if (!pp->va_cfg0_base) {
+ dev_err(pp->dev, "error with ioremap in function\n");
+ return -ENOMEM;
+ }
+ pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
+ pp->config.cfg1_size);
+ if (!pp->va_cfg1_base) {
+ dev_err(pp->dev, "error with ioremap\n");
+ return -ENOMEM;
+ }
+
+ if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
+ dev_err(pp->dev, "Failed to parse the number of lanes\n");
+ return -EINVAL;
+ }
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
+ MAX_MSI_IRQS, &msi_domain_ops,
+ &dw_pcie_msi_chip);
+ if (!pp->irq_domain) {
+ dev_err(pp->dev, "irq domain init failed\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < MAX_MSI_IRQS; i++)
+ irq_create_mapping(pp->irq_domain, i);
+ }
+
+ if (pp->ops->host_init)
+ pp->ops->host_init(pp);
+
+ dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+
+ /* program correct class for RC */
+ dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+
+ dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
+
+ dw_pci.nr_controllers = 1;
+ dw_pci.private_data = (void **)&pp;
+
+ pci_common_init_dev(pp->dev, &dw_pci);
+ pci_assign_unassigned_resources();
+#ifdef CONFIG_PCI_DOMAINS
+ dw_pci.domain++;
+#endif
+
+ return 0;
+}
+
+static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
+{
+ /* Program viewport 0 : OUTBOUND : CFG0 */
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, pp->cfg0_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->cfg0_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
+{
+ /* Program viewport 1 : OUTBOUND : CFG1 */
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
+{
+ /* Program viewport 0 : OUTBOUND : MEM */
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
+ PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
+{
+ /* Program viewport 1 : OUTBOUND : IO */
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
+ PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
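+/*
+ * Only two outbound ATU viewports are used: a config access
+ * temporarily borrows the MEM viewport (index 0) or the IO viewport
+ * (index 1), which is why the accessors below restore the MEM/IO
+ * mapping after every config transaction.
+ */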
+static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ u32 devfn, int where, int size, u32 *val)
+{
+ int ret = PCIBIOS_SUCCESSFUL;
+ u32 address, busdev;
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
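+ /*
+ * e.g. bus 2, devfn 0x08 (device 1, function 0) gives busdev
+ * 0x02080000, assuming the usual ATU target encoding of bus[31:24],
+ * dev[23:19] and func[18:16].
+ */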
+ address = where & ~0x3;
+
+ if (bus->parent->number == pp->root_bus_nr) {
+ dw_pcie_prog_viewport_cfg0(pp, busdev);
+ ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size,
+ val);
+ dw_pcie_prog_viewport_mem_outbound(pp);
+ } else {
+ dw_pcie_prog_viewport_cfg1(pp, busdev);
+ ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size,
+ val);
+ dw_pcie_prog_viewport_io_outbound(pp);
+ }
+
+ return ret;
+}
+
+static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ u32 devfn, int where, int size, u32 val)
+{
+ int ret = PCIBIOS_SUCCESSFUL;
+ u32 address, busdev;
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+ address = where & ~0x3;
+
+ if (bus->parent->number == pp->root_bus_nr) {
+ dw_pcie_prog_viewport_cfg0(pp, busdev);
+ ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size,
+ val);
+ dw_pcie_prog_viewport_mem_outbound(pp);
+ } else {
+ dw_pcie_prog_viewport_cfg1(pp, busdev);
+ ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size,
+ val);
+ dw_pcie_prog_viewport_io_outbound(pp);
+ }
+
+ return ret;
+}
+
+static int dw_pcie_valid_config(struct pcie_port *pp,
+ struct pci_bus *bus, int dev)
+{
+ /* If there is no link, then there is no device */
+ if (bus->number != pp->root_bus_nr) {
+ if (!dw_pcie_link_up(pp))
+ return 0;
+ }
+
+ /* access only one slot on each root port */
+ if (bus->number == pp->root_bus_nr && dev > 0)
+ return 0;
+
+ /*
+ * do not read more than one device on the bus directly attached
+ * to the RC's (Virtual Bridge's) downstream side.
+ */
+ if (bus->primary == pp->root_bus_nr && dev > 0)
+ return 0;
+
+ return 1;
+}
+
+static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+ int ret;
+
+ if (!pp) {
+ BUG();
+ return -EINVAL;
+ }
+
+ if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ if (bus->number != pp->root_bus_nr)
+ ret = dw_pcie_rd_other_conf(pp, bus, devfn,
+ where, size, val);
+ else
+ ret = dw_pcie_rd_own_conf(pp, where, size, val);
+
+ return ret;
+}
+
+static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+ int ret;
+
+ if (!pp) {
+ BUG();
+ return -EINVAL;
+ }
+
+ if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number != pp->root_bus_nr)
+ ret = dw_pcie_wr_other_conf(pp, bus, devfn,
+ where, size, val);
+ else
+ ret = dw_pcie_wr_own_conf(pp, where, size, val);
+
+ return ret;
+}
+
+static struct pci_ops dw_pcie_ops = {
+ .read = dw_pcie_rd_conf,
+ .write = dw_pcie_wr_conf,
+};
+
+static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct pcie_port *pp;
+
+ pp = sys_to_pcie(sys);
+
+ if (!pp)
+ return 0;
+
+ if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
+ sys->io_offset = global_io_offset - pp->config.io_bus_addr;
+ pci_ioremap_io(global_io_offset, pp->io_base);
+ global_io_offset += SZ_64K;
+ pci_add_resource_offset(&sys->resources, &pp->io,
+ sys->io_offset);
+ }
+
+ sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
+ pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
+
+ return 1;
+}
+
+static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+{
+ struct pci_bus *bus;
+ struct pcie_port *pp = sys_to_pcie(sys);
+
+ if (pp) {
+ pp->root_bus_nr = sys->busnr;
+ bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops,
+ sys, &sys->resources);
+ } else {
+ bus = NULL;
+ BUG();
+ }
+
+ return bus;
+}
+
+static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
+ int irq;
+
+ irq = of_irq_parse_and_map_pci(dev, slot, pin);
+ if (!irq)
+ irq = pp->irq;
+
+ return irq;
+}
+
+static void dw_pcie_add_bus(struct pci_bus *bus)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+
+ dw_pcie_msi_chip.dev = pp->dev;
+ bus->msi = &dw_pcie_msi_chip;
+ }
+}
+
+static struct hw_pci dw_pci = {
+ .setup = dw_pcie_setup,
+ .scan = dw_pcie_scan_bus,
+ .map_irq = dw_pcie_map_irq,
+ .add_bus = dw_pcie_add_bus,
+};
+
+void dw_pcie_setup_rc(struct pcie_port *pp)
+{
+ struct pcie_port_info *config = &pp->config;
+ u32 val;
+ u32 membase;
+ u32 memlimit;
+
+ /* set the number of lanes */
+ dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
+ val &= ~PORT_LINK_MODE_MASK;
+ switch (pp->lanes) {
+ case 1:
+ val |= PORT_LINK_MODE_1_LANES;
+ break;
+ case 2:
+ val |= PORT_LINK_MODE_2_LANES;
+ break;
+ case 4:
+ val |= PORT_LINK_MODE_4_LANES;
+ break;
+ }
+ dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
+
+ /* set link width speed control register */
+ dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
+ val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+ switch (pp->lanes) {
+ case 1:
+ val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ break;
+ case 2:
+ val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+ break;
+ case 4:
+ val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+ break;
+ }
+ dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
+
+ /* setup RC BARs */
+ dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
+ dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
+
+ /* setup interrupt pins */
+ dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
+ val &= 0xffff00ff;
+ val |= 0x00000100;
+ dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);
+
+ /* setup bus numbers */
+ dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS, &val);
+ val &= 0xff000000;
+ val |= 0x00010100;
+ dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
+
+ /* setup memory base, memory limit */
+ membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
+ memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000;
+ val = memlimit | membase;
+ dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
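+ /*
+ * e.g. mem_base 0x20000000 with mem_size 0x10000000 gives membase
+ * 0x2000 and memlimit 0x30000000, i.e. val = 0x30002000: base in
+ * the low halfword, limit in the high halfword of the bridge's
+ * Memory Base/Limit register.
+ */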
+
+ /* setup command register */
+ dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
+ val &= 0xffff0000;
+ val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
+ dw_pcie_writel_rc(pp, val, PCI_COMMAND);
+}
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("Designware PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
new file mode 100644
index 00000000000..77f592faa7b
--- /dev/null
+++ b/drivers/pci/host/pcie-designware.h
@@ -0,0 +1,76 @@
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PCIE_DESIGNWARE_H
+#define _PCIE_DESIGNWARE_H
+
+struct pcie_port_info {
+ u32 cfg0_size;
+ u32 cfg1_size;
+ u32 io_size;
+ u32 mem_size;
+ phys_addr_t io_bus_addr;
+ phys_addr_t mem_bus_addr;
+};
+
+/*
+ * The hardware supports up to 256 MSI IRQs per controller, but keep
+ * it at 32 for now; more are unlikely ever to be needed. If they are,
+ * increase this in multiples of 32.
+ */
+#define MAX_MSI_IRQS 32
+#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
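+/* each MSI controller register bank services 32 MSI vectors */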
+
+struct pcie_port {
+ struct device *dev;
+ u8 root_bus_nr;
+ void __iomem *dbi_base;
+ u64 cfg0_base;
+ void __iomem *va_cfg0_base;
+ u64 cfg1_base;
+ void __iomem *va_cfg1_base;
+ u64 io_base;
+ u64 mem_base;
+ struct resource cfg;
+ struct resource io;
+ struct resource mem;
+ struct pcie_port_info config;
+ int irq;
+ u32 lanes;
+ struct pcie_host_ops *ops;
+ int msi_irq;
+ struct irq_domain *irq_domain;
+ unsigned long msi_data;
+ DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
+};
+
+struct pcie_host_ops {
+ void (*readl_rc)(struct pcie_port *pp,
+ void __iomem *dbi_base, u32 *val);
+ void (*writel_rc)(struct pcie_port *pp,
+ u32 val, void __iomem *dbi_base);
+ int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
+ int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
+ int (*link_up)(struct pcie_port *pp);
+ void (*host_init)(struct pcie_port *pp);
+};
+
+int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
+int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val);
+irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
+void dw_pcie_msi_init(struct pcie_port *pp);
+int dw_pcie_link_up(struct pcie_port *pp);
+void dw_pcie_setup_rc(struct pcie_port *pp);
+int dw_pcie_host_init(struct pcie_port *pp);
+
+#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
new file mode 100644
index 00000000000..f7d3de32c9a
--- /dev/null
+++ b/drivers/pci/host/pcie-rcar.c
@@ -0,0 +1,1006 @@
+/*
+ * PCIe driver for Renesas R-Car SoCs
+ * Copyright (C) 2014 Renesas Electronics Europe Ltd
+ *
+ * Based on:
+ * arch/sh/drivers/pci/pcie-sh7786.c
+ * arch/sh/drivers/pci/ops-sh7786.c
+ * Copyright (C) 2009 - 2011 Paul Mundt
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define DRV_NAME "rcar-pcie"
+
+#define PCIECAR 0x000010
+#define PCIECCTLR 0x000018
+#define CONFIG_SEND_ENABLE (1 << 31)
+#define TYPE0 (0 << 8)
+#define TYPE1 (1 << 8)
+#define PCIECDR 0x000020
+#define PCIEMSR 0x000028
+#define PCIEINTXR 0x000400
+#define PCIEMSITXR 0x000840
+
+/* Transfer control */
+#define PCIETCTLR 0x02000
+#define CFINIT 1
+#define PCIETSTR 0x02004
+#define DATA_LINK_ACTIVE 1
+#define PCIEERRFR 0x02020
+#define UNSUPPORTED_REQUEST (1 << 4)
+#define PCIEMSIFR 0x02044
+#define PCIEMSIALR 0x02048
+#define MSIFE 1
+#define PCIEMSIAUR 0x0204c
+#define PCIEMSIIER 0x02050
+
+/* root port address */
+#define PCIEPRAR(x) (0x02080 + ((x) * 0x4))
+
+/* local address reg & mask */
+#define PCIELAR(x) (0x02200 + ((x) * 0x20))
+#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
+#define LAM_PREFETCH (1 << 3)
+#define LAM_64BIT (1 << 2)
+#define LAR_ENABLE (1 << 1)
+
+/* PCIe address reg & mask */
+#define PCIEPARL(x) (0x03400 + ((x) * 0x20))
+#define PCIEPARH(x) (0x03404 + ((x) * 0x20))
+#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
+#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
+#define PAR_ENABLE (1 << 31)
+#define IO_SPACE (1 << 8)
+
+/* Configuration */
+#define PCICONF(x) (0x010000 + ((x) * 0x4))
+#define PMCAP(x) (0x010040 + ((x) * 0x4))
+#define EXPCAP(x) (0x010070 + ((x) * 0x4))
+#define VCCAP(x) (0x010100 + ((x) * 0x4))
+
+/* link layer */
+#define IDSETR1 0x011004
+#define TLCTLR 0x011048
+#define MACSR 0x011054
+#define MACCTLR 0x011058
+#define SCRAMBLE_DISABLE (1 << 27)
+
+/* R-Car H1 PHY */
+#define H1_PCIEPHYADRR 0x04000c
+#define WRITE_CMD (1 << 16)
+#define PHY_ACK (1 << 24)
+#define RATE_POS 12
+#define LANE_POS 8
+#define ADR_POS 0
+#define H1_PCIEPHYDOUTR 0x040014
+#define H1_PCIEPHYSR 0x040018
+
+#define INT_PCI_MSI_NR 32
+
+#define RCONF(x) (PCICONF(0)+(x))
+#define RPMCAP(x) (PMCAP(0)+(x))
+#define REXPCAP(x) (EXPCAP(0)+(x))
+#define RVCCAP(x) (VCCAP(0)+(x))
+
+#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
+#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
+#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
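+/* e.g. bus 1, dev 0, func 0, reg 0x10 yields a PCIECAR value of 0x01000010 */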
+
+#define PCI_MAX_RESOURCES 4
+#define MAX_NR_INBOUND_MAPS 6
+
+struct rcar_msi {
+ DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ struct irq_domain *domain;
+ struct msi_chip chip;
+ unsigned long pages;
+ struct mutex lock;
+ int irq1;
+ int irq2;
+};
+
+static inline struct rcar_msi *to_rcar_msi(struct msi_chip *chip)
+{
+ return container_of(chip, struct rcar_msi, chip);
+}
+
+/* Structure representing the PCIe interface */
+struct rcar_pcie {
+ struct device *dev;
+ void __iomem *base;
+ struct resource res[PCI_MAX_RESOURCES];
+ struct resource busn;
+ int root_bus_nr;
+ struct clk *clk;
+ struct clk *bus_clk;
+ struct rcar_msi msi;
+};
+
+static inline struct rcar_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+static void pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
+ unsigned long reg)
+{
+ writel(val, pcie->base + reg);
+}
+
+static unsigned long pci_read_reg(struct rcar_pcie *pcie, unsigned long reg)
+{
+ return readl(pcie->base + reg);
+}
+
+enum {
+ PCI_ACCESS_READ,
+ PCI_ACCESS_WRITE,
+};
+
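+/*
+ * Read-modify-write part of an aligned 32-bit register: e.g.
+ * rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1) updates only
+ * byte 1 of the containing register, leaving its neighbours intact.
+ */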
+static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
+{
+ int shift = 8 * (where & 3);
+ u32 val = pci_read_reg(pcie, where & ~3);
+
+ val &= ~(mask << shift);
+ val |= data << shift;
+ pci_write_reg(pcie, val, where & ~3);
+}
+
+static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
+{
+ int shift = 8 * (where & 3);
+ u32 val = pci_read_reg(pcie, where & ~3);
+
+ return val >> shift;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_config_access(struct rcar_pcie *pcie,
+ unsigned char access_type, struct pci_bus *bus,
+ unsigned int devfn, int where, u32 *data)
+{
+ int dev, func, reg, index;
+
+ dev = PCI_SLOT(devfn);
+ func = PCI_FUNC(devfn);
+ reg = where & ~3;
+ index = reg / 4;
+
+ /*
+ * While each channel has its own memory-mapped extended config
+ * space, it's generally only accessible when in endpoint mode.
+ * When in root complex mode, the controller is unable to target
+ * itself with either type 0 or type 1 accesses, and indeed, any
+ * controller-initiated target transfer to its own config space
+ * results in a completer abort.
+ *
+ * Each channel effectively only supports a single device, but as
+ * the same channel <-> device access works for any PCI_SLOT()
+ * value, we cheat a bit here and bind the controller's config
+ * space to devfn 0 in order to enable self-enumeration. In this
+ * case the regular ECAR/ECDR path is sidelined and the mangled
+ * config access itself is initiated as an internal bus transaction.
+ */
+ if (pci_is_root_bus(bus)) {
+ if (dev != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (access_type == PCI_ACCESS_READ) {
+ *data = pci_read_reg(pcie, PCICONF(index));
+ } else {
+ /* Keep an eye out for changes to the root bus number */
+ if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
+ pcie->root_bus_nr = *data & 0xff;
+
+ pci_write_reg(pcie, *data, PCICONF(index));
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (pcie->root_bus_nr < 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Clear errors */
+ pci_write_reg(pcie, pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
+
+ /* Set the PIO address */
+ pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | PCIE_CONF_DEV(dev) |
+ PCIE_CONF_FUNC(func) | reg, PCIECAR);
+
+ /* Enable the configuration access */
+ if (bus->parent->number == pcie->root_bus_nr)
+ pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
+ else
+ pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
+
+ /* Check for errors */
+ if (pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Check for master and target aborts */
+ if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
+ (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (access_type == PCI_ACCESS_READ)
+ *data = pci_read_reg(pcie, PCIECDR);
+ else
+ pci_write_reg(pcie, *data, PCIECDR);
+
+ /* Disable the configuration access */
+ pci_write_reg(pcie, 0, PCIECCTLR);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
+ int ret;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ,
+ bus, devfn, where, val);
+ if (ret != PCIBIOS_SUCCESSFUL) {
+ *val = 0xffffffff;
+ return ret;
+ }
+
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 2))) & 0xffff;
+
+ dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
+ bus->number, devfn, where, size, (unsigned long)*val);
+
+ return ret;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
+ int shift, ret;
+ u32 data;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ,
+ bus, devfn, where, &data);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
+ bus->number, devfn, where, size, (unsigned long)val);
+
+ if (size == 1) {
+ shift = 8 * (where & 3);
+ data &= ~(0xff << shift);
+ data |= ((val & 0xff) << shift);
+ } else if (size == 2) {
+ shift = 8 * (where & 2);
+ data &= ~(0xffff << shift);
+ data |= ((val & 0xffff) << shift);
+ } else
+ data = val;
+
+ ret = rcar_pcie_config_access(pcie, PCI_ACCESS_WRITE,
+ bus, devfn, where, &data);
+
+ return ret;
+}
+
+static struct pci_ops rcar_pcie_ops = {
+ .read = rcar_pcie_read_conf,
+ .write = rcar_pcie_write_conf,
+};
+
+static void rcar_pcie_setup_window(int win, struct resource *res,
+ struct rcar_pcie *pcie)
+{
+ /* Setup PCIe address space mappings for each resource */
+ resource_size_t size;
+ u32 mask;
+
+ pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
+
+ /*
+ * The PAMR mask is calculated in units of 128 bytes, which
+ * keeps things pretty simple.
+ */
+ size = resource_size(res);
+ mask = (roundup_pow_of_two(size) / SZ_128) - 1;
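+ /* e.g. a 64 MiB window: mask = (0x4000000 / 128) - 1 = 0x7ffff */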
+ pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
+
+ pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win));
+ pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win));
+
+ /* First resource is for IO */
+ mask = PAR_ENABLE;
+ if (res->flags & IORESOURCE_IO)
+ mask |= IO_SPACE;
+
+ pci_write_reg(pcie, mask, PCIEPTCTLR(win));
+}
+
+static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct rcar_pcie *pcie = sys_to_pcie(sys);
+ struct resource *res;
+ int i;
+
+ pcie->root_bus_nr = -1;
+
+ /* Setup PCI resources */
+ for (i = 0; i < PCI_MAX_RESOURCES; i++) {
+
+ res = &pcie->res[i];
+ if (!res->flags)
+ continue;
+
+ rcar_pcie_setup_window(i, res, pcie);
+
+ if (res->flags & IORESOURCE_IO)
+ pci_ioremap_io(nr * SZ_64K, res->start);
+ else
+ pci_add_resource(&sys->resources, res);
+ }
+ pci_add_resource(&sys->resources, &pcie->busn);
+
+ return 1;
+}
+
+static void rcar_pcie_add_bus(struct pci_bus *bus)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
+
+ bus->msi = &pcie->msi.chip;
+ }
+}
+
+struct hw_pci rcar_pci = {
+ .setup = rcar_pcie_setup,
+ .map_irq = of_irq_parse_and_map_pci,
+ .ops = &rcar_pcie_ops,
+ .add_bus = rcar_pcie_add_bus,
+};
+
+static void rcar_pcie_enable(struct rcar_pcie *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+
+ rcar_pci.nr_controllers = 1;
+ rcar_pci.private_data = (void **)&pcie;
+
+ pci_common_init_dev(&pdev->dev, &rcar_pci);
+#ifdef CONFIG_PCI_DOMAINS
+ rcar_pci.domain++;
+#endif
+}
+
+static int phy_wait_for_ack(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 100;
+
+ while (timeout--) {
+ if (pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
+ return 0;
+
+ udelay(100);
+ }
+
+ dev_err(pcie->dev, "Access to PCIe phy timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static void phy_write_reg(struct rcar_pcie *pcie,
+ unsigned int rate, unsigned int addr,
+ unsigned int lane, unsigned int data)
+{
+ unsigned long phyaddr;
+
+ phyaddr = WRITE_CMD |
+ ((rate & 1) << RATE_POS) |
+ ((lane & 0xf) << LANE_POS) |
+ ((addr & 0xff) << ADR_POS);
+
+ /* Set write data */
+ pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
+ pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
+
+ /* Ignore errors as they will be dealt with if the data link is down */
+ phy_wait_for_ack(pcie);
+
+ /* Clear command */
+ pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
+ pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
+
+ /* Ignore errors as they will be dealt with if the data link is down */
+ phy_wait_for_ack(pcie);
+}
+
+static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 10;
+
+ while (timeout--) {
+ if ((pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+ return 0;
+
+ msleep(5);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
+{
+ int err;
+
+ /* Begin initialization */
+ pci_write_reg(pcie, 0, PCIETCTLR);
+
+ /* Set mode */
+ pci_write_reg(pcie, 1, PCIEMSR);
+
+ /*
+ * Initial header for port config space is type 1, set the device
+ * class to match. Hardware takes care of propagating the IDSETR
+ * settings, so there is no need to bother with a quirk.
+ */
+ pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
+
+ /*
+ * Setup Secondary Bus Number & Subordinate Bus Number, even though
+ * they aren't used, to avoid the bridge being detected as broken.
+ */
+ rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
+ rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
+
+ /* Initialize default capabilities. */
+ rcar_rmw32(pcie, REXPCAP(0), 0, PCI_CAP_ID_EXP);
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
+ PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ PCI_HEADER_TYPE_BRIDGE);
+
+ /* Enable data link layer active state reporting */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), 0, PCI_EXP_LNKCAP_DLLLARC);
+
+ /* Write out the physical slot number = 0 */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
+
+ /* Set the completion timer timeout to the maximum 50ms. */
+ rcar_rmw32(pcie, TLCTLR+1, 0x3f, 50);
+
+ /* Terminate list of capabilities (Next Capability Offset=0) */
+ rcar_rmw32(pcie, RVCCAP(0), 0xfff0, 0);
+
+ /* Enable MAC data scrambling. */
+ rcar_rmw32(pcie, MACCTLR, SCRAMBLE_DISABLE, 0);
+
+ /* Enable MSI */
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ pci_write_reg(pcie, 0x101f0000, PCIEMSITXR);
+
+ /* Finish initialization - establish a PCI Express link */
+ pci_write_reg(pcie, CFINIT, PCIETCTLR);
+
+ /* This will time out if we don't have a link. */
+ err = rcar_pcie_wait_for_dl(pcie);
+ if (err)
+ return err;
+
+ /* Enable INTx interrupts */
+ rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
+
+ /* Enable slave Bus Mastering */
+ rcar_rmw32(pcie, RCONF(PCI_STATUS), PCI_STATUS_DEVSEL_MASK,
+ PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST);
+
+ wmb();
+
+ return 0;
+}
+
+static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 10;
+
+ /* Initialize the phy */
+ phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
+ phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
+ phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
+ phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
+ phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
+ phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
+ phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
+ phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
+ phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
+ phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
+ phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
+ phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
+
+ phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
+ phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
+ phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
+
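+ /* poll until the PHY reports ready before bringing up the link */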
+ while (timeout--) {
+ if (pci_read_reg(pcie, H1_PCIEPHYSR))
+ return rcar_pcie_hw_init(pcie);
+
+ msleep(5);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int rcar_msi_alloc(struct rcar_msi *chip)
+{
+ int msi;
+
+ mutex_lock(&chip->lock);
+
+ msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
+ if (msi < INT_PCI_MSI_NR)
+ set_bit(msi, chip->used);
+ else
+ msi = -ENOSPC;
+
+ mutex_unlock(&chip->lock);
+
+ return msi;
+}
+
+static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
+{
+ mutex_lock(&chip->lock);
+ clear_bit(irq, chip->used);
+ mutex_unlock(&chip->lock);
+}
+
+static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
+{
+ struct rcar_pcie *pcie = data;
+ struct rcar_msi *msi = &pcie->msi;
+ unsigned long reg;
+
+ reg = pci_read_reg(pcie, PCIEMSIFR);
+
+ /* MSI & INTx share an interrupt - we only handle MSI here */
+ if (!reg)
+ return IRQ_NONE;
+
+ while (reg) {
+ unsigned int index = find_first_bit(&reg, 32);
+ unsigned int irq;
+
+ /* clear the interrupt */
+ pci_write_reg(pcie, 1 << index, PCIEMSIFR);
+
+ irq = irq_find_mapping(msi->domain, index);
+ if (irq) {
+ if (test_bit(index, msi->used))
+ generic_handle_irq(irq);
+ else
+ dev_info(pcie->dev, "unhandled MSI\n");
+ } else {
+ /* Unknown MSI, just clear it */
+ dev_dbg(pcie->dev, "unexpected MSI\n");
+ }
+
+ /* see if there's any more pending in this vector */
+ reg = pci_read_reg(pcie, PCIEMSIFR);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+
+ hwirq = rcar_msi_alloc(msi);
+ if (hwirq < 0)
+ return hwirq;
+
+ irq = irq_create_mapping(msi->domain, hwirq);
+ if (!irq) {
+ rcar_msi_free(msi, hwirq);
+ return -EINVAL;
+ }
+
+ irq_set_msi_desc(irq, desc);
+
+ msg.address_lo = pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+ msg.address_hi = pci_read_reg(pcie, PCIEMSIAUR);
+ msg.data = hwirq;
+
+ write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
+{
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ rcar_msi_free(msi, d->hwirq);
+}
+
+static struct irq_chip rcar_msi_irq_chip = {
+ .name = "R-Car PCIe MSI",
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = rcar_msi_map,
+};
+
+static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct rcar_msi *msi = &pcie->msi;
+ unsigned long base;
+ int err;
+
+ mutex_init(&msi->lock);
+
+ msi->chip.dev = pcie->dev;
+ msi->chip.setup_irq = rcar_msi_setup_irq;
+ msi->chip.teardown_irq = rcar_msi_teardown_irq;
+
+ msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
+ &msi_domain_ops, &msi->chip);
+ if (!msi->domain) {
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ /* Two irqs deliver MSIs, but they are shared with non-MSI events */
+ err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
+ IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
+ IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ /* setup MSI data target */
+ msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ base = virt_to_phys((void *)msi->pages);
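+ /*
+ * The allocated page is not used as data; it only reserves a
+ * physical address for the controller to match inbound MSI writes
+ * against.
+ */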
+
+ pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
+ pci_write_reg(pcie, 0, PCIEMSIAUR);
+
+ /* enable all MSI interrupts */
+ pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
+
+ return 0;
+
+err:
+ irq_domain_remove(msi->domain);
+ return err;
+}
+
+static int rcar_pcie_get_resources(struct platform_device *pdev,
+ struct rcar_pcie *pcie)
+{
+ struct resource res;
+ int err, i;
+
+ err = of_address_to_resource(pdev->dev.of_node, 0, &res);
+ if (err)
+ return err;
+
+ pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+ if (IS_ERR(pcie->clk)) {
+ dev_err(pcie->dev, "cannot get platform clock\n");
+ return PTR_ERR(pcie->clk);
+ }
+ err = clk_prepare_enable(pcie->clk);
+ if (err)
+ goto fail_clk;
+
+ pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+ if (IS_ERR(pcie->bus_clk)) {
+ dev_err(pcie->dev, "cannot get pcie bus clock\n");
+ err = PTR_ERR(pcie->bus_clk);
+ goto fail_clk;
+ }
+ err = clk_prepare_enable(pcie->bus_clk);
+ if (err)
+ goto err_map_reg;
+
+ i = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!i) {
+ dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+ err = -ENOENT;
+ goto err_map_reg;
+ }
+ pcie->msi.irq1 = i;
+
+ i = irq_of_parse_and_map(pdev->dev.of_node, 1);
+ if (!i) {
+ dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+ err = -ENOENT;
+ goto err_map_reg;
+ }
+ pcie->msi.irq2 = i;
+
+ pcie->base = devm_ioremap_resource(&pdev->dev, &res);
+ if (IS_ERR(pcie->base)) {
+ err = PTR_ERR(pcie->base);
+ goto err_map_reg;
+ }
+
+ return 0;
+
+err_map_reg:
+ clk_disable_unprepare(pcie->bus_clk);
+fail_clk:
+ clk_disable_unprepare(pcie->clk);
+
+ return err;
+}
+
+static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
+ struct of_pci_range *range,
+ int *index)
+{
+ u64 restype = range->flags;
+ u64 cpu_addr = range->cpu_addr;
+ u64 cpu_end = range->cpu_addr + range->size;
+ u64 pci_addr = range->pci_addr;
+ u32 flags = LAM_64BIT | LAR_ENABLE;
+ u64 mask;
+ u64 size;
+ int idx = *index;
+
+ if (restype & IORESOURCE_PREFETCH)
+ flags |= LAM_PREFETCH;
+
+ /*
+ * If the size of the range is larger than the alignment of the start
+ * address, we have to use multiple entries to perform the mapping.
+ */
+ if (cpu_addr > 0) {
+ unsigned long nr_zeros = __ffs64(cpu_addr);
+ u64 alignment = 1ULL << nr_zeros;
+ size = min(range->size, alignment);
+ } else {
+ size = range->size;
+ }
+ /* Hardware supports max 4GiB inbound region */
+ size = min(size, 1ULL << 32);
+
+ mask = roundup_pow_of_two(size) - 1;
+ mask &= ~0xf;
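+ /*
+ * e.g. cpu_addr 0x20000000 (naturally aligned to 512 MiB) with a
+ * 1 GiB range maps as two 512 MiB regions, as each LAMR entry
+ * covers a naturally aligned power-of-two region.
+ */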
+
+ while (cpu_addr < cpu_end) {
+ /*
+ * Set up 64-bit inbound regions as the range parser doesn't
+ * distinguish between 32 and 64-bit types.
+ */
+ pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
+ pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
+ pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));
+
+ pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1));
+ pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1));
+ pci_write_reg(pcie, 0, PCIELAMR(idx+1));
+
+ pci_addr += size;
+ cpu_addr += size;
+ idx += 2;
+
+ if (idx > MAX_NR_INBOUND_MAPS) {
+ dev_err(pcie->dev, "Failed to map inbound regions!\n");
+ return -EINVAL;
+ }
+ }
+ *index = idx;
+
+ return 0;
+}
+
+static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ const int na = 3, ns = 2;
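+ /* PCI addresses use 3 cells (flags + 64-bit address); sizes use 2 */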
+ int rlen;
+
+ parser->node = node;
+ parser->pna = of_n_addr_cells(node);
+ parser->np = parser->pna + na + ns;
+
+ parser->range = of_get_property(node, "dma-ranges", &rlen);
+ if (!parser->range)
+ return -ENOENT;
+
+ parser->end = parser->range + rlen / sizeof(__be32);
+ return 0;
+}
+
+static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
+ struct device_node *np)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int index = 0;
+ int err;
+
+ if (pci_dma_range_parser_init(&parser, np))
+ return -EINVAL;
+
+ /* Get the dma-ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ u64 end = range.cpu_addr + range.size - 1;
+ dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
+ range.flags, range.cpu_addr, end, range.pci_addr);
+
+ err = rcar_pcie_inbound_ranges(pcie, &range, &index);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id rcar_pcie_of_match[] = {
+ { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
+ { .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init },
+ { .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rcar_pcie_of_match);
+
+static int rcar_pcie_probe(struct platform_device *pdev)
+{
+ struct rcar_pcie *pcie;
+ unsigned int data;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ const struct of_device_id *of_id;
+ int err, win = 0;
+ int (*hw_init_fn)(struct rcar_pcie *);
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pcie);
+
+ /* Get the bus range */
+ if (of_pci_parse_bus_range(pdev->dev.of_node, &pcie->busn)) {
+ dev_err(&pdev->dev, "failed to parse bus-range property\n");
+ return -EINVAL;
+ }
+
+ if (of_pci_range_parser_init(&parser, pdev->dev.of_node)) {
+ dev_err(&pdev->dev, "missing ranges property\n");
+ return -EINVAL;
+ }
+
+ err = rcar_pcie_get_resources(pdev, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request resources: %d\n", err);
+ return err;
+ }
+
+ for_each_of_pci_range(&parser, &range) {
+ of_pci_range_to_resource(&range, pdev->dev.of_node,
+ &pcie->res[win++]);
+
+ if (win >= PCI_MAX_RESOURCES)
+ break;
+ }
+
+ err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
+ if (err)
+ return err;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ err = rcar_pcie_enable_msi(pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to enable MSI support: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+ hw_init_fn = of_id->data;
+
+ /* Failure to get a link might just be that no cards are inserted */
+ err = hw_init_fn(pcie);
+ if (err) {
+ dev_info(&pdev->dev, "PCIe link down\n");
+ return 0;
+ }
+
+ data = pci_read_reg(pcie, MACSR);
+ dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+
+ rcar_pcie_enable(pcie);
+
+ return 0;
+}
+
+static struct platform_driver rcar_pcie_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = rcar_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rcar_pcie_probe,
+};
+module_platform_driver(rcar_pcie_driver);
+
+MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
+MODULE_DESCRIPTION("Renesas R-Car PCIe driver");
+MODULE_LICENSE("GPLv2");
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c
index 4d4a6447840..c68366cee6b 100644
--- a/drivers/pci/hotplug-pci.c
+++ b/drivers/pci/hotplug-pci.c
@@ -1,20 +1,29 @@
/* Core PCI functionality used only by PCI hotplug */
#include <linux/pci.h>
+#include <linux/export.h>
#include "pci.h"
-
-unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
+int pci_hp_add_bridge(struct pci_dev *dev)
{
- unsigned int max;
-
- max = pci_scan_child_bus(bus);
+ struct pci_bus *parent = dev->bus;
+ int pass, busnr, start = parent->busn_res.start;
+ int end = parent->busn_res.end;
- /*
- * Make the discovered devices available.
- */
- pci_bus_add_devices(bus);
+ for (busnr = start; busnr <= end; busnr++) {
+ if (!pci_find_bus(pci_domain_nr(parent), busnr))
+ break;
+ }
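+ /*
+ * On success the post-decrement leaves busnr one below the free
+ * number: pci_scan_bridge() takes the highest bus number already in
+ * use and assigns the next one to the new bridge.
+ */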
+ if (busnr-- > end) {
+ printk(KERN_ERR "No bus number available for hot-added bridge %s\n",
+ pci_name(dev));
+ return -1;
+ }
+ for (pass = 0; pass < 2; pass++)
+ busnr = pci_scan_bridge(parent, dev, busnr, pass);
+ if (!dev->subordinate)
+ return -1;
- return max;
+ return 0;
}
-EXPORT_SYMBOL(pci_do_scan_bus);
+EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
diff --git a/drivers/pci/hotplug.c b/drivers/pci/hotplug.c
deleted file mode 100644
index 2b5352a7dff..00000000000
--- a/drivers/pci/hotplug.c
+++ /dev/null
@@ -1,37 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/module.h>
-#include "pci.h"
-
-int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- struct pci_dev *pdev;
-
- if (!dev)
- return -ENODEV;
-
- pdev = to_pci_dev(dev);
- if (!pdev)
- return -ENODEV;
-
- if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
- return -ENOMEM;
-
- if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
- return -ENOMEM;
-
- if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
- pdev->subsystem_device))
- return -ENOMEM;
-
- if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
- return -ENOMEM;
-
- if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
- pdev->vendor, pdev->device,
- pdev->subsystem_vendor, pdev->subsystem_device,
- (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
- (u8)(pdev->class)))
- return -ENOMEM;
- return 0;
-}
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 66f29bc00be..df8caec5978 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -3,42 +3,17 @@
#
menuconfig HOTPLUG_PCI
- tristate "Support for PCI Hotplug"
- depends on PCI && HOTPLUG && SYSFS
+ bool "Support for PCI Hotplug"
+ depends on PCI && SYSFS
---help---
Say Y here if you have a motherboard with a PCI Hotplug controller.
This allows you to add and remove PCI cards while the machine is
powered up and running.
- To compile this driver as a module, choose M here: the
- module will be called pci_hotplug.
-
When in doubt, say N.
if HOTPLUG_PCI
-config HOTPLUG_PCI_FAKE
- tristate "Fake PCI Hotplug driver"
- help
- Say Y here if you want to use the fake PCI hotplug driver. It can
- be used to simulate PCI hotplug events if even if your system is
- not PCI hotplug capable.
-
- This driver will "emulate" removing PCI devices from the system.
- If the "power" file is written to with "0" then the specified PCI
- device will be completely removed from the kernel.
-
- WARNING, this does NOT turn off the power to the PCI device.
- This is a "logical" removal, not a physical or electrical
- removal.
-
- Use this module at your own risk. You have been warned!
-
- To compile this driver as a module, choose M here: the
- module will be called fakephp.
-
- When in doubt, say N.
-
config HOTPLUG_PCI_COMPAQ
tristate "Compaq PCI Hotplug driver"
depends on X86 && PCI_BIOS
@@ -74,15 +49,12 @@ config HOTPLUG_PCI_IBM
When in doubt, say N.
config HOTPLUG_PCI_ACPI
- tristate "ACPI PCI Hotplug driver"
- depends on (!ACPI_DOCK && ACPI) || (ACPI_DOCK)
+ bool "ACPI PCI Hotplug driver"
+ depends on HOTPLUG_PCI=y && ((!ACPI_DOCK && ACPI) || (ACPI_DOCK))
help
Say Y here if you have a system that supports PCI Hotplug using
ACPI.
- To compile this driver as a module, choose M here: the
- module will be called acpiphp.
-
When in doubt, say N.
config HOTPLUG_PCI_ACPI_IBM
@@ -143,7 +115,7 @@ config HOTPLUG_PCI_SHPC
config HOTPLUG_PCI_RPA
tristate "RPA PCI Hotplug driver"
- depends on PPC_PSERIES && EEH && !HOTPLUG_PCI_FAKE
+ depends on PPC_PSERIES && EEH
help
Say Y here if you have a RPA system that supports PCI Hotplug.
@@ -161,8 +133,8 @@ config HOTPLUG_PCI_RPA_DLPAR
To compile this driver as a module, choose M here: the
module will be called rpadlpar_io.
-
- When in doubt, say N.
+
+ When in doubt, say N.
config HOTPLUG_PCI_SGI
tristate "SGI PCI Hotplug Support"
@@ -173,4 +145,15 @@ config HOTPLUG_PCI_SGI
When in doubt, say N.
+config HOTPLUG_PCI_S390
+ bool "System z PCI Hotplug Support"
+ depends on S390 && 64BIT
+ help
+ Say Y here if you want to use the System z PCI Hotplug
+ driver for PCI devices. Without this driver it is not
+ possible to access stand-by PCI functions or to deconfigure
+ PCI functions.
+
+ When in doubt, say Y.
+
endif # HOTPLUG_PCI
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 6cd9f3c9887..3e6532b945c 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -18,14 +18,12 @@ obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o
obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o
obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o
+obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o
# acpiphp_ibm extends acpiphp, so should be linked afterwards.
obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o
-# Link this last so it doesn't claim devices that have a real hotplug driver
-obj-$(CONFIG_HOTPLUG_PCI_FAKE) += fakephp.o
-
pci_hotplug-objs := pci_hotplug_core.o pcihp_slot.o
ifdef CONFIG_HOTPLUG_PCI_CPCI
@@ -33,7 +31,7 @@ pci_hotplug-objs += cpci_hotplug_core.o \
cpci_hotplug_pci.o
endif
ifdef CONFIG_ACPI
-pci_hotplug-objs += acpi_pcihp.o
+pci_hotplug-objs += acpi_pcihp.o
endif
cpqphp-objs := cpqphp_core.o \
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 3bc72d18b12..a94d850ae22 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -44,7 +44,7 @@
#define METHOD_NAME__SUN "_SUN"
#define METHOD_NAME_OSHP "OSHP"
-static int debug_acpi;
+static bool debug_acpi;
static acpi_status
decode_type0_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
@@ -338,7 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
acpi_handle chandle, handle;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
- flags &= OSC_SHPC_NATIVE_HP_CONTROL;
+ flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
if (!flags) {
err("Invalid flags %u specified!\n", flags);
return -EINVAL;
@@ -351,7 +351,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
* To handle different BIOS behavior, we look for _OSC on a root
* bridge preferentially (according to PCI fw spec). Later for
* OSHP within the scope of the hotplug controller and its parents,
- * upto the host bridge under which this controller exists.
+ * up to the host bridge under which this controller exists.
*/
handle = acpi_find_root_bridge_handle(pdev);
if (handle) {
@@ -367,7 +367,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
}
- handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ handle = ACPI_HANDLE(&pdev->dev);
if (!handle) {
/*
* This hotplug controller was not listed in the ACPI name
@@ -408,16 +408,13 @@ got_one:
}
EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
-static int is_ejectable(acpi_handle handle)
+static int pcihp_is_ejectable(acpi_handle handle)
{
acpi_status status;
- acpi_handle tmp;
unsigned long long removable;
- status = acpi_get_handle(handle, "_ADR", &tmp);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(handle, "_ADR"))
return 0;
- status = acpi_get_handle(handle, "_EJ0", &tmp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(handle, "_EJ0"))
return 1;
status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable);
if (ACPI_SUCCESS(status) && removable)
@@ -442,7 +439,7 @@ int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle)
return 0;
if (bridge_handle != parent_handle)
return 0;
- return is_ejectable(handle);
+ return pcihp_is_ejectable(handle);
}
EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable);
@@ -450,7 +447,7 @@ static acpi_status
check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int *found = (int *)context;
- if (is_ejectable(handle)) {
+ if (pcihp_is_ejectable(handle)) {
*found = 1;
return AE_CTRL_TERMINATE;
}
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index bab52047baa..b0e61bf261a 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -36,20 +36,10 @@
#define _ACPIPHP_H
#include <linux/acpi.h>
-#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/pci_hotplug.h>
-#define dbg(format, arg...) \
- do { \
- if (acpiphp_debug) \
- printk(KERN_DEBUG "%s: " format, \
- MY_NAME , ## arg); \
- } while (0)
-#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
-#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
-
+struct acpiphp_context;
struct acpiphp_bridge;
struct acpiphp_slot;
@@ -60,6 +50,7 @@ struct slot {
struct hotplug_slot *hotplug_slot;
struct acpiphp_slot *acpi_slot;
struct hotplug_slot_info info;
+ unsigned int sun; /* ACPI _SUN (Slot User Number) value */
};
static inline const char *slot_name(struct slot *slot)
@@ -74,24 +65,20 @@ static inline const char *slot_name(struct slot *slot)
*/
struct acpiphp_bridge {
struct list_head list;
- acpi_handle handle;
- struct acpiphp_slot *slots;
+ struct list_head slots;
+ struct kref ref;
- /* Ejectable PCI-to-PCI bridge (PCI bridge and PCI function) */
- struct acpiphp_func *func;
+ struct acpiphp_context *context;
- int type;
int nr_slots;
- u32 flags;
-
/* This bus (host bridge) or Secondary bus (PCI-to-PCI bridge) */
struct pci_bus *pci_bus;
/* PCI-to-PCI bridge device */
struct pci_dev *pci_dev;
- spinlock_t res_lock;
+ bool is_going_away;
};
@@ -101,16 +88,13 @@ struct acpiphp_bridge {
* PCI slot information for each *physical* PCI slot
*/
struct acpiphp_slot {
- struct acpiphp_slot *next;
- struct acpiphp_bridge *bridge; /* parent */
+ struct list_head node;
+ struct pci_bus *bus;
struct list_head funcs; /* one slot may have different
objects (i.e. for each function) */
struct slot *slot;
- struct mutex crit_sect;
u8 device; /* pci device# */
-
- unsigned long long sun; /* ACPI _SUN (slot unique number) */
u32 flags; /* see below */
};
@@ -122,17 +106,52 @@ struct acpiphp_slot {
* typically 8 objects per slot (i.e. for each PCI function)
*/
struct acpiphp_func {
- struct acpiphp_slot *slot; /* parent */
- struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */
+ struct acpiphp_bridge *parent;
+ struct acpiphp_slot *slot;
struct list_head sibling;
- struct notifier_block nb;
- acpi_handle handle;
u8 function; /* pci function# */
u32 flags; /* see below */
};
+struct acpiphp_context {
+ struct acpi_hotplug_context hp;
+ struct acpiphp_func func;
+ struct acpiphp_bridge *bridge;
+ unsigned int refcount;
+};
+
+static inline struct acpiphp_context *to_acpiphp_context(struct acpi_hotplug_context *hp)
+{
+ return container_of(hp, struct acpiphp_context, hp);
+}
+
+static inline struct acpiphp_context *func_to_context(struct acpiphp_func *func)
+{
+ return container_of(func, struct acpiphp_context, func);
+}
+
+static inline struct acpi_device *func_to_acpi_device(struct acpiphp_func *func)
+{
+ return func_to_context(func)->hp.self;
+}
+
+static inline acpi_handle func_to_handle(struct acpiphp_func *func)
+{
+ return func_to_acpi_device(func)->handle;
+}
+
+struct acpiphp_root_context {
+ struct acpi_hotplug_context hp;
+ struct acpiphp_bridge *root_bridge;
+};
+
+static inline struct acpiphp_root_context *to_acpiphp_root_context(struct acpi_hotplug_context *hp)
+{
+ return container_of(hp, struct acpiphp_root_context, hp);
+}
+
/*
* struct acpiphp_attention_info - device specific attention registration
*
@@ -146,68 +165,38 @@ struct acpiphp_attention_info
struct module *owner;
};
-/* PCI bus bridge HID */
-#define ACPI_PCI_HOST_HID "PNP0A03"
-
-/* PCI BRIDGE type */
-#define BRIDGE_TYPE_HOST 0
-#define BRIDGE_TYPE_P2P 1
-
/* ACPI _STA method value (ignore bit 4; battery present) */
-#define ACPI_STA_PRESENT (0x00000001)
-#define ACPI_STA_ENABLED (0x00000002)
-#define ACPI_STA_SHOW_IN_UI (0x00000004)
-#define ACPI_STA_FUNCTIONING (0x00000008)
#define ACPI_STA_ALL (0x0000000f)
-/* bridge flags */
-#define BRIDGE_HAS_STA (0x00000001)
-#define BRIDGE_HAS_EJ0 (0x00000002)
-#define BRIDGE_HAS_HPP (0x00000004)
-#define BRIDGE_HAS_PS0 (0x00000010)
-#define BRIDGE_HAS_PS1 (0x00000020)
-#define BRIDGE_HAS_PS2 (0x00000040)
-#define BRIDGE_HAS_PS3 (0x00000080)
-
/* slot flags */
-#define SLOT_POWEREDON (0x00000001)
-#define SLOT_ENABLED (0x00000002)
-#define SLOT_MULTIFUNCTION (0x00000004)
+#define SLOT_ENABLED (0x00000001)
+#define SLOT_IS_GOING_AWAY (0x00000002)
/* function flags */
#define FUNC_HAS_STA (0x00000001)
#define FUNC_HAS_EJ0 (0x00000002)
-#define FUNC_HAS_PS0 (0x00000010)
-#define FUNC_HAS_PS1 (0x00000020)
-#define FUNC_HAS_PS2 (0x00000040)
-#define FUNC_HAS_PS3 (0x00000080)
-#define FUNC_HAS_DCK (0x00000100)
/* function prototypes */
/* acpiphp_core.c */
-extern int acpiphp_register_attention(struct acpiphp_attention_info*info);
-extern int acpiphp_unregister_attention(struct acpiphp_attention_info *info);
-extern int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot);
-extern void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
+int acpiphp_register_attention(struct acpiphp_attention_info*info);
+int acpiphp_unregister_attention(struct acpiphp_attention_info *info);
+int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot, unsigned int sun);
+void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
/* acpiphp_glue.c */
-extern int acpiphp_glue_init (void);
-extern void acpiphp_glue_exit (void);
-extern int acpiphp_get_num_slots (void);
typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data);
-extern int acpiphp_enable_slot (struct acpiphp_slot *slot);
-extern int acpiphp_disable_slot (struct acpiphp_slot *slot);
-extern int acpiphp_eject_slot (struct acpiphp_slot *slot);
-extern u8 acpiphp_get_power_status (struct acpiphp_slot *slot);
-extern u8 acpiphp_get_attention_status (struct acpiphp_slot *slot);
-extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot);
-extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot);
+int acpiphp_enable_slot(struct acpiphp_slot *slot);
+int acpiphp_disable_slot(struct acpiphp_slot *slot);
+u8 acpiphp_get_power_status(struct acpiphp_slot *slot);
+u8 acpiphp_get_attention_status(struct acpiphp_slot *slot);
+u8 acpiphp_get_latch_status(struct acpiphp_slot *slot);
+u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot);
/* variables */
-extern int acpiphp_debug;
+extern bool acpiphp_disabled;
#endif /* _ACPIPHP_H */
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index efa9f2de51c..e291efcd02a 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -31,27 +31,26 @@
*
*/
+#define pr_fmt(fmt) "acpiphp: " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include "acpiphp.h"
-#define MY_NAME "acpiphp"
-
/* name size which is used for entries in pcihpfs */
#define SLOT_NAME_SIZE 21 /* {_SUN} */
-static int debug;
-int acpiphp_debug;
+bool acpiphp_disabled;
/* local variables */
-static int num_slots;
static struct acpiphp_attention_info *attention_info;
#define DRIVER_VERSION "0.5"
@@ -61,12 +60,8 @@ static struct acpiphp_attention_info *attention_info;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-module_param(debug, bool, 0644);
-
-/* export the attention callback registration methods */
-EXPORT_SYMBOL_GPL(acpiphp_register_attention);
-EXPORT_SYMBOL_GPL(acpiphp_unregister_attention);
+MODULE_PARM_DESC(disable, "disable acpiphp driver");
+module_param_named(disable, acpiphp_disabled, bool, 0444);
static int enable_slot (struct hotplug_slot *slot);
static int disable_slot (struct hotplug_slot *slot);
@@ -105,6 +100,7 @@ int acpiphp_register_attention(struct acpiphp_attention_info *info)
}
return retval;
}
+EXPORT_SYMBOL_GPL(acpiphp_register_attention);
/**
@@ -112,7 +108,7 @@ int acpiphp_register_attention(struct acpiphp_attention_info *info)
* @info: must match the pointer used to register
*
* Description: This is used to un-register a hardware specific acpi
- * driver that manipulates the attention LED. The pointer to the
+ * driver that manipulates the attention LED. The pointer to the
* info struct must be the same as the one used to set it.
*/
int acpiphp_unregister_attention(struct acpiphp_attention_info *info)
@@ -125,6 +121,7 @@ int acpiphp_unregister_attention(struct acpiphp_attention_info *info)
}
return retval;
}
+EXPORT_SYMBOL_GPL(acpiphp_unregister_attention);
/**
@@ -137,7 +134,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
/* enable the specified slot */
return acpiphp_enable_slot(slot->acpi_slot);
@@ -153,15 +150,11 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- int retval;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
/* disable the specified slot */
- retval = acpiphp_disable_slot(slot->acpi_slot);
- if (!retval)
- retval = acpiphp_eject_slot(slot->acpi_slot);
- return retval;
+ return acpiphp_disable_slot(slot->acpi_slot);
}
@@ -174,20 +167,21 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
* was registered with us. This allows hardware specific
* ACPI implementations to blink the light for us.
*/
- static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
- {
+static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
+{
int retval = -ENODEV;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
-
+ pr_debug("%s - physical_slot = %s\n", __func__,
+ hotplug_slot_name(hotplug_slot));
+
if (attention_info && try_module_get(attention_info->owner)) {
retval = attention_info->set_attn(hotplug_slot, status);
module_put(attention_info->owner);
} else
attention_info = NULL;
return retval;
- }
-
+}
+
/**
* get_power_status - get power status of a slot
@@ -201,7 +195,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = acpiphp_get_power_status(slot->acpi_slot);
@@ -223,7 +217,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int retval = -EINVAL;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
+ pr_debug("%s - physical_slot = %s\n", __func__,
+ hotplug_slot_name(hotplug_slot));
if (attention_info && try_module_get(attention_info->owner)) {
retval = attention_info->get_attn(hotplug_slot, value);
@@ -246,7 +241,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = acpiphp_get_latch_status(slot->acpi_slot);
@@ -266,32 +261,13 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = acpiphp_get_adapter_status(slot->acpi_slot);
return 0;
}
-static int __init init_acpi(void)
-{
- int retval;
-
- /* initialize internal data structure etc. */
- retval = acpiphp_glue_init();
-
- /* read initial number of slots */
- if (!retval) {
- num_slots = acpiphp_get_num_slots();
- if (num_slots == 0) {
- acpiphp_glue_exit();
- retval = -ENODEV;
- }
- }
-
- return retval;
-}
-
/**
* release_slot - free up the memory used by a slot
* @hotplug_slot: slot to free
@@ -300,14 +276,15 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
kfree(slot->hotplug_slot);
kfree(slot);
}
/* callback routine to initialize 'struct slot' for each slot */
-int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
+int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
+ unsigned int sun)
{
struct slot *slot;
int retval = -ENOMEM;
@@ -334,20 +311,19 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot);
acpiphp_slot->slot = slot;
- snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun);
+ slot->sun = sun;
+ snprintf(name, SLOT_NAME_SIZE, "%u", sun);
- retval = pci_hp_register(slot->hotplug_slot,
- acpiphp_slot->bridge->pci_bus,
- acpiphp_slot->device,
- name);
+ retval = pci_hp_register(slot->hotplug_slot, acpiphp_slot->bus,
+ acpiphp_slot->device, name);
if (retval == -EBUSY)
goto error_hpslot;
if (retval) {
- err("pci_hp_register failed with error %d\n", retval);
+ pr_err("pci_hp_register failed with error %d\n", retval);
goto error_hpslot;
- }
+ }
- info("Slot [%s] registered\n", slot_name(slot));
+ pr_info("Slot [%s] registered\n", slot_name(slot));
return 0;
error_hpslot:
@@ -364,36 +340,17 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
struct slot *slot = acpiphp_slot->slot;
int retval = 0;
- info("Slot [%s] unregistered\n", slot_name(slot));
+ pr_info("Slot [%s] unregistered\n", slot_name(slot));
retval = pci_hp_deregister(slot->hotplug_slot);
if (retval)
- err("pci_hp_deregister failed with error %d\n", retval);
-}
-
-
-static int __init acpiphp_init(void)
-{
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-
- if (acpi_pci_disabled)
- return 0;
-
- acpiphp_debug = debug;
-
- /* read all the ACPI info from the system */
- return init_acpi();
+ pr_err("pci_hp_deregister failed with error %d\n", retval);
}
-static void __exit acpiphp_exit(void)
+void __init acpiphp_init(void)
{
- if (acpi_pci_disabled)
- return;
-
- /* deallocate internal data structures etc. */
- acpiphp_glue_exit();
+ pr_info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
+		acpiphp_disabled ? ", disabled by user" : "");
}
-
-module_init(acpiphp_init);
-module_exit(acpiphp_exit);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index cb23aa2ebf9..602d153c705 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -39,636 +39,385 @@
 * bus. It loses the refcount when the driver unloads.
*/
-#include <linux/init.h>
+#define pr_fmt(fmt) "acpiphp_glue: " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/pci-acpi.h>
+#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
#include "../pci.h"
#include "acpiphp.h"
static LIST_HEAD(bridge_list);
+static DEFINE_MUTEX(bridge_mutex);
-#define MY_NAME "acpiphp_glue"
-
-static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
+static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type);
+static void acpiphp_post_dock_fixup(struct acpi_device *adev);
static void acpiphp_sanitize_bus(struct pci_bus *bus);
static void acpiphp_set_hpp_values(struct pci_bus *bus);
-static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
+static void hotplug_event(u32 type, struct acpiphp_context *context);
+static void free_bridge(struct kref *kref);
-/* callback routine to check for the existence of a pci dock device */
-static acpi_status
-is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv)
+/**
+ * acpiphp_init_context - Create hotplug context and grab a reference to it.
+ * @adev: ACPI device object to create the context for.
+ *
+ * Call under acpi_hp_context_lock.
+ */
+static struct acpiphp_context *acpiphp_init_context(struct acpi_device *adev)
{
- int *count = (int *)context;
+ struct acpiphp_context *context;
- if (is_dock_device(handle)) {
- (*count)++;
- return AE_CTRL_TERMINATE;
- } else {
- return AE_OK;
- }
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return NULL;
+
+ context->refcount = 1;
+ acpi_set_hp_context(adev, &context->hp, acpiphp_hotplug_notify, NULL,
+ acpiphp_post_dock_fixup);
+ return context;
}
-/*
- * the _DCK method can do funny things... and sometimes not
- * hah-hah funny.
+/**
+ * acpiphp_get_context - Get hotplug context and grab a reference to it.
+ * @adev: ACPI device object to get the context for.
*
- * TBD - figure out a way to only call fixups for
- * systems that require them.
+ * Call under acpi_hp_context_lock.
*/
-static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
- void *v)
+static struct acpiphp_context *acpiphp_get_context(struct acpi_device *adev)
{
- struct acpiphp_func *func = container_of(nb, struct acpiphp_func, nb);
- struct pci_bus *bus = func->slot->bridge->pci_bus;
- u32 buses;
-
- if (!bus->self)
- return NOTIFY_OK;
+ struct acpiphp_context *context;
- /* fixup bad _DCK function that rewrites
- * secondary bridge on slot
- */
- pci_read_config_dword(bus->self,
- PCI_PRIMARY_BUS,
- &buses);
+ if (!adev->hp)
+ return NULL;
- if (((buses >> 8) & 0xff) != bus->secondary) {
- buses = (buses & 0xff000000)
- | ((unsigned int)(bus->primary) << 0)
- | ((unsigned int)(bus->secondary) << 8)
- | ((unsigned int)(bus->subordinate) << 16);
- pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses);
- }
- return NOTIFY_OK;
+ context = to_acpiphp_context(adev->hp);
+ context->refcount++;
+ return context;
}
-
-static struct acpi_dock_ops acpiphp_dock_ops = {
- .handler = handle_hotplug_event_func,
-};
-
-/* callback routine to register each ACPI PCI slot object */
-static acpi_status
-register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
+/**
+ * acpiphp_put_context - Drop a reference to ACPI hotplug context.
+ * @context: ACPI hotplug context to drop a reference to.
+ *
+ * The context object is removed if there are no more references to it.
+ *
+ * Call under acpi_hp_context_lock.
+ */
+static void acpiphp_put_context(struct acpiphp_context *context)
{
- struct acpiphp_bridge *bridge = (struct acpiphp_bridge *)context;
- struct acpiphp_slot *slot;
- struct acpiphp_func *newfunc;
- acpi_handle tmp;
- acpi_status status = AE_OK;
- unsigned long long adr, sun;
- int device, function, retval;
- struct pci_bus *pbus = bridge->pci_bus;
- struct pci_dev *pdev;
-
- if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
- return AE_OK;
-
- acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
- device = (adr >> 16) & 0xffff;
- function = adr & 0xffff;
-
- newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL);
- if (!newfunc)
- return AE_NO_MEMORY;
-
- INIT_LIST_HEAD(&newfunc->sibling);
- newfunc->handle = handle;
- newfunc->function = function;
-
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
- newfunc->flags = FUNC_HAS_EJ0;
-
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_STA", &tmp)))
- newfunc->flags |= FUNC_HAS_STA;
-
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_PS0", &tmp)))
- newfunc->flags |= FUNC_HAS_PS0;
-
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_PS3", &tmp)))
- newfunc->flags |= FUNC_HAS_PS3;
-
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_DCK", &tmp)))
- newfunc->flags |= FUNC_HAS_DCK;
-
- status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun);
- if (ACPI_FAILURE(status)) {
- /*
- * use the count of the number of slots we've found
- * for the number of the slot
- */
- sun = bridge->nr_slots+1;
- }
-
- /* search for objects that share the same slot */
- for (slot = bridge->slots; slot; slot = slot->next)
- if (slot->device == device) {
- if (slot->sun != sun)
- warn("sibling found, but _SUN doesn't match!\n");
- break;
- }
-
- if (!slot) {
- slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
- if (!slot) {
- kfree(newfunc);
- return AE_NO_MEMORY;
- }
-
- slot->bridge = bridge;
- slot->device = device;
- slot->sun = sun;
- INIT_LIST_HEAD(&slot->funcs);
- mutex_init(&slot->crit_sect);
-
- slot->next = bridge->slots;
- bridge->slots = slot;
-
- bridge->nr_slots++;
-
- dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
- slot->sun, pci_domain_nr(pbus), pbus->number, device);
- retval = acpiphp_register_hotplug_slot(slot);
- if (retval) {
- if (retval == -EBUSY)
- warn("Slot %llu already registered by another "
- "hotplug driver\n", slot->sun);
- else
- warn("acpiphp_register_hotplug_slot failed "
- "(err code = 0x%x)\n", retval);
- goto err_exit;
- }
- }
-
- newfunc->slot = slot;
- list_add_tail(&newfunc->sibling, &slot->funcs);
-
- pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
- if (pdev) {
- slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
- pci_dev_put(pdev);
- }
-
- if (is_dock_device(handle)) {
- /* we don't want to call this device's _EJ0
- * because we want the dock notify handler
- * to call it after it calls _DCK
- */
- newfunc->flags &= ~FUNC_HAS_EJ0;
- if (register_hotplug_dock_device(handle,
- &acpiphp_dock_ops, newfunc))
- dbg("failed to register dock device\n");
-
- /* we need to be notified when dock events happen
- * outside of the hotplug operation, since we may
- * need to do fixups before we can hotplug.
- */
- newfunc->nb.notifier_call = post_dock_fixups;
- if (register_dock_notifier(&newfunc->nb))
- dbg("failed to register a dock notifier");
- }
-
- /* install notify handler */
- if (!(newfunc->flags & FUNC_HAS_DCK)) {
- status = acpi_install_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_func,
- newfunc);
-
- if (ACPI_FAILURE(status))
- err("failed to register interrupt notify handler\n");
- } else
- status = AE_OK;
-
- return status;
-
- err_exit:
- bridge->nr_slots--;
- bridge->slots = slot->next;
- kfree(slot);
- kfree(newfunc);
+ if (--context->refcount)
+ return;
- return AE_OK;
+ WARN_ON(context->bridge);
+ context->hp.self->hp = NULL;
+ kfree(context);
}
-
-/* see if it's worth looking at this bridge */
-static int detect_ejectable_slots(acpi_handle handle)
+static inline void get_bridge(struct acpiphp_bridge *bridge)
{
- int found = acpi_pci_detect_ejectable(handle);
- if (!found) {
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- is_pci_dock_device, NULL, (void *)&found, NULL);
- }
- return found;
+ kref_get(&bridge->ref);
}
-/* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */
-static void init_bridge_misc(struct acpiphp_bridge *bridge)
+static inline void put_bridge(struct acpiphp_bridge *bridge)
{
- acpi_status status;
-
- /* must be added to the list prior to calling register_slot */
- list_add(&bridge->list, &bridge_list);
-
- /* register all slot objects under this bridge */
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge->handle, (u32)1,
- register_slot, NULL, bridge, NULL);
- if (ACPI_FAILURE(status)) {
- list_del(&bridge->list);
- return;
- }
-
- /* install notify handler */
- if (bridge->type != BRIDGE_TYPE_HOST) {
- if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
- status = acpi_remove_notify_handler(bridge->func->handle,
- ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_func);
- if (ACPI_FAILURE(status))
- err("failed to remove notify handler\n");
- }
- status = acpi_install_notify_handler(bridge->handle,
- ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_bridge,
- bridge);
-
- if (ACPI_FAILURE(status)) {
- err("failed to register interrupt notify handler\n");
- }
- }
+ kref_put(&bridge->ref, free_bridge);
}
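To make the get_bridge()/put_bridge() pairing above concrete, here is a minimal userspace C sketch of the same last-reference-frees pattern; the plain int counter and the names refcounted_bridge, bridge_get() and bridge_put() are stand-ins invented for the example, not the kernel's kref API:

#include <stdio.h>
#include <stdlib.h>

struct refcounted_bridge {
	int refcount;			/* kref_init() starts at 1, as here */
};

static void bridge_free(struct refcounted_bridge *b)
{
	printf("last reference dropped, freeing\n");
	free(b);
}

static void bridge_get(struct refcounted_bridge *b)
{
	b->refcount++;			/* stands in for kref_get() */
}

static void bridge_put(struct refcounted_bridge *b)
{
	if (--b->refcount == 0)		/* kref_put() invokes the release callback */
		bridge_free(b);
}

int main(void)
{
	struct refcounted_bridge *b = malloc(sizeof(*b));

	b->refcount = 1;	/* initial reference, like kref_init() */
	bridge_get(b);		/* e.g. a hotplug event in flight */
	bridge_put(b);		/* event done */
	bridge_put(b);		/* final put frees the object */
	return 0;
}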
-
-/* find acpiphp_func from acpiphp_bridge */
-static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle)
+static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev)
{
- struct acpiphp_bridge *bridge;
- struct acpiphp_slot *slot;
- struct acpiphp_func *func;
+ struct acpiphp_context *context;
- list_for_each_entry(bridge, &bridge_list, list) {
- for (slot = bridge->slots; slot; slot = slot->next) {
- list_for_each_entry(func, &slot->funcs, sibling) {
- if (func->handle == handle)
- return func;
- }
- }
+ acpi_lock_hp_context();
+ context = acpiphp_get_context(adev);
+ if (!context || context->func.parent->is_going_away) {
+ acpi_unlock_hp_context();
+ return NULL;
}
-
- return NULL;
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
+ acpi_unlock_hp_context();
+ return context;
}
-
-static inline void config_p2p_bridge_flags(struct acpiphp_bridge *bridge)
+static void acpiphp_let_context_go(struct acpiphp_context *context)
{
- acpi_handle dummy_handle;
-
- if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_STA", &dummy_handle)))
- bridge->flags |= BRIDGE_HAS_STA;
-
- if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_EJ0", &dummy_handle)))
- bridge->flags |= BRIDGE_HAS_EJ0;
-
- if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_PS0", &dummy_handle)))
- bridge->flags |= BRIDGE_HAS_PS0;
-
- if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_PS3", &dummy_handle)))
- bridge->flags |= BRIDGE_HAS_PS3;
-
- /* is this ejectable p2p bridge? */
- if (bridge->flags & BRIDGE_HAS_EJ0) {
- struct acpiphp_func *func;
-
- dbg("found ejectable p2p bridge\n");
-
- /* make link between PCI bridge and PCI function */
- func = acpiphp_bridge_handle_to_function(bridge->handle);
- if (!func)
- return;
- bridge->func = func;
- func->bridge = bridge;
- }
+ put_bridge(context->func.parent);
}
-
-/* allocate and initialize host bridge data structure */
-static void add_host_bridge(acpi_handle *handle)
+static void free_bridge(struct kref *kref)
{
+ struct acpiphp_context *context;
struct acpiphp_bridge *bridge;
- struct acpi_pci_root *root = acpi_pci_find_root(handle);
-
- bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
- if (bridge == NULL)
- return;
-
- bridge->type = BRIDGE_TYPE_HOST;
- bridge->handle = handle;
-
- bridge->pci_bus = root->bus;
-
- spin_lock_init(&bridge->res_lock);
+ struct acpiphp_slot *slot, *next;
+ struct acpiphp_func *func, *tmp;
- init_bridge_misc(bridge);
-}
+ acpi_lock_hp_context();
+ bridge = container_of(kref, struct acpiphp_bridge, ref);
-/* allocate and initialize PCI-to-PCI bridge data structure */
-static void add_p2p_bridge(acpi_handle *handle)
-{
- struct acpiphp_bridge *bridge;
+ list_for_each_entry_safe(slot, next, &bridge->slots, node) {
+ list_for_each_entry_safe(func, tmp, &slot->funcs, sibling)
+ acpiphp_put_context(func_to_context(func));
- bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
- if (bridge == NULL) {
- err("out of memory\n");
- return;
+ kfree(slot);
}
- bridge->type = BRIDGE_TYPE_P2P;
- bridge->handle = handle;
- config_p2p_bridge_flags(bridge);
-
- bridge->pci_dev = acpi_get_pci_dev(handle);
- bridge->pci_bus = bridge->pci_dev->subordinate;
- if (!bridge->pci_bus) {
- err("This is not a PCI-to-PCI bridge!\n");
- goto err;
+ context = bridge->context;
+ /* Root bridges will not have hotplug context. */
+ if (context) {
+ /* Release the reference taken by acpiphp_enumerate_slots(). */
+ put_bridge(context->func.parent);
+ context->bridge = NULL;
+ acpiphp_put_context(context);
}
- /*
- * Grab a ref to the subordinate PCI bus in case the bus is
- * removed via PCI core logical hotplug. The ref pins the bus
- * (which we access during module unload).
- */
- get_device(&bridge->pci_bus->dev);
- spin_lock_init(&bridge->res_lock);
-
- init_bridge_misc(bridge);
- return;
- err:
+ put_device(&bridge->pci_bus->dev);
pci_dev_put(bridge->pci_dev);
kfree(bridge);
- return;
-}
+ acpi_unlock_hp_context();
+}
-/* callback routine to find P2P bridges */
-static acpi_status
-find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
+/**
+ * acpiphp_post_dock_fixup - Post-dock fixups for PCI devices.
+ * @adev: ACPI device object corresponding to a PCI device.
+ *
+ * TBD - figure out a way to only call fixups for systems that require them.
+ */
+static void acpiphp_post_dock_fixup(struct acpi_device *adev)
{
- acpi_status status;
- struct pci_dev *dev;
+ struct acpiphp_context *context = acpiphp_grab_context(adev);
+ struct pci_bus *bus;
+ u32 buses;
- dev = acpi_get_pci_dev(handle);
- if (!dev || !dev->subordinate)
+ if (!context)
+ return;
+
+ bus = context->func.slot->bus;
+ if (!bus->self)
goto out;
- /* check if this bridge has ejectable slots */
- if ((detect_ejectable_slots(handle) > 0)) {
- dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev));
- add_p2p_bridge(handle);
- }
+ /* fixup bad _DCK function that rewrites
+ * secondary bridge on slot
+ */
+ pci_read_config_dword(bus->self, PCI_PRIMARY_BUS, &buses);
- /* search P2P bridges under this p2p bridge */
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- find_p2p_bridge, NULL, NULL, NULL);
- if (ACPI_FAILURE(status))
- warn("find_p2p_bridge failed (error code = 0x%x)\n", status);
+ if (((buses >> 8) & 0xff) != bus->busn_res.start) {
+ buses = (buses & 0xff000000)
+ | ((unsigned int)(bus->primary) << 0)
+ | ((unsigned int)(bus->busn_res.start) << 8)
+ | ((unsigned int)(bus->busn_res.end) << 16);
+ pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses);
+ }
out:
- pci_dev_put(dev);
- return AE_OK;
+ acpiphp_let_context_go(context);
}
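For reference, the PCI_PRIMARY_BUS dword that the fixup above rewrites packs the primary, secondary and subordinate bus numbers into bytes 0, 1 and 2 (byte 3, the secondary latency timer, is preserved by the 0xff000000 mask). A standalone C sketch of the same packing, with made-up bus numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t buses = 0x00050401;	/* hypothetical: primary 1, secondary 4, subordinate 5 */

	/* The secondary bus number lives in byte 1, as the fixup's test reads it. */
	printf("secondary as programmed: %#x\n", (unsigned int)((buses >> 8) & 0xff));

	/* Rewrite primary/secondary/subordinate the way the fixup does,
	 * keeping byte 3 untouched. */
	buses = (buses & 0xff000000)
		| (1u << 0)	/* primary */
		| (2u << 8)	/* secondary */
		| (3u << 16);	/* subordinate */
	printf("rewritten dword: %#010x\n", (unsigned int)buses);
	return 0;
}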
-
-/* find hot-pluggable slots, and then find P2P bridge */
-static int add_bridge(acpi_handle handle)
+/* Check whether the PCI device is managed by native PCIe hotplug driver */
+static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
{
- acpi_status status;
- unsigned long long tmp;
- acpi_handle dummy_handle;
-
- /* if the bridge doesn't have _STA, we assume it is always there */
- status = acpi_get_handle(handle, "_STA", &dummy_handle);
- if (ACPI_SUCCESS(status)) {
- status = acpi_evaluate_integer(handle, "_STA", NULL, &tmp);
- if (ACPI_FAILURE(status)) {
- dbg("%s: _STA evaluation failure\n", __func__);
- return 0;
- }
- if ((tmp & ACPI_STA_FUNCTIONING) == 0)
- /* don't register this object */
- return 0;
- }
-
- /* check if this bridge has ejectable slots */
- if (detect_ejectable_slots(handle) > 0) {
- dbg("found PCI host-bus bridge with hot-pluggable slots\n");
- add_host_bridge(handle);
- }
-
- /* search P2P bridges under this host bridge */
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- find_p2p_bridge, NULL, NULL, NULL);
+ u32 reg32;
+ acpi_handle tmp;
+ struct acpi_pci_root *root;
- if (ACPI_FAILURE(status))
- warn("find_p2p_bridge failed (error code = 0x%x)\n", status);
+ /* Check whether the PCIe port supports native PCIe hotplug */
+ if (pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32))
+ return false;
+ if (!(reg32 & PCI_EXP_SLTCAP_HPC))
+ return false;
- return 0;
+ /*
+ * Check whether native PCIe hotplug has been enabled for
+ * this PCIe hierarchy.
+ */
+ tmp = acpi_find_root_bridge_handle(pdev);
+ if (!tmp)
+ return false;
+ root = acpi_pci_find_root(tmp);
+ if (!root)
+ return false;
+ if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+ return false;
+
+ return true;
}
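The first test above reduces to a single bit of the Slot Capabilities register. A standalone sketch of that check, assuming the PCI_EXP_SLTCAP_HPC value (bit 6) from the PCIe spec and a made-up register value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_SLTCAP_HPC	0x00000040	/* Hot-Plug Capable, SLTCAP bit 6 */

static bool slot_is_hotplug_capable(uint32_t sltcap)
{
	return (sltcap & PCI_EXP_SLTCAP_HPC) != 0;
}

int main(void)
{
	uint32_t sltcap = 0x0040007b;	/* hypothetical Slot Capabilities value */

	printf("hot-plug capable: %d\n", slot_is_hotplug_capable(sltcap));
	return 0;
}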
-static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
+/**
+ * acpiphp_add_context - Add ACPIPHP context to an ACPI device object.
+ * @handle: ACPI handle of the object to add a context to.
+ * @lvl: Not used.
+ * @data: The object's parent ACPIPHP bridge.
+ * @rv: Not used.
+ */
+static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
+ void **rv)
{
- struct acpiphp_bridge *bridge;
-
- list_for_each_entry(bridge, &bridge_list, list)
- if (bridge->handle == handle)
- return bridge;
-
- return NULL;
-}
+ struct acpiphp_bridge *bridge = data;
+ struct acpiphp_context *context;
+ struct acpi_device *adev;
+ struct acpiphp_slot *slot;
+ struct acpiphp_func *newfunc;
+ acpi_status status = AE_OK;
+ unsigned long long adr;
+ int device, function;
+ struct pci_bus *pbus = bridge->pci_bus;
+ struct pci_dev *pdev = bridge->pci_dev;
+ u32 val;
-static void cleanup_bridge(struct acpiphp_bridge *bridge)
-{
- struct acpiphp_slot *slot, *next;
- struct acpiphp_func *func, *tmp;
- acpi_status status;
- acpi_handle handle = bridge->handle;
-
- status = acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_bridge);
- if (ACPI_FAILURE(status))
- err("failed to remove notify handler\n");
-
- if ((bridge->type != BRIDGE_TYPE_HOST) &&
- ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func)) {
- status = acpi_install_notify_handler(bridge->func->handle,
- ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_func,
- bridge->func);
- if (ACPI_FAILURE(status))
- err("failed to install interrupt notify handler\n");
+ status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND)
+ acpi_handle_warn(handle,
+ "can't evaluate _ADR (%#x)\n", status);
+ return AE_OK;
}
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
- slot = bridge->slots;
- while (slot) {
- next = slot->next;
- list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) {
- if (is_dock_device(func->handle)) {
- unregister_hotplug_dock_device(func->handle);
- unregister_dock_notifier(&func->nb);
- }
- if (!(func->flags & FUNC_HAS_DCK)) {
- status = acpi_remove_notify_handler(func->handle,
- ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_func);
- if (ACPI_FAILURE(status))
- err("failed to remove notify handler\n");
- }
- list_del(&func->sibling);
- kfree(func);
- }
- acpiphp_unregister_hotplug_slot(slot);
- list_del(&slot->funcs);
- kfree(slot);
- slot = next;
+ device = (adr >> 16) & 0xffff;
+ function = adr & 0xffff;
+
+ acpi_lock_hp_context();
+ context = acpiphp_init_context(adev);
+ if (!context) {
+ acpi_unlock_hp_context();
+ acpi_handle_err(handle, "No hotplug context\n");
+ return AE_NOT_EXIST;
}
+ newfunc = &context->func;
+ newfunc->function = function;
+ newfunc->parent = bridge;
+ acpi_unlock_hp_context();
/*
- * Only P2P bridges have a pci_dev
+ * If this is a dock device, its _EJ0 should be executed by the dock
+ * notify handler after calling _DCK.
*/
- if (bridge->pci_dev)
- put_device(&bridge->pci_bus->dev);
-
- pci_dev_put(bridge->pci_dev);
- list_del(&bridge->list);
- kfree(bridge);
-}
-
-static acpi_status
-cleanup_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- struct acpiphp_bridge *bridge;
+ if (!is_dock_device(adev) && acpi_has_method(handle, "_EJ0"))
+ newfunc->flags = FUNC_HAS_EJ0;
- /* cleanup p2p bridges under this P2P bridge
- in a depth-first manner */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- cleanup_p2p_bridge, NULL, NULL, NULL);
+ if (acpi_has_method(handle, "_STA"))
+ newfunc->flags |= FUNC_HAS_STA;
- bridge = acpiphp_handle_to_bridge(handle);
- if (bridge)
- cleanup_bridge(bridge);
+ /* search for objects that share the same slot */
+ list_for_each_entry(slot, &bridge->slots, node)
+ if (slot->device == device)
+ goto slot_found;
- return AE_OK;
-}
+ slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
+ if (!slot) {
+ acpi_lock_hp_context();
+ acpiphp_put_context(context);
+ acpi_unlock_hp_context();
+ return AE_NO_MEMORY;
+ }
-static void remove_bridge(acpi_handle handle)
-{
- struct acpiphp_bridge *bridge;
+ slot->bus = bridge->pci_bus;
+ slot->device = device;
+ INIT_LIST_HEAD(&slot->funcs);
- /* cleanup p2p bridges under this host bridge
- in a depth-first manner */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
- (u32)1, cleanup_p2p_bridge, NULL, NULL, NULL);
+ list_add_tail(&slot->node, &bridge->slots);
/*
- * On root bridges with hotplug slots directly underneath (ie,
- * no p2p bridge inbetween), we call cleanup_bridge().
- *
- * The else clause cleans up root bridges that either had no
- * hotplug slots at all, or had a p2p bridge underneath.
+ * Expose slots to user space for functions that have _EJ0 or _RMV or
+ * are located in dock stations. Do not expose them for devices handled
+	 * by the native PCIe hotplug (PCIeHP), because that code is supposed to
+ * expose slots to user space in those cases.
*/
- bridge = acpiphp_handle_to_bridge(handle);
- if (bridge)
- cleanup_bridge(bridge);
- else
- acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_bridge);
-}
+ if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
+ && !(pdev && device_is_managed_by_native_pciehp(pdev))) {
+ unsigned long long sun;
+ int retval;
-static int power_on_slot(struct acpiphp_slot *slot)
-{
- acpi_status status;
- struct acpiphp_func *func;
- int retval = 0;
+ bridge->nr_slots++;
+ status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun);
+ if (ACPI_FAILURE(status))
+ sun = bridge->nr_slots;
- /* if already enabled, just skip */
- if (slot->flags & SLOT_POWEREDON)
- goto err_exit;
+ pr_debug("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
+ sun, pci_domain_nr(pbus), pbus->number, device);
- list_for_each_entry(func, &slot->funcs, sibling) {
- if (func->flags & FUNC_HAS_PS0) {
- dbg("%s: executing _PS0\n", __func__);
- status = acpi_evaluate_object(func->handle, "_PS0", NULL, NULL);
- if (ACPI_FAILURE(status)) {
- warn("%s: _PS0 failed\n", __func__);
- retval = -1;
- goto err_exit;
- } else
- break;
+ retval = acpiphp_register_hotplug_slot(slot, sun);
+ if (retval) {
+ slot->slot = NULL;
+ bridge->nr_slots--;
+ if (retval == -EBUSY)
+ pr_warn("Slot %llu already registered by another hotplug driver\n", sun);
+ else
+ pr_warn("acpiphp_register_hotplug_slot failed (err code = 0x%x)\n", retval);
}
+ /* Even if the slot registration fails, we can still use it. */
}
- /* TBD: evaluate _STA to check if the slot is enabled */
+ slot_found:
+ newfunc->slot = slot;
+ list_add_tail(&newfunc->sibling, &slot->funcs);
- slot->flags |= SLOT_POWEREDON;
+ if (pci_bus_read_dev_vendor_id(pbus, PCI_DEVFN(device, function),
+ &val, 60*1000))
+ slot->flags |= SLOT_ENABLED;
- err_exit:
- return retval;
+ return AE_OK;
}
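The _ADR decoding near the top of acpiphp_add_context() follows the ACPI convention of device number in the high 16 bits and function number in the low 16 bits. A standalone sketch of that arithmetic, with a made-up _ADR value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t adr = 0x001C0002;	/* hypothetical _ADR: device 0x1c, function 2 */
	unsigned int device = (adr >> 16) & 0xffff;
	unsigned int function = adr & 0xffff;

	printf("device %#x function %u\n", device, function);
	return 0;
}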
+static struct acpiphp_bridge *acpiphp_dev_to_bridge(struct acpi_device *adev)
+{
+ struct acpiphp_bridge *bridge = NULL;
+
+ acpi_lock_hp_context();
+ if (adev->hp) {
+ bridge = to_acpiphp_root_context(adev->hp)->root_bridge;
+ if (bridge)
+ get_bridge(bridge);
+ }
+ acpi_unlock_hp_context();
+ return bridge;
+}
-static int power_off_slot(struct acpiphp_slot *slot)
+static void cleanup_bridge(struct acpiphp_bridge *bridge)
{
- acpi_status status;
+ struct acpiphp_slot *slot;
struct acpiphp_func *func;
- int retval = 0;
+ list_for_each_entry(slot, &bridge->slots, node) {
+ list_for_each_entry(func, &slot->funcs, sibling) {
+ struct acpi_device *adev = func_to_acpi_device(func);
- /* if already disabled, just skip */
- if ((slot->flags & SLOT_POWEREDON) == 0)
- goto err_exit;
-
- list_for_each_entry(func, &slot->funcs, sibling) {
- if (func->flags & FUNC_HAS_PS3) {
- status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL);
- if (ACPI_FAILURE(status)) {
- warn("%s: _PS3 failed\n", __func__);
- retval = -1;
- goto err_exit;
- } else
- break;
+ acpi_lock_hp_context();
+ adev->hp->notify = NULL;
+ adev->hp->fixup = NULL;
+ acpi_unlock_hp_context();
}
+ slot->flags |= SLOT_IS_GOING_AWAY;
+ if (slot->slot)
+ acpiphp_unregister_hotplug_slot(slot);
}
- /* TBD: evaluate _STA to check if the slot is disabled */
-
- slot->flags &= (~SLOT_POWEREDON);
+ mutex_lock(&bridge_mutex);
+ list_del(&bridge->list);
+ mutex_unlock(&bridge_mutex);
- err_exit:
- return retval;
+ acpi_lock_hp_context();
+ bridge->is_going_away = true;
+ acpi_unlock_hp_context();
}
-
-
/**
* acpiphp_max_busnr - return the highest reserved bus number under the given bus.
* @bus: bus to start search with
*/
static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
{
- struct list_head *tmp;
+ struct pci_bus *tmp;
unsigned char max, n;
/*
@@ -679,77 +428,16 @@ static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
* bus->subordinate value because it could have
* padding in it.
*/
- max = bus->secondary;
+ max = bus->busn_res.start;
- list_for_each(tmp, &bus->children) {
- n = pci_bus_max_busnr(pci_bus_b(tmp));
+ list_for_each_entry(tmp, &bus->children, node) {
+ n = pci_bus_max_busnr(tmp);
if (n > max)
max = n;
}
return max;
}
-
-/**
- * acpiphp_bus_add - add a new bus to acpi subsystem
- * @func: acpiphp_func of the bridge
- */
-static int acpiphp_bus_add(struct acpiphp_func *func)
-{
- acpi_handle phandle;
- struct acpi_device *device, *pdevice;
- int ret_val;
-
- acpi_get_parent(func->handle, &phandle);
- if (acpi_bus_get_device(phandle, &pdevice)) {
- dbg("no parent device, assuming NULL\n");
- pdevice = NULL;
- }
- if (!acpi_bus_get_device(func->handle, &device)) {
- dbg("bus exists... trim\n");
- /* this shouldn't be in here, so remove
- * the bus then re-add it...
- */
- ret_val = acpi_bus_trim(device, 1);
- dbg("acpi_bus_trim return %x\n", ret_val);
- }
-
- ret_val = acpi_bus_add(&device, pdevice, func->handle,
- ACPI_BUS_TYPE_DEVICE);
- if (ret_val) {
- dbg("error adding bus, %x\n",
- -ret_val);
- goto acpiphp_bus_add_out;
- }
- ret_val = acpi_bus_start(device);
-
-acpiphp_bus_add_out:
- return ret_val;
-}
-
-
-/**
- * acpiphp_bus_trim - trim a bus from acpi subsystem
- * @handle: handle to acpi namespace
- */
-static int acpiphp_bus_trim(acpi_handle handle)
-{
- struct acpi_device *device;
- int retval;
-
- retval = acpi_bus_get_device(handle, &device);
- if (retval) {
- dbg("acpi_device not found\n");
- return retval;
- }
-
- retval = acpi_bus_trim(device, 1);
- if (retval)
- err("cannot remove from acpi list\n");
-
- return retval;
-}
-
static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
{
struct acpiphp_func *func;
@@ -764,150 +452,142 @@ static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = 1;
/* _REG is optional, we don't care about if there is failure */
- acpi_evaluate_object(func->handle, "_REG", &arg_list, NULL);
+ acpi_evaluate_object(func_to_handle(func), "_REG", &arg_list,
+ NULL);
}
}
+static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev)
+{
+ struct acpiphp_func *func;
+
+ /* quirk, or pcie could set it already */
+ if (dev->is_hotplug_bridge)
+ return;
+
+ list_for_each_entry(func, &slot->funcs, sibling) {
+ if (PCI_FUNC(dev->devfn) == func->function) {
+ dev->is_hotplug_bridge = 1;
+ break;
+ }
+ }
+}
+
+static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
+{
+ struct acpiphp_func *func;
+
+ list_for_each_entry(func, &slot->funcs, sibling) {
+ struct acpi_device *adev = func_to_acpi_device(func);
+
+ acpi_bus_scan(adev->handle);
+ if (acpi_device_enumerated(adev))
+ acpi_device_set_power(adev, ACPI_STATE_D0);
+ }
+ return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0));
+}
+
/**
- * enable_device - enable, configure a slot
+ * enable_slot - enable, configure a slot
* @slot: slot to be enabled
*
* This function should be called per *physical slot*,
* not per each slot object in ACPI namespace.
*/
-static int __ref enable_device(struct acpiphp_slot *slot)
+static void enable_slot(struct acpiphp_slot *slot)
{
struct pci_dev *dev;
- struct pci_bus *bus = slot->bridge->pci_bus;
+ struct pci_bus *bus = slot->bus;
struct acpiphp_func *func;
- int retval = 0;
- int num, max, pass;
- acpi_status status;
-
- if (slot->flags & SLOT_ENABLED)
- goto err_exit;
-
- /* sanity check: dev should be NULL when hot-plugged in */
- dev = pci_get_slot(bus, PCI_DEVFN(slot->device, 0));
- if (dev) {
- /* This case shouldn't happen */
- err("pci_dev structure already exists.\n");
- pci_dev_put(dev);
- retval = -1;
- goto err_exit;
- }
-
- num = pci_scan_slot(bus, PCI_DEVFN(slot->device, 0));
- if (num == 0) {
- err("No new device found\n");
- retval = -1;
- goto err_exit;
- }
+ int max, pass;
+ LIST_HEAD(add_list);
+ acpiphp_rescan_slot(slot);
max = acpiphp_max_busnr(bus);
for (pass = 0; pass < 2; pass++) {
list_for_each_entry(dev, &bus->devices, bus_list) {
if (PCI_SLOT(dev->devfn) != slot->device)
continue;
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
+
+ if (pci_is_bridge(dev)) {
max = pci_scan_bridge(bus, dev, max, pass);
- if (pass && dev->subordinate)
- pci_bus_size_bridges(dev->subordinate);
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
+ pcibios_resource_survey_bus(dev->subordinate);
+ __pci_bus_size_bridges(dev->subordinate,
+ &add_list);
+ }
}
}
}
+ __pci_bus_assign_resources(bus, &add_list, NULL);
- list_for_each_entry(func, &slot->funcs, sibling)
- acpiphp_bus_add(func);
-
- pci_bus_assign_resources(bus);
acpiphp_sanitize_bus(bus);
acpiphp_set_hpp_values(bus);
acpiphp_set_acpi_region(slot);
- pci_enable_bridges(bus);
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ /* Assume that newly added devices are powered on already. */
+ if (!dev->is_added)
+ dev->current_state = PCI_D0;
+ }
+
pci_bus_add_devices(bus);
+ slot->flags |= SLOT_ENABLED;
list_for_each_entry(func, &slot->funcs, sibling) {
dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
func->function));
- if (!dev)
- continue;
-
- if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE &&
- dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) {
- pci_dev_put(dev);
+ if (!dev) {
+ /* Do not set SLOT_ENABLED flag if some funcs
+ are not added. */
+ slot->flags &= (~SLOT_ENABLED);
continue;
}
-
- status = find_p2p_bridge(func->handle, (u32)1, bus, NULL);
- if (ACPI_FAILURE(status))
- warn("find_p2p_bridge failed (error code = 0x%x)\n",
- status);
- pci_dev_put(dev);
- }
-
- slot->flags |= SLOT_ENABLED;
-
- err_exit:
- return retval;
-}
-
-static void disable_bridges(struct pci_bus *bus)
-{
- struct pci_dev *dev;
- list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->subordinate) {
- disable_bridges(dev->subordinate);
- pci_disable_device(dev);
- }
}
}
/**
- * disable_device - disable a slot
+ * disable_slot - disable a slot
* @slot: ACPI PHP slot
*/
-static int disable_device(struct acpiphp_slot *slot)
+static void disable_slot(struct acpiphp_slot *slot)
{
+ struct pci_bus *bus = slot->bus;
+ struct pci_dev *dev, *prev;
struct acpiphp_func *func;
- struct pci_dev *pdev;
-
- /* is this slot already disabled? */
- if (!(slot->flags & SLOT_ENABLED))
- goto err_exit;
-
- list_for_each_entry(func, &slot->funcs, sibling) {
- if (func->bridge) {
- /* cleanup p2p bridges under this P2P bridge */
- cleanup_p2p_bridge(func->bridge->handle,
- (u32)1, NULL, NULL);
- func->bridge = NULL;
- }
- pdev = pci_get_slot(slot->bridge->pci_bus,
- PCI_DEVFN(slot->device, func->function));
- if (pdev) {
- pci_stop_bus_device(pdev);
- if (pdev->subordinate) {
- disable_bridges(pdev->subordinate);
- pci_disable_device(pdev);
- }
- pci_remove_bus_device(pdev);
- pci_dev_put(pdev);
- }
- }
+ /*
+ * enable_slot() enumerates all functions in this device via
+ * pci_scan_slot(), whether they have associated ACPI hotplug
+ * methods (_EJ0, etc.) or not. Therefore, we remove all functions
+ * here.
+ */
+ list_for_each_entry_safe_reverse(dev, prev, &bus->devices, bus_list)
+ if (PCI_SLOT(dev->devfn) == slot->device)
+ pci_stop_and_remove_bus_device(dev);
- list_for_each_entry(func, &slot->funcs, sibling) {
- acpiphp_bus_trim(func->handle);
- }
+ list_for_each_entry(func, &slot->funcs, sibling)
+ acpi_bus_trim(func_to_acpi_device(func));
slot->flags &= (~SLOT_ENABLED);
+}
-err_exit:
- return 0;
+static bool acpiphp_no_hotplug(struct acpi_device *adev)
+{
+ return adev && adev->flags.no_hotplug;
}
+static bool slot_no_hotplug(struct acpiphp_slot *slot)
+{
+ struct acpiphp_func *func;
+
+ list_for_each_entry(func, &slot->funcs, sibling)
+ if (acpiphp_no_hotplug(func_to_acpi_device(func)))
+ return true;
+
+ return false;
+}
/**
* get_slot_status - get ACPI slot status
@@ -923,18 +603,21 @@ err_exit:
*/
static unsigned int get_slot_status(struct acpiphp_slot *slot)
{
- acpi_status status;
unsigned long long sta = 0;
- u32 dvid;
struct acpiphp_func *func;
list_for_each_entry(func, &slot->funcs, sibling) {
if (func->flags & FUNC_HAS_STA) {
- status = acpi_evaluate_integer(func->handle, "_STA", NULL, &sta);
+ acpi_status status;
+
+ status = acpi_evaluate_integer(func_to_handle(func),
+ "_STA", NULL, &sta);
if (ACPI_SUCCESS(status) && sta)
break;
} else {
- pci_bus_read_config_dword(slot->bridge->pci_bus,
+ u32 dvid;
+
+ pci_bus_read_config_dword(slot->bus,
PCI_DEVFN(slot->device,
func->function),
PCI_VENDOR_ID, &dvid);
@@ -948,35 +631,52 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
return (unsigned int)sta;
}
+static inline bool device_status_valid(unsigned int sta)
+{
+ /*
+ * ACPI spec says that _STA may return bit 0 clear with bit 3 set
+ * if the device is valid but does not require a device driver to be
+ * loaded (Section 6.3.7 of ACPI 5.0A).
+ */
+ unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;
+ return (sta & mask) == mask;
+}
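A standalone sketch of the device_status_valid() mask logic above, using the _STA bit values from the ACPI spec (enabled is bit 1, functioning is bit 3); note that bit 0, device present, is deliberately not part of the mask:

#include <stdbool.h>
#include <stdio.h>

#define ACPI_STA_DEVICE_ENABLED		0x02	/* _STA bit 1 */
#define ACPI_STA_DEVICE_FUNCTIONING	0x08	/* _STA bit 3 */

static bool device_status_valid(unsigned int sta)
{
	unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;

	return (sta & mask) == mask;
}

int main(void)
{
	/* Bit 0 (present) clear but enabled+functioning set: still valid. */
	printf("%d\n", device_status_valid(0x0a));
	/* Enabled but not functioning: not valid. */
	printf("%d\n", device_status_valid(0x02));
	return 0;
}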
+
/**
- * acpiphp_eject_slot - physically eject the slot
- * @slot: ACPI PHP slot
+ * trim_stale_devices - remove PCI devices that are not responding.
+ * @dev: PCI device to start walking the hierarchy from.
*/
-int acpiphp_eject_slot(struct acpiphp_slot *slot)
+static void trim_stale_devices(struct pci_dev *dev)
{
- acpi_status status;
- struct acpiphp_func *func;
- struct acpi_object_list arg_list;
- union acpi_object arg;
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ struct pci_bus *bus = dev->subordinate;
+ bool alive = false;
- list_for_each_entry(func, &slot->funcs, sibling) {
- /* We don't want to call _EJ0 on non-existing functions. */
- if ((func->flags & FUNC_HAS_EJ0)) {
- /* _EJ0 method take one argument */
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = 1;
-
- status = acpi_evaluate_object(func->handle, "_EJ0", &arg_list, NULL);
- if (ACPI_FAILURE(status)) {
- warn("%s: _EJ0 failed\n", __func__);
- return -1;
- } else
- break;
- }
+ if (adev) {
+ acpi_status status;
+ unsigned long long sta;
+
+ status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
+ alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
+ || acpiphp_no_hotplug(adev);
+ }
+ if (!alive)
+ alive = pci_device_is_present(dev);
+
+ if (!alive) {
+ pci_stop_and_remove_bus_device(dev);
+ if (adev)
+ acpi_bus_trim(adev);
+ } else if (bus) {
+ struct pci_dev *child, *tmp;
+
+		/* The device is a bridge, so check the bus below it. */
+ pm_runtime_get_sync(&dev->dev);
+ list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list)
+ trim_stale_devices(child);
+
+ pm_runtime_put(&dev->dev);
}
- return 0;
}
/**
@@ -986,43 +686,33 @@ int acpiphp_eject_slot(struct acpiphp_slot *slot)
* Iterate over all slots under this bridge and make sure that if a
* card is present they are enabled, and if not they are disabled.
*/
-static int acpiphp_check_bridge(struct acpiphp_bridge *bridge)
+static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
{
struct acpiphp_slot *slot;
- int retval = 0;
- int enabled, disabled;
- enabled = disabled = 0;
+ /* Bail out if the bridge is going away. */
+ if (bridge->is_going_away)
+ return;
- for (slot = bridge->slots; slot; slot = slot->next) {
- unsigned int status = get_slot_status(slot);
- if (slot->flags & SLOT_ENABLED) {
- if (status == ACPI_STA_ALL)
- continue;
- retval = acpiphp_disable_slot(slot);
- if (retval) {
- err("Error occurred in disabling\n");
- goto err_exit;
- } else {
- acpiphp_eject_slot(slot);
- }
- disabled++;
+ list_for_each_entry(slot, &bridge->slots, node) {
+ struct pci_bus *bus = slot->bus;
+ struct pci_dev *dev, *tmp;
+
+ if (slot_no_hotplug(slot)) {
+ ; /* do nothing */
+ } else if (device_status_valid(get_slot_status(slot))) {
+ /* remove stale devices if any */
+ list_for_each_entry_safe_reverse(dev, tmp,
+ &bus->devices, bus_list)
+ if (PCI_SLOT(dev->devfn) == slot->device)
+ trim_stale_devices(dev);
+
+ /* configure all functions */
+ enable_slot(slot);
} else {
- if (status != ACPI_STA_ALL)
- continue;
- retval = acpiphp_enable_slot(slot);
- if (retval) {
- err("Error occurred in enabling\n");
- goto err_exit;
- }
- enabled++;
+ disable_slot(slot);
}
}
-
- dbg("%s: %d enabled, %d disabled\n", __func__, enabled, disabled);
-
- err_exit:
- return retval;
}
static void acpiphp_set_hpp_values(struct pci_bus *bus)
@@ -1039,349 +729,242 @@ static void acpiphp_set_hpp_values(struct pci_bus *bus)
*/
static void acpiphp_sanitize_bus(struct pci_bus *bus)
{
- struct pci_dev *dev;
+ struct pci_dev *dev, *tmp;
int i;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- list_for_each_entry(dev, &bus->devices, bus_list) {
+ list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) {
for (i=0; i<PCI_BRIDGE_RESOURCES; i++) {
struct resource *res = &dev->resource[i];
if ((res->flags & type_mask) && !res->start &&
res->end) {
/* Could not assign a required resource
* for this device, remove it */
- pci_remove_bus_device(dev);
+ pci_stop_and_remove_bus_device(dev);
break;
}
}
}
}
-/* Program resources in newly inserted bridge */
-static int acpiphp_configure_bridge (acpi_handle handle)
-{
- struct pci_bus *bus;
-
- if (acpi_is_root_bridge(handle)) {
- struct acpi_pci_root *root = acpi_pci_find_root(handle);
- bus = root->bus;
- } else {
- struct pci_dev *pdev = acpi_get_pci_dev(handle);
- bus = pdev->subordinate;
- pci_dev_put(pdev);
- }
-
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
- acpiphp_sanitize_bus(bus);
- acpiphp_set_hpp_values(bus);
- pci_enable_bridges(bus);
- return 0;
-}
-
-static void handle_bridge_insertion(acpi_handle handle, u32 type)
-{
- struct acpi_device *device, *pdevice;
- acpi_handle phandle;
-
- if ((type != ACPI_NOTIFY_BUS_CHECK) &&
- (type != ACPI_NOTIFY_DEVICE_CHECK)) {
- err("unexpected notification type %d\n", type);
- return;
- }
-
- acpi_get_parent(handle, &phandle);
- if (acpi_bus_get_device(phandle, &pdevice)) {
- dbg("no parent device, assuming NULL\n");
- pdevice = NULL;
- }
- if (acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE)) {
- err("cannot add bridge to acpi list\n");
- return;
- }
- if (!acpiphp_configure_bridge(handle) &&
- !acpi_bus_start(device))
- add_bridge(handle);
- else
- err("cannot configure and start bridge\n");
-
-}
-
/*
* ACPI event handlers
*/
-static acpi_status
-count_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
+void acpiphp_check_host_bridge(struct acpi_device *adev)
{
- int *count = (int *)context;
struct acpiphp_bridge *bridge;
- bridge = acpiphp_handle_to_bridge(handle);
- if (bridge)
- (*count)++;
- return AE_OK ;
-}
-
-static acpi_status
-check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- struct acpiphp_bridge *bridge;
- char objname[64];
- struct acpi_buffer buffer = { .length = sizeof(objname),
- .pointer = objname };
-
- bridge = acpiphp_handle_to_bridge(handle);
+ bridge = acpiphp_dev_to_bridge(adev);
if (bridge) {
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
- dbg("%s: re-enumerating slots under %s\n",
- __func__, objname);
+ pci_lock_rescan_remove();
+
acpiphp_check_bridge(bridge);
+
+ pci_unlock_rescan_remove();
+ put_bridge(bridge);
}
- return AE_OK ;
}
-/**
- * handle_hotplug_event_bridge - handle ACPI event on bridges
- * @handle: Notify()'ed acpi_handle
- * @type: Notify code
- * @context: pointer to acpiphp_bridge structure
- *
- * Handles ACPI event notification on {host,p2p} bridges.
- */
-static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *context)
+static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot);
+
+static void hotplug_event(u32 type, struct acpiphp_context *context)
{
+ acpi_handle handle = context->hp.self->handle;
+ struct acpiphp_func *func = &context->func;
+ struct acpiphp_slot *slot = func->slot;
struct acpiphp_bridge *bridge;
- char objname[64];
- struct acpi_buffer buffer = { .length = sizeof(objname),
- .pointer = objname };
- struct acpi_device *device;
- int num_sub_bridges = 0;
-
- if (acpi_bus_get_device(handle, &device)) {
- /* This bridge must have just been physically inserted */
- handle_bridge_insertion(handle, type);
- return;
- }
- bridge = acpiphp_handle_to_bridge(handle);
- if (type == ACPI_NOTIFY_BUS_CHECK) {
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, ACPI_UINT32_MAX,
- count_sub_bridges, NULL, &num_sub_bridges, NULL);
- }
+ acpi_lock_hp_context();
+ bridge = context->bridge;
+ if (bridge)
+ get_bridge(bridge);
- if (!bridge && !num_sub_bridges) {
- err("cannot get bridge info\n");
- return;
- }
+ acpi_unlock_hp_context();
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ pci_lock_rescan_remove();
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
/* bus re-enumerate */
- dbg("%s: Bus check notify on %s\n", __func__, objname);
- if (bridge) {
- dbg("%s: re-enumerating slots under %s\n",
- __func__, objname);
+ acpi_handle_debug(handle, "Bus check in %s()\n", __func__);
+ if (bridge)
acpiphp_check_bridge(bridge);
- }
- if (num_sub_bridges)
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
- ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
+ else if (!(slot->flags & SLOT_IS_GOING_AWAY))
+ enable_slot(slot);
+
break;
case ACPI_NOTIFY_DEVICE_CHECK:
/* device check */
- dbg("%s: Device check notify on %s\n", __func__, objname);
- acpiphp_check_bridge(bridge);
- break;
-
- case ACPI_NOTIFY_DEVICE_WAKE:
- /* wake event */
- dbg("%s: Device wake notify on %s\n", __func__, objname);
+ acpi_handle_debug(handle, "Device check in %s()\n", __func__);
+ if (bridge) {
+ acpiphp_check_bridge(bridge);
+ } else if (!(slot->flags & SLOT_IS_GOING_AWAY)) {
+ /*
+ * Check if anything has changed in the slot and rescan
+ * from the parent if that's the case.
+ */
+ if (acpiphp_rescan_slot(slot))
+ acpiphp_check_bridge(func->parent);
+ }
break;
case ACPI_NOTIFY_EJECT_REQUEST:
/* request device eject */
- dbg("%s: Device eject notify on %s\n", __func__, objname);
- if ((bridge->type != BRIDGE_TYPE_HOST) &&
- (bridge->flags & BRIDGE_HAS_EJ0)) {
- struct acpiphp_slot *slot;
- slot = bridge->func->slot;
- if (!acpiphp_disable_slot(slot))
- acpiphp_eject_slot(slot);
- }
+ acpi_handle_debug(handle, "Eject request in %s()\n", __func__);
+ acpiphp_disable_and_eject_slot(slot);
break;
+ }
- case ACPI_NOTIFY_FREQUENCY_MISMATCH:
- printk(KERN_ERR "Device %s cannot be configured due"
- " to a frequency mismatch\n", objname);
- break;
+ pci_unlock_rescan_remove();
+ if (bridge)
+ put_bridge(bridge);
+}
- case ACPI_NOTIFY_BUS_MODE_MISMATCH:
- printk(KERN_ERR "Device %s cannot be configured due"
- " to a bus mode mismatch\n", objname);
- break;
+static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type)
+{
+ struct acpiphp_context *context;
- case ACPI_NOTIFY_POWER_FAULT:
- printk(KERN_ERR "Device %s has suffered a power fault\n",
- objname);
- break;
+ context = acpiphp_grab_context(adev);
+ if (!context)
+ return -ENODATA;
- default:
- warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
- break;
- }
+ hotplug_event(type, context);
+ acpiphp_let_context_go(context);
+ return 0;
}
/**
- * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots)
- * @handle: Notify()'ed acpi_handle
- * @type: Notify code
- * @context: pointer to acpiphp_func structure
+ * acpiphp_enumerate_slots - Enumerate PCI slots for a given bus.
+ * @bus: PCI bus to enumerate the slots for.
*
- * Handles ACPI event notification on slots.
+ * A "slot" is an object associated with a PCI device number. All functions
+ * (PCI devices) with the same bus and device number belong to the same slot.
*/
-static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context)
+void acpiphp_enumerate_slots(struct pci_bus *bus)
{
- struct acpiphp_func *func;
- char objname[64];
- struct acpi_buffer buffer = { .length = sizeof(objname),
- .pointer = objname };
-
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ struct acpiphp_bridge *bridge;
+ struct acpi_device *adev;
+ acpi_handle handle;
+ acpi_status status;
- func = (struct acpiphp_func *)context;
+ if (acpiphp_disabled)
+ return;
- switch (type) {
- case ACPI_NOTIFY_BUS_CHECK:
- /* bus re-enumerate */
- dbg("%s: Bus check notify on %s\n", __func__, objname);
- acpiphp_enable_slot(func->slot);
- break;
+ adev = ACPI_COMPANION(bus->bridge);
+ if (!adev)
+ return;
- case ACPI_NOTIFY_DEVICE_CHECK:
- /* device check : re-enumerate from parent bus */
- dbg("%s: Device check notify on %s\n", __func__, objname);
- acpiphp_check_bridge(func->slot->bridge);
- break;
+ handle = adev->handle;
+ bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
+ if (!bridge) {
+ acpi_handle_err(handle, "No memory for bridge object\n");
+ return;
+ }
- case ACPI_NOTIFY_DEVICE_WAKE:
- /* wake event */
- dbg("%s: Device wake notify on %s\n", __func__, objname);
- break;
+ INIT_LIST_HEAD(&bridge->slots);
+ kref_init(&bridge->ref);
+ bridge->pci_dev = pci_dev_get(bus->self);
+ bridge->pci_bus = bus;
- case ACPI_NOTIFY_EJECT_REQUEST:
- /* request device eject */
- dbg("%s: Device eject notify on %s\n", __func__, objname);
- if (!(acpiphp_disable_slot(func->slot)))
- acpiphp_eject_slot(func->slot);
- break;
+ /*
+ * Grab a ref to the subordinate PCI bus in case the bus is
+ * removed via PCI core logical hotplug. The ref pins the bus
+ * (which we access during module unload).
+ */
+ get_device(&bus->dev);
- default:
- warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
- break;
- }
-}
+ acpi_lock_hp_context();
+ if (pci_is_root_bus(bridge->pci_bus)) {
+ struct acpiphp_root_context *root_context;
+ root_context = kzalloc(sizeof(*root_context), GFP_KERNEL);
+ if (!root_context)
+ goto err;
-static acpi_status
-find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- int *count = (int *)context;
+ root_context->root_bridge = bridge;
+ acpi_set_hp_context(adev, &root_context->hp, NULL, NULL, NULL);
+ } else {
+ struct acpiphp_context *context;
- if (acpi_is_root_bridge(handle)) {
- acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_bridge, NULL);
- (*count)++;
+ /*
+ * This bridge should have been registered as a hotplug function
+ * under its parent, so the context should be there, unless the
+ * parent is going to be handled by pciehp, in which case this
+ * bridge is not interesting to us either.
+ */
+ context = acpiphp_get_context(adev);
+ if (!context)
+ goto err;
+
+ bridge->context = context;
+ context->bridge = bridge;
+ /* Get a reference to the parent bridge. */
+ get_bridge(context->func.parent);
}
- return AE_OK ;
-}
-
-static struct acpi_pci_driver acpi_pci_hp_driver = {
- .add = add_bridge,
- .remove = remove_bridge,
-};
-
-/**
- * acpiphp_glue_init - initializes all PCI hotplug - ACPI glue data structures
- */
-int __init acpiphp_glue_init(void)
-{
- int num = 0;
+ acpi_unlock_hp_context();
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_root_bridges, NULL, &num, NULL);
+ /* Must be added to the list prior to calling acpiphp_add_context(). */
+ mutex_lock(&bridge_mutex);
+ list_add(&bridge->list, &bridge_list);
+ mutex_unlock(&bridge_mutex);
- if (num <= 0)
- return -1;
- else
- acpi_pci_register_driver(&acpi_pci_hp_driver);
+ /* register all slot objects under this bridge */
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpiphp_add_context, NULL, bridge, NULL);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to register slots\n");
+ cleanup_bridge(bridge);
+ put_bridge(bridge);
+ }
+ return;
- return 0;
+ err:
+ acpi_unlock_hp_context();
+ put_device(&bus->dev);
+ pci_dev_put(bridge->pci_dev);
+ kfree(bridge);
}
-
-/**
- * acpiphp_glue_exit - terminates all PCI hotplug - ACPI glue data structures
- *
- * This function frees all data allocated in acpiphp_glue_init().
- */
-void acpiphp_glue_exit(void)
+void acpiphp_drop_bridge(struct acpiphp_bridge *bridge)
{
- acpi_pci_unregister_driver(&acpi_pci_hp_driver);
+ if (pci_is_root_bus(bridge->pci_bus)) {
+ struct acpiphp_root_context *root_context;
+ struct acpi_device *adev;
+
+ acpi_lock_hp_context();
+ adev = ACPI_COMPANION(bridge->pci_bus->bridge);
+ root_context = to_acpiphp_root_context(adev->hp);
+ adev->hp = NULL;
+ acpi_unlock_hp_context();
+ kfree(root_context);
+ }
+ cleanup_bridge(bridge);
+ put_bridge(bridge);
}
-
/**
- * acpiphp_get_num_slots - count number of slots in a system
+ * acpiphp_remove_slots - Remove slot objects associated with a given bus.
+ * @bus: PCI bus to remove the slot objects for.
*/
-int __init acpiphp_get_num_slots(void)
+void acpiphp_remove_slots(struct pci_bus *bus)
{
struct acpiphp_bridge *bridge;
- int num_slots = 0;
-
- list_for_each_entry(bridge, &bridge_list, list) {
- dbg("Bus %04x:%02x has %d slot%s\n",
- pci_domain_nr(bridge->pci_bus),
- bridge->pci_bus->number, bridge->nr_slots,
- bridge->nr_slots == 1 ? "" : "s");
- num_slots += bridge->nr_slots;
- }
-
- dbg("Total %d slots\n", num_slots);
- return num_slots;
-}
+ if (acpiphp_disabled)
+ return;
-#if 0
-/**
- * acpiphp_for_each_slot - call function for each slot
- * @fn: callback function
- * @data: context to be passed to callback function
- */
-static int acpiphp_for_each_slot(acpiphp_callback fn, void *data)
-{
- struct list_head *node;
- struct acpiphp_bridge *bridge;
- struct acpiphp_slot *slot;
- int retval = 0;
-
- list_for_each (node, &bridge_list) {
- bridge = (struct acpiphp_bridge *)node;
- for (slot = bridge->slots; slot; slot = slot->next) {
- retval = fn(slot, data);
- if (!retval)
- goto err_exit;
+ mutex_lock(&bridge_mutex);
+ list_for_each_entry(bridge, &bridge_list, list)
+ if (bridge->pci_bus == bus) {
+ mutex_unlock(&bridge_mutex);
+ acpiphp_drop_bridge(bridge);
+ return;
}
- }
- err_exit:
- return retval;
+ mutex_unlock(&bridge_mutex);
}
-#endif
-
/**
* acpiphp_enable_slot - power on slot
@@ -1389,55 +972,61 @@ static int acpiphp_for_each_slot(acpiphp_callback fn, void *data)
*/
int acpiphp_enable_slot(struct acpiphp_slot *slot)
{
- int retval;
-
- mutex_lock(&slot->crit_sect);
+ pci_lock_rescan_remove();
- /* wake up all functions */
- retval = power_on_slot(slot);
- if (retval)
- goto err_exit;
+ if (slot->flags & SLOT_IS_GOING_AWAY)
+ return -ENODEV;
- if (get_slot_status(slot) == ACPI_STA_ALL) {
- /* configure all functions */
- retval = enable_device(slot);
- if (retval)
- power_off_slot(slot);
- } else {
- dbg("%s: Slot status is not ACPI_STA_ALL\n", __func__);
- power_off_slot(slot);
- }
+ /* configure all functions */
+ if (!(slot->flags & SLOT_ENABLED))
+ enable_slot(slot);
- err_exit:
- mutex_unlock(&slot->crit_sect);
- return retval;
+ pci_unlock_rescan_remove();
+ return 0;
}
/**
- * acpiphp_disable_slot - power off slot
+ * acpiphp_disable_and_eject_slot - power off and eject slot
* @slot: ACPI PHP slot
*/
-int acpiphp_disable_slot(struct acpiphp_slot *slot)
+static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
{
- int retval = 0;
+ struct acpiphp_func *func;
- mutex_lock(&slot->crit_sect);
+ if (slot->flags & SLOT_IS_GOING_AWAY)
+ return -ENODEV;
/* unconfigure all functions */
- retval = disable_device(slot);
- if (retval)
- goto err_exit;
-
- /* power off all functions */
- retval = power_off_slot(slot);
- if (retval)
- goto err_exit;
-
- err_exit:
- mutex_unlock(&slot->crit_sect);
- return retval;
+ disable_slot(slot);
+
+ list_for_each_entry(func, &slot->funcs, sibling)
+ if (func->flags & FUNC_HAS_EJ0) {
+ acpi_handle handle = func_to_handle(func);
+
+ if (ACPI_FAILURE(acpi_evaluate_ej0(handle)))
+ acpi_handle_err(handle, "_EJ0 failed\n");
+
+ break;
+ }
+
+ return 0;
}
+int acpiphp_disable_slot(struct acpiphp_slot *slot)
+{
+ int ret;
+
+ /*
+ * Acquire acpi_scan_lock to ensure that the execution of _EJ0 in
+ * acpiphp_disable_and_eject_slot() will be synchronized properly.
+ */
+ acpi_scan_lock_acquire();
+ pci_lock_rescan_remove();
+ ret = acpiphp_disable_and_eject_slot(slot);
+ pci_unlock_rescan_remove();
+ acpi_scan_lock_release();
+ return ret;
+}
/*
* slot enabled: 1
@@ -1445,33 +1034,23 @@ int acpiphp_disable_slot(struct acpiphp_slot *slot)
*/
u8 acpiphp_get_power_status(struct acpiphp_slot *slot)
{
- return (slot->flags & SLOT_POWEREDON);
+ return (slot->flags & SLOT_ENABLED);
}
-
/*
* latch open: 1
* latch closed: 0
*/
u8 acpiphp_get_latch_status(struct acpiphp_slot *slot)
{
- unsigned int sta;
-
- sta = get_slot_status(slot);
-
- return (sta & ACPI_STA_SHOW_IN_UI) ? 0 : 1;
+ return !(get_slot_status(slot) & ACPI_STA_DEVICE_UI);
}
-
/*
* adapter presence : 1
* absence : 0
*/
u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot)
{
- unsigned int sta;
-
- sta = get_slot_status(slot);
-
- return (sta == 0) ? 0 : 1;
+ return !!get_slot_status(slot);
}
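
The enable/disable paths above replace the old per-slot mutex with the global PCI rescan/remove lock, and _EJ0 evaluation is additionally serialized behind acpi_scan_lock. A minimal sketch of the lock nesting, assuming the stock <linux/acpi.h> and <linux/pci.h> APIs; example_eject() and do_eject are hypothetical names, not driver code:

    #include <linux/acpi.h>
    #include <linux/pci.h>

    /* Sketch only: acpi_scan_lock is the outer lock and the PCI
     * rescan/remove lock the inner one, matching acpiphp_disable_slot()
     * above so ACPI- and sysfs-triggered ejects cannot deadlock.
     */
    static int example_eject(void (*do_eject)(void))
    {
            acpi_scan_lock_acquire();
            pci_lock_rescan_remove();
            do_eject();
            pci_unlock_rescan_remove();
            acpi_scan_lock_release();
            return 0;
    }
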
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index e525263210e..8dcccffd6e2 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -25,16 +25,17 @@
*
*/
+#define pr_fmt(fmt) "acpiphp_ibm: " fmt
+
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <acpi/acpi_bus.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
-#include <asm/uaccess.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
+#include <asm/uaccess.h>
#include "acpiphp.h"
#include "../pci.h"
@@ -43,30 +44,18 @@
#define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>"
#define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver IBM extension"
-static int debug;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
-module_param(debug, bool, 0644);
-MODULE_PARM_DESC(debug, " Debugging mode enabled or not");
-#define MY_NAME "acpiphp_ibm"
-
-#undef dbg
-#define dbg(format, arg...) \
-do { \
- if (debug) \
- printk(KERN_DEBUG "%s: " format, \
- MY_NAME , ## arg); \
-} while (0)
#define FOUND_APCI 0x61504349
/* these are the names for the IBM ACPI pseudo-device */
#define IBM_HARDWARE_ID1 "IBM37D0"
#define IBM_HARDWARE_ID2 "IBM37D4"
-#define hpslot_to_sun(A) (((struct slot *)((A)->private))->acpi_slot->sun)
+#define hpslot_to_sun(A) (((struct slot *)((A)->private))->sun)
/* union apci_descriptor - allows access to the
* various device descriptors that are embedded in the
@@ -126,7 +115,7 @@ static struct bin_attribute ibm_apci_table_attr = {
.read = ibm_read_apci_table,
.write = NULL,
};
-static struct acpiphp_attention_info ibm_attention_info =
+static struct acpiphp_attention_info ibm_attention_info =
{
.set_attn = ibm_set_attention_status,
.get_attn = ibm_get_attention_status,
@@ -181,15 +170,15 @@ ibm_slot_done:
*/
static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
{
- union acpi_object args[2];
+ union acpi_object args[2];
struct acpi_object_list params = { .pointer = args, .count = 2 };
- acpi_status stat;
+ acpi_status stat;
unsigned long long rc;
union apci_descriptor *ibm_slot;
ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot));
- dbg("%s: set slot %d (%d) attention status to %d\n", __func__,
+ pr_debug("%s: set slot %d (%d) attention status to %d\n", __func__,
ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
(status ? 1 : 0));
@@ -202,10 +191,10 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
stat = acpi_evaluate_integer(ibm_acpi_handle, "APLS", &params, &rc);
if (ACPI_FAILURE(stat)) {
- err("APLS evaluation failed: 0x%08x\n", stat);
+ pr_err("APLS evaluation failed: 0x%08x\n", stat);
return -ENODEV;
} else if (!rc) {
- err("APLS method failed: 0x%08llx\n", rc);
+ pr_err("APLS method failed: 0x%08llx\n", rc);
return -ERANGE;
}
return 0;
@@ -218,7 +207,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
*
* Description: This method is registered with the acpiphp module as a
* callback to do the device specific task of getting the LED status.
- *
+ *
* Because there is no direct method of getting the LED status directly
* from an ACPI call, we read the aPCI table and parse out our
* slot descriptor to read the status from that.
@@ -234,7 +223,7 @@ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status)
else
*status = 0;
- dbg("%s: get slot %d (%d) attention status is %d\n", __func__,
+ pr_debug("%s: get slot %d (%d) attention status is %d\n", __func__,
ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
*status);
@@ -266,11 +255,10 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context)
u8 subevent = event & 0xf0;
struct notification *note = context;
- dbg("%s: Received notification %02x\n", __func__, event);
+ pr_debug("%s: Received notification %02x\n", __func__, event);
if (subevent == 0x80) {
- dbg("%s: generationg bus event\n", __func__);
- acpi_bus_generate_proc_event(note->device, note->event, detail);
+ pr_debug("%s: generating bus event\n", __func__);
acpi_bus_generate_netlink_event(note->device->pnp.device_class,
dev_name(&note->device->dev),
note->event, detail);
@@ -302,7 +290,7 @@ static int ibm_get_table_from_acpi(char **bufp)
status = acpi_evaluate_object(ibm_acpi_handle, "APCI", NULL, &buffer);
if (ACPI_FAILURE(status)) {
- err("%s: APCI evaluation failed\n", __func__);
+ pr_err("%s: APCI evaluation failed\n", __func__);
return -ENODEV;
}
@@ -310,13 +298,13 @@ static int ibm_get_table_from_acpi(char **bufp)
if (!(package) ||
(package->type != ACPI_TYPE_PACKAGE) ||
!(package->package.elements)) {
- err("%s: Invalid APCI object\n", __func__);
+ pr_err("%s: Invalid APCI object\n", __func__);
goto read_table_done;
}
for(size = 0, i = 0; i < package->package.count; i++) {
if (package->package.elements[i].type != ACPI_TYPE_BUFFER) {
- err("%s: Invalid APCI element %d\n", __func__, i);
+ pr_err("%s: Invalid APCI element %d\n", __func__, i);
goto read_table_done;
}
size += package->package.elements[i].buffer.length;
@@ -326,7 +314,7 @@ static int ibm_get_table_from_acpi(char **bufp)
goto read_table_done;
lbuf = kzalloc(size, GFP_KERNEL);
- dbg("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
+ pr_debug("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
__func__, package->package.count, size, lbuf);
if (lbuf) {
@@ -371,8 +359,8 @@ static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
{
int bytes_read = -EINVAL;
char *table = NULL;
-
- dbg("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);
+
+ pr_debug("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);
if (pos == 0) {
bytes_read = ibm_get_table_from_acpi(&table);
@@ -398,13 +386,13 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
u32 lvl, void *context, void **rv)
{
acpi_handle *phandle = (acpi_handle *)context;
- acpi_status status;
+ acpi_status status;
struct acpi_device_info *info;
int retval = 0;
status = acpi_get_object_info(handle, &info);
if (ACPI_FAILURE(status)) {
- err("%s: Failed to get device information status=0x%x\n",
+ pr_err("%s: Failed to get device information status=0x%x\n",
__func__, status);
return retval;
}
@@ -412,11 +400,11 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
if (info->current_status && (info->valid & ACPI_VALID_HID) &&
(!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
!strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) {
- dbg("found hardware: %s, handle: %p\n",
+ pr_debug("found hardware: %s, handle: %p\n",
info->hardware_id.string, handle);
*phandle = handle;
/* returning non-zero causes the search to stop
- * and returns this value to the caller of
+ * and returns this value to the caller of
* acpi_walk_namespace, but it also causes some warnings
* in the acpi debug code to print...
*/
@@ -433,18 +421,18 @@ static int __init ibm_acpiphp_init(void)
struct acpi_device *device;
struct kobject *sysdir = &pci_slots_kset->kobj;
- dbg("%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, ibm_find_acpi_device, NULL,
&ibm_acpi_handle, NULL) != FOUND_APCI) {
- err("%s: acpi_walk_namespace failed\n", __func__);
+ pr_err("%s: acpi_walk_namespace failed\n", __func__);
retval = -ENODEV;
goto init_return;
}
- dbg("%s: found IBM aPCI device\n", __func__);
+ pr_debug("%s: found IBM aPCI device\n", __func__);
if (acpi_bus_get_device(ibm_acpi_handle, &device)) {
- err("%s: acpi_bus_get_device failed\n", __func__);
+ pr_err("%s: acpi_bus_get_device failed\n", __func__);
retval = -ENODEV;
goto init_return;
}
@@ -458,7 +446,7 @@ static int __init ibm_acpiphp_init(void)
ACPI_DEVICE_NOTIFY, ibm_handle_events,
&ibm_note);
if (ACPI_FAILURE(status)) {
- err("%s: Failed to register notification handler\n",
+ pr_err("%s: Failed to register notification handler\n",
__func__);
retval = -EBUSY;
goto init_cleanup;
@@ -480,17 +468,17 @@ static void __exit ibm_acpiphp_exit(void)
acpi_status status;
struct kobject *sysdir = &pci_slots_kset->kobj;
- dbg("%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (acpiphp_unregister_attention(&ibm_attention_info))
- err("%s: attention info deregistration failed", __func__);
+ pr_err("%s: attention info deregistration failed", __func__);
status = acpi_remove_notify_handler(
ibm_acpi_handle,
ACPI_DEVICE_NOTIFY,
ibm_handle_events);
if (ACPI_FAILURE(status))
- err("%s: Notification handler removal failed\n", __func__);
+ pr_err("%s: Notification handler removal failed\n", __func__);
/* remove the /sys entries */
sysfs_remove_bin_file(sysdir, &ibm_apci_table_attr);
}
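
The IBM extension drops its private dbg() macro and "debug" module parameter in favor of pr_fmt()/pr_debug(), which participate in dynamic debug. A small sketch of that pattern; example_log() is a hypothetical function:

    /* pr_fmt must be defined before any printk-related include */
    #define pr_fmt(fmt) "acpiphp_ibm: " fmt

    #include <linux/printk.h>

    static void example_log(int slot)
    {
            /* compiled in only with DEBUG or CONFIG_DYNAMIC_DEBUG;
             * enable at run time via dynamic_debug/control
             */
            pr_debug("probing slot %d\n", slot);
            pr_err("slot %d failed\n", slot);       /* always printed */
    }
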
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index 9fff878cf02..6a0ddf75734 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -56,9 +56,9 @@ struct cpci_hp_controller_ops {
int (*enable_irq) (void);
int (*disable_irq) (void);
int (*check_irq) (void *dev_id);
- int (*hardware_test) (struct slot* slot, u32 value);
- u8 (*get_power) (struct slot* slot);
- int (*set_power) (struct slot* slot, int value);
+ int (*hardware_test) (struct slot *slot, u32 value);
+ u8 (*get_power) (struct slot *slot);
+ int (*set_power) (struct slot *slot, int value);
};
struct cpci_hp_controller {
@@ -75,28 +75,36 @@ static inline const char *slot_name(struct slot *slot)
return hotplug_slot_name(slot->hotplug_slot);
}
-extern int cpci_hp_register_controller(struct cpci_hp_controller *controller);
-extern int cpci_hp_unregister_controller(struct cpci_hp_controller *controller);
-extern int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last);
-extern int cpci_hp_unregister_bus(struct pci_bus *bus);
-extern int cpci_hp_start(void);
-extern int cpci_hp_stop(void);
+int cpci_hp_register_controller(struct cpci_hp_controller *controller);
+int cpci_hp_unregister_controller(struct cpci_hp_controller *controller);
+int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last);
+int cpci_hp_unregister_bus(struct pci_bus *bus);
+int cpci_hp_start(void);
+int cpci_hp_stop(void);
/*
* Internal function prototypes, these functions should not be used by
* board/chassis drivers.
*/
-extern u8 cpci_get_attention_status(struct slot *slot);
-extern u8 cpci_get_latch_status(struct slot *slot);
-extern u8 cpci_get_adapter_status(struct slot *slot);
-extern u16 cpci_get_hs_csr(struct slot * slot);
-extern int cpci_set_attention_status(struct slot *slot, int status);
-extern int cpci_check_and_clear_ins(struct slot * slot);
-extern int cpci_check_ext(struct slot * slot);
-extern int cpci_clear_ext(struct slot * slot);
-extern int cpci_led_on(struct slot * slot);
-extern int cpci_led_off(struct slot * slot);
-extern int cpci_configure_slot(struct slot *slot);
-extern int cpci_unconfigure_slot(struct slot *slot);
+u8 cpci_get_attention_status(struct slot *slot);
+u8 cpci_get_latch_status(struct slot *slot);
+u8 cpci_get_adapter_status(struct slot *slot);
+u16 cpci_get_hs_csr(struct slot *slot);
+int cpci_set_attention_status(struct slot *slot, int status);
+int cpci_check_and_clear_ins(struct slot *slot);
+int cpci_check_ext(struct slot *slot);
+int cpci_clear_ext(struct slot *slot);
+int cpci_led_on(struct slot *slot);
+int cpci_led_off(struct slot *slot);
+int cpci_configure_slot(struct slot *slot);
+int cpci_unconfigure_slot(struct slot *slot);
+
+#ifdef CONFIG_HOTPLUG_PCI_CPCI
+int cpci_hotplug_init(int debug);
+void cpci_hotplug_exit(void);
+#else
+static inline int cpci_hotplug_init(int debug) { return 0; }
+static inline void cpci_hotplug_exit(void) { }
+#endif
#endif /* _CPCI_HOTPLUG_H */
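
The new CONFIG_HOTPLUG_PCI_CPCI block uses the kernel's static-inline stub idiom so callers can invoke cpci_hotplug_init()/cpci_hotplug_exit() unconditionally. The same idiom in generic form; CONFIG_EXAMPLE_FEATURE and the example_* names are placeholders:

    #ifdef CONFIG_EXAMPLE_FEATURE
    int example_init(int debug);
    void example_exit(void);
    #else
    /* no-op stubs: calls compile away when the feature is disabled */
    static inline int example_init(int debug) { return 0; }
    static inline void example_exit(void) { }
    #endif
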
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index d703e73fffa..e09cf7827d6 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -32,7 +32,7 @@
#include <linux/pci_hotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include "cpci_hotplug.h"
@@ -46,7 +46,7 @@
do { \
if (cpci_debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
@@ -65,10 +65,10 @@ static int thread_finished;
static int enable_slot(struct hotplug_slot *slot);
static int disable_slot(struct hotplug_slot *slot);
static int set_attention_status(struct hotplug_slot *slot, u8 value);
-static int get_power_status(struct hotplug_slot *slot, u8 * value);
-static int get_attention_status(struct hotplug_slot *slot, u8 * value);
-static int get_adapter_status(struct hotplug_slot *slot, u8 * value);
-static int get_latch_status(struct hotplug_slot *slot, u8 * value);
+static int get_power_status(struct hotplug_slot *slot, u8 *value);
+static int get_attention_status(struct hotplug_slot *slot, u8 *value);
+static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
+static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
.enable_slot = enable_slot,
@@ -168,7 +168,7 @@ cpci_get_power_status(struct slot *slot)
}
static int
-get_power_status(struct hotplug_slot *hotplug_slot, u8 * value)
+get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
@@ -177,7 +177,7 @@ get_power_status(struct hotplug_slot *hotplug_slot, u8 * value)
}
static int
-get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value)
+get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
@@ -192,14 +192,14 @@ set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
}
static int
-get_adapter_status(struct hotplug_slot *hotplug_slot, u8 * value)
+get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
*value = hotplug_slot->info->adapter_status;
return 0;
}
static int
-get_latch_status(struct hotplug_slot *hotplug_slot, u8 * value)
+get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
*value = hotplug_slot->info->latch_status;
return 0;
@@ -225,7 +225,7 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
- int status = -ENOMEM;
+ int status;
int i;
if (!(controller && bus))
@@ -237,18 +237,24 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
*/
for (i = first; i <= last; ++i) {
slot = kzalloc(sizeof (struct slot), GFP_KERNEL);
- if (!slot)
+ if (!slot) {
+ status = -ENOMEM;
goto error;
+ }
hotplug_slot =
kzalloc(sizeof (struct hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot)
+ if (!hotplug_slot) {
+ status = -ENOMEM;
goto error_slot;
+ }
slot->hotplug_slot = hotplug_slot;
info = kzalloc(sizeof (struct hotplug_slot_info), GFP_KERNEL);
- if (!info)
+ if (!info) {
+ status = -ENOMEM;
goto error_hpslot;
+ }
hotplug_slot->info = info;
slot->bus = bus;
@@ -293,6 +299,7 @@ error_slot:
error:
return status;
}
+EXPORT_SYMBOL_GPL(cpci_hp_register_bus);
int
cpci_hp_unregister_bus(struct pci_bus *bus)
@@ -323,6 +330,7 @@ cpci_hp_unregister_bus(struct pci_bus *bus)
up_write(&list_rwsem);
return status;
}
+EXPORT_SYMBOL_GPL(cpci_hp_unregister_bus);
/* This is the interrupt mode interrupt handler */
static irqreturn_t
@@ -354,7 +362,7 @@ static int
init_slots(int clear_ins)
{
struct slot *slot;
- struct pci_dev* dev;
+ struct pci_dev *dev;
dbg("%s - enter", __func__);
down_read(&list_rwsem);
@@ -608,6 +616,7 @@ cpci_hp_register_controller(struct cpci_hp_controller *new_controller)
controller = new_controller;
return status;
}
+EXPORT_SYMBOL_GPL(cpci_hp_register_controller);
static void
cleanup_slots(void)
@@ -647,6 +656,7 @@ cpci_hp_unregister_controller(struct cpci_hp_controller *old_controller)
status = -ENODEV;
return status;
}
+EXPORT_SYMBOL_GPL(cpci_hp_unregister_controller);
int
cpci_hp_start(void)
@@ -684,6 +694,7 @@ cpci_hp_start(void)
dbg("%s - exit", __func__);
return 0;
}
+EXPORT_SYMBOL_GPL(cpci_hp_start);
int
cpci_hp_stop(void)
@@ -698,6 +709,7 @@ cpci_hp_stop(void)
cpci_stop_thread();
return 0;
}
+EXPORT_SYMBOL_GPL(cpci_hp_stop);
int __init
cpci_hotplug_init(int debug)
@@ -715,10 +727,3 @@ cpci_hotplug_exit(void)
cpci_hp_stop();
cpci_hp_unregister_controller(controller);
}
-
-EXPORT_SYMBOL_GPL(cpci_hp_register_controller);
-EXPORT_SYMBOL_GPL(cpci_hp_unregister_controller);
-EXPORT_SYMBOL_GPL(cpci_hp_register_bus);
-EXPORT_SYMBOL_GPL(cpci_hp_unregister_bus);
-EXPORT_SYMBOL_GPL(cpci_hp_start);
-EXPORT_SYMBOL_GPL(cpci_hp_stop);
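
The EXPORT_SYMBOL_GPL() tags move from a block at the end of the file to immediately after each definition, the kernel's preferred placement since it keeps the export visible where the function is maintained. In miniature, with a hypothetical example_hp_start():

    #include <linux/module.h>

    int example_hp_start(void)
    {
            return 0;
    }
    EXPORT_SYMBOL_GPL(example_hp_start);
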
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 829c327cfb5..7d48ecae669 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -39,14 +39,14 @@ extern int cpci_debug;
do { \
if (cpci_debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
-u8 cpci_get_attention_status(struct slot* slot)
+u8 cpci_get_attention_status(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -66,7 +66,7 @@ u8 cpci_get_attention_status(struct slot* slot)
return hs_csr & 0x0008 ? 1 : 0;
}
-int cpci_set_attention_status(struct slot* slot, int status)
+int cpci_set_attention_status(struct slot *slot, int status)
{
int hs_cap;
u16 hs_csr;
@@ -93,7 +93,7 @@ int cpci_set_attention_status(struct slot* slot, int status)
return 1;
}
-u16 cpci_get_hs_csr(struct slot* slot)
+u16 cpci_get_hs_csr(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -111,7 +111,7 @@ u16 cpci_get_hs_csr(struct slot* slot)
return hs_csr;
}
-int cpci_check_and_clear_ins(struct slot* slot)
+int cpci_check_and_clear_ins(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -140,7 +140,7 @@ int cpci_check_and_clear_ins(struct slot* slot)
return ins;
}
-int cpci_check_ext(struct slot* slot)
+int cpci_check_ext(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -161,7 +161,7 @@ int cpci_check_ext(struct slot* slot)
return ext;
}
-int cpci_clear_ext(struct slot* slot)
+int cpci_clear_ext(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -187,7 +187,7 @@ int cpci_clear_ext(struct slot* slot)
return 0;
}
-int cpci_led_on(struct slot* slot)
+int cpci_led_on(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -216,7 +216,7 @@ int cpci_led_on(struct slot* slot)
return 0;
}
-int cpci_led_off(struct slot* slot)
+int cpci_led_off(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -250,13 +250,16 @@ int cpci_led_off(struct slot* slot)
* Device configuration functions
*/
-int __ref cpci_configure_slot(struct slot *slot)
+int cpci_configure_slot(struct slot *slot)
{
+ struct pci_dev *dev;
struct pci_bus *parent;
- int fn;
+ int ret = 0;
dbg("%s - enter", __func__);
+ pci_lock_rescan_remove();
+
if (slot->dev == NULL) {
dbg("pci_dev null, finding %02x:%02x:%x",
slot->bus->number, PCI_SLOT(slot->devfn), PCI_FUNC(slot->devfn));
@@ -277,59 +280,32 @@ int __ref cpci_configure_slot(struct slot *slot)
slot->dev = pci_get_slot(slot->bus, slot->devfn);
if (slot->dev == NULL) {
err("Could not find PCI device for slot %02x", slot->number);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
}
parent = slot->dev->bus;
- for (fn = 0; fn < 8; fn++) {
- struct pci_dev *dev;
-
- dev = pci_get_slot(parent, PCI_DEVFN(PCI_SLOT(slot->devfn), fn));
- if (!dev)
+ list_for_each_entry(dev, &parent->devices, bus_list)
+ if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
continue;
- if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
- (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
- /* Find an unused bus number for the new bridge */
- struct pci_bus *child;
- unsigned char busnr, start = parent->secondary;
- unsigned char end = parent->subordinate;
-
- for (busnr = start; busnr <= end; busnr++) {
- if (!pci_find_bus(pci_domain_nr(parent),
- busnr))
- break;
- }
- if (busnr >= end) {
- err("No free bus for hot-added bridge\n");
- pci_dev_put(dev);
- continue;
- }
- child = pci_add_new_bus(parent, dev, busnr);
- if (!child) {
- err("Cannot add new bus for %s\n",
- pci_name(dev));
- pci_dev_put(dev);
- continue;
- }
- child->subordinate = pci_do_scan_bus(child);
- pci_bus_size_bridges(child);
- }
- pci_dev_put(dev);
- }
+ if (pci_is_bridge(dev))
+ pci_hp_add_bridge(dev);
+
+ pci_assign_unassigned_bridge_resources(parent->self);
- pci_bus_assign_resources(parent);
pci_bus_add_devices(parent);
- pci_enable_bridges(parent);
+ out:
+ pci_unlock_rescan_remove();
dbg("%s - exit", __func__);
- return 0;
+ return ret;
}
-int cpci_unconfigure_slot(struct slot* slot)
+int cpci_unconfigure_slot(struct slot *slot)
{
- int i;
- struct pci_dev *dev;
+ struct pci_dev *dev, *temp;
dbg("%s - enter", __func__);
if (!slot->dev) {
@@ -337,17 +313,20 @@ int cpci_unconfigure_slot(struct slot* slot)
return -ENODEV;
}
- for (i = 0; i < 8; i++) {
- dev = pci_get_slot(slot->bus,
- PCI_DEVFN(PCI_SLOT(slot->devfn), i));
- if (dev) {
- pci_remove_bus_device(dev);
- pci_dev_put(dev);
- }
+ pci_lock_rescan_remove();
+
+ list_for_each_entry_safe(dev, temp, &slot->bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
+ continue;
+ pci_dev_get(dev);
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
}
pci_dev_put(slot->dev);
slot->dev = NULL;
+ pci_unlock_rescan_remove();
+
dbg("%s - exit", __func__);
return 0;
}
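
Unconfiguration now walks the live bus->devices list instead of probing fixed function numbers 0-7, so arbitrary multi-function layouts are handled and each device stays pinned while it is removed. A condensed sketch of that loop, assuming the caller already holds the rescan/remove lock; example_remove_slot_funcs() is a hypothetical wrapper:

    #include <linux/pci.h>

    static void example_remove_slot_funcs(struct pci_bus *bus,
                                          unsigned int devfn)
    {
            struct pci_dev *dev, *temp;

            /* _safe variant: removal unlinks dev from bus->devices */
            list_for_each_entry_safe(dev, temp, &bus->devices, bus_list) {
                    if (PCI_SLOT(dev->devfn) != PCI_SLOT(devfn))
                            continue;
                    pci_dev_get(dev);       /* pin across removal */
                    pci_stop_and_remove_bus_device(dev);
                    pci_dev_put(dev);
            }
    }
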
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
index fb3f84661bd..04fcd781140 100644
--- a/drivers/pci/hotplug/cpcihp_generic.c
+++ b/drivers/pci/hotplug/cpcihp_generic.c
@@ -13,14 +13,14 @@
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
@@ -53,16 +53,16 @@
#define dbg(format, arg...) \
do { \
- if(debug) \
+ if (debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while(0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
/* local variables */
-static int debug;
+static bool debug;
static char *bridge;
static u8 bridge_busnr;
static u8 bridge_slot;
@@ -78,8 +78,8 @@ static struct cpci_hp_controller generic_hpc;
static int __init validate_parameters(void)
{
- char* str;
- char* p;
+ char *str;
+ char *p;
unsigned long tmp;
if(!bridge) {
@@ -142,8 +142,8 @@ static int query_enum(void)
static int __init cpcihp_generic_init(void)
{
int status;
- struct resource* r;
- struct pci_dev* dev;
+ struct resource *r;
+ struct pci_dev *dev;
info(DRIVER_DESC " version: " DRIVER_VERSION);
status = validate_parameters();
@@ -154,12 +154,8 @@ static int __init cpcihp_generic_init(void)
if(!r)
return -EBUSY;
- bus = pci_find_bus(0, bridge_busnr);
- if (!bus) {
- err("Invalid bus number %d", bridge_busnr);
- return -EINVAL;
- }
- dev = pci_get_slot(bus, PCI_DEVFN(bridge_slot, 0));
+ dev = pci_get_domain_bus_and_slot(0, bridge_busnr,
+ PCI_DEVFN(bridge_slot, 0));
if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
err("Invalid bridge device %s", bridge);
pci_dev_put(dev);
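
The two-step pci_find_bus() + pci_get_slot() lookup collapses into a single pci_get_domain_bus_and_slot() call, which simply returns NULL when the bus does not exist and takes a device reference in one step. A sketch; example_find_bridge() is hypothetical and domain 0 is assumed, as in the driver above:

    #include <linux/pci.h>

    /* The returned device holds a reference; callers must pci_dev_put() it. */
    static struct pci_dev *example_find_bridge(u8 busnr, u8 slot)
    {
            return pci_get_domain_bus_and_slot(0, busnr,
                                               PCI_DEVFN(slot, 0));
    }
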
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
index 41f6a8d79c8..6757b3ef7e1 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.c
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -13,14 +13,14 @@
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
@@ -48,17 +48,17 @@
#define dbg(format, arg...) \
do { \
- if(debug) \
+ if (debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while(0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
/* local variables */
-static int debug;
-static int poll;
+static bool debug;
+static bool poll;
static struct cpci_hp_controller_ops zt5550_hpc_ops;
static struct cpci_hp_controller zt5550_hpc;
@@ -271,7 +271,7 @@ init_hc_error:
}
-static void __devexit zt5550_hc_remove_one(struct pci_dev *pdev)
+static void zt5550_hc_remove_one(struct pci_dev *pdev)
{
cpci_hp_stop();
cpci_hp_unregister_bus(bus0);
@@ -285,17 +285,17 @@ static struct pci_device_id zt5550_hc_pci_tbl[] = {
{ 0, }
};
MODULE_DEVICE_TABLE(pci, zt5550_hc_pci_tbl);
-
+
static struct pci_driver zt5550_hc_driver = {
.name = "zt5550_hc",
.id_table = zt5550_hc_pci_tbl,
.probe = zt5550_hc_init_one,
- .remove = __devexit_p(zt5550_hc_remove_one),
+ .remove = zt5550_hc_remove_one,
};
static int __init zt5550_init(void)
{
- struct resource* r;
+ struct resource *r;
int rc;
info(DRIVER_DESC " version: " DRIVER_VERSION);
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.h b/drivers/pci/hotplug/cpcihp_zt5550.h
index bebc6060a55..9a57fda5348 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.h
+++ b/drivers/pci/hotplug/cpcihp_zt5550.h
@@ -13,14 +13,14 @@
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
@@ -55,7 +55,7 @@
#define HC_CMD_REG 0x0C
#define ARB_CONFIG_GNT_REG 0x10
#define ARB_CONFIG_CFG_REG 0x12
-#define ARB_CONFIG_REG 0x10
+#define ARB_CONFIG_REG 0x10
#define ISOL_CONFIG_REG 0x18
#define FAULT_STATUS_REG 0x20
#define FAULT_CONFIG_REG 0x24
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index d8ffc736680..0450f405807 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -255,7 +255,7 @@ struct pci_func {
struct pci_resource *io_head;
struct pci_resource *bus_head;
struct timer_list *p_task_event;
- struct pci_dev* pci_dev;
+ struct pci_dev *pci_dev;
};
struct slot {
@@ -278,7 +278,7 @@ struct slot {
};
struct pci_resource {
- struct pci_resource * next;
+ struct pci_resource *next;
u32 base;
u32 length;
};
@@ -404,50 +404,44 @@ struct resource_lists {
/* debugfs functions for the hotplug controller info */
-extern void cpqhp_initialize_debugfs(void);
-extern void cpqhp_shutdown_debugfs(void);
-extern void cpqhp_create_debugfs_files(struct controller *ctrl);
-extern void cpqhp_remove_debugfs_files(struct controller *ctrl);
+void cpqhp_initialize_debugfs(void);
+void cpqhp_shutdown_debugfs(void);
+void cpqhp_create_debugfs_files(struct controller *ctrl);
+void cpqhp_remove_debugfs_files(struct controller *ctrl);
/* controller functions */
-extern void cpqhp_pushbutton_thread(unsigned long event_pointer);
-extern irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
-extern int cpqhp_find_available_resources(struct controller *ctrl,
- void __iomem *rom_start);
-extern int cpqhp_event_start_thread(void);
-extern void cpqhp_event_stop_thread(void);
-extern struct pci_func *cpqhp_slot_create(unsigned char busnumber);
-extern struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
- unsigned char index);
-extern int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_hardware_test(struct controller *ctrl, int test_num);
+void cpqhp_pushbutton_thread(unsigned long event_pointer);
+irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
+int cpqhp_find_available_resources(struct controller *ctrl,
+ void __iomem *rom_start);
+int cpqhp_event_start_thread(void);
+void cpqhp_event_stop_thread(void);
+struct pci_func *cpqhp_slot_create(unsigned char busnumber);
+struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
+ unsigned char index);
+int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
+int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
+int cpqhp_hardware_test(struct controller *ctrl, int test_num);
/* resource functions */
-extern int cpqhp_resource_sort_and_combine (struct pci_resource **head);
+int cpqhp_resource_sort_and_combine (struct pci_resource **head);
/* pci functions */
-extern int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
-extern int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
- u8 slot);
-extern int cpqhp_save_config(struct controller *ctrl, int busnumber,
- int is_hot_plug);
-extern int cpqhp_save_base_addr_length(struct controller *ctrl,
- struct pci_func *func);
-extern int cpqhp_save_used_resources(struct controller *ctrl,
- struct pci_func *func);
-extern int cpqhp_configure_board(struct controller *ctrl,
- struct pci_func *func);
-extern int cpqhp_save_slot_config(struct controller *ctrl,
- struct pci_func *new_slot);
-extern int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
-extern void cpqhp_destroy_board_resources(struct pci_func *func);
-extern int cpqhp_return_board_resources (struct pci_func *func,
- struct resource_lists *resources);
-extern void cpqhp_destroy_resource_list(struct resource_lists *resources);
-extern int cpqhp_configure_device(struct controller *ctrl,
- struct pci_func *func);
-extern int cpqhp_unconfigure_device(struct pci_func *func);
+int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
+int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
+ u8 slot);
+int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug);
+int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func *func);
+int cpqhp_save_used_resources(struct controller *ctrl, struct pci_func *func);
+int cpqhp_configure_board(struct controller *ctrl, struct pci_func *func);
+int cpqhp_save_slot_config(struct controller *ctrl, struct pci_func *new_slot);
+int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
+void cpqhp_destroy_board_resources(struct pci_func *func);
+int cpqhp_return_board_resources(struct pci_func *func,
+ struct resource_lists *resources);
+void cpqhp_destroy_resource_list(struct resource_lists *resources);
+int cpqhp_configure_device(struct controller *ctrl, struct pci_func *func);
+int cpqhp_unconfigure_device(struct pci_func *func);
/* Global variables */
extern int cpqhp_debug;
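
The header drops the redundant "extern" from function prototypes: file-scope function declarations are external by default, while object declarations such as cpqhp_debug still need the keyword. In miniature, with hypothetical names:

    int example_fn(void);      /* identical to: extern int example_fn(void); */
    extern int example_debug;  /* "extern" still required for variables */
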
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 4952c3b9379..4aaee746df8 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -57,8 +57,8 @@ struct irq_routing_table *cpqhp_routing_table;
static void __iomem *smbios_table;
static void __iomem *smbios_start;
static void __iomem *cpqhp_rom_start;
-static int power_mode;
-static int debug;
+static bool power_mode;
+static bool debug;
static int initialized;
#define DRIVER_VERSION "0.9.8"
@@ -94,7 +94,7 @@ static inline int is_slot66mhz(struct slot *slot)
*
* Returns pointer to the head of the SMBIOS tables (or %NULL).
*/
-static void __iomem * detect_SMBIOS_pointer(void __iomem *begin, void __iomem *end)
+static void __iomem *detect_SMBIOS_pointer(void __iomem *begin, void __iomem *end)
{
void __iomem *fp;
void __iomem *endp;
@@ -131,7 +131,7 @@ static void __iomem * detect_SMBIOS_pointer(void __iomem *begin, void __iomem *e
*
* For unexpected switch opens
*/
-static int init_SERR(struct controller * ctrl)
+static int init_SERR(struct controller *ctrl)
{
u32 tempdword;
u32 number_of_slots;
@@ -291,7 +291,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
kfree(slot);
}
-static int ctrl_slot_cleanup (struct controller * ctrl)
+static int ctrl_slot_cleanup (struct controller *ctrl)
{
struct slot *old_slot, *next_slot;
@@ -611,7 +611,7 @@ static int ctrl_slot_setup(struct controller *ctrl,
u32 tempdword;
char name[SLOT_NAME_SIZE];
void __iomem *slot_entry= NULL;
- int result = -ENOMEM;
+ int result;
dbg("%s\n", __func__);
@@ -623,19 +623,25 @@ static int ctrl_slot_setup(struct controller *ctrl,
while (number_of_slots) {
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
+ if (!slot) {
+ result = -ENOMEM;
goto error;
+ }
slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
GFP_KERNEL);
- if (!slot->hotplug_slot)
+ if (!slot->hotplug_slot) {
+ result = -ENOMEM;
goto error_slot;
+ }
hotplug_slot = slot->hotplug_slot;
hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
GFP_KERNEL);
- if (!hotplug_slot->info)
+ if (!hotplug_slot->info) {
+ result = -ENOMEM;
goto error_hpslot;
+ }
hotplug_slot_info = hotplug_slot->info;
slot->ctrl = ctrl;
@@ -700,8 +706,7 @@ static int ctrl_slot_setup(struct controller *ctrl,
hotplug_slot_info->adapter_status =
get_presence_status(ctrl, slot);
- dbg("registering bus %d, dev %d, number %d, "
- "ctrl->slot_device_offset %d, slot %d\n",
+ dbg("registering bus %d, dev %d, number %d, ctrl->slot_device_offset %d, slot %d\n",
slot->bus, slot->device,
slot->number, ctrl->slot_device_offset,
slot_number);
@@ -831,8 +836,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bus = pdev->subordinate;
if (!bus) {
- dev_notice(&pdev->dev, "the device is not a bridge, "
- "skipping\n");
+ dev_notice(&pdev->dev, "the device is not a bridge, skipping\n");
rc = -ENODEV;
goto err_disable_device;
}
@@ -840,8 +844,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Need to read VID early b/c it's used to differentiate CPQ and INTC
* discovery
*/
- rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
- if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) {
+ vendor_id = pdev->vendor;
+ if ((vendor_id != PCI_VENDOR_ID_COMPAQ) &&
+ (vendor_id != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_non_compaq_or_intel);
rc = -ENODEV;
goto err_disable_device;
@@ -855,10 +860,10 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_disable_device;
}
- /* Check for the proper subsystem ID's
+ /* Check for the proper subsystem IDs
* Intel uses a different SSID programming model than Compaq.
* For Intel, each SSID bit identifies a PHP capability.
- * Also Intel HPC's may have RID=0.
+ * Also Intel HPCs may have RID=0.
*/
if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_not_supported);
@@ -868,11 +873,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* TODO: This code can be made to support non-Compaq or Intel
* subsystem IDs
*/
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_disable_device;
- }
+ subsystem_vid = pdev->subsystem_vendor;
dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_non_compaq_or_intel);
@@ -887,11 +888,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_disable_device;
}
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_free_ctrl;
- }
+ subsystem_deviceid = pdev->subsystem_device;
info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
@@ -921,12 +918,12 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bus->max_bus_speed = PCI_SPEED_100MHz_PCIX;
break;
}
- if (bus_cap & 20) {
+ if (bus_cap & 0x20) {
dbg("bus max supports 66MHz PCI-X\n");
bus->max_bus_speed = PCI_SPEED_66MHz_PCIX;
break;
}
- if (bus_cap & 10) {
+ if (bus_cap & 0x10) {
dbg("bus max supports 66MHz PCI\n");
bus->max_bus_speed = PCI_SPEED_66MHz;
break;
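
The bus-capability tests switch from decimal 20 and 10 to hex 0x20 and 0x10: decimal 20 is binary 10100, so the old expression checked bits 2 and 4 rather than the intended capability bit 5 (and likewise for bit 4). A tiny standalone demonstration of why the literal mattered:

    #include <stdio.h>

    int main(void)
    {
            unsigned int bus_cap = 0x20;    /* only bit 5 set */

            printf("%d\n", !!(bus_cap & 20));       /* 0 - old, wrong mask */
            printf("%d\n", !!(bus_cap & 0x20));     /* 1 - intended test */
            return 0;
    }
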
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index e43908d9b5d..bde47fce324 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -39,9 +39,9 @@
#include <linux/kthread.h>
#include "cpqphp.h"
-static u32 configure_new_device(struct controller* ctrl, struct pci_func *func,
+static u32 configure_new_device(struct controller *ctrl, struct pci_func *func,
u8 behind_bridge, struct resource_lists *resources);
-static int configure_new_function(struct controller* ctrl, struct pci_func *func,
+static int configure_new_function(struct controller *ctrl, struct pci_func *func,
u8 behind_bridge, struct resource_lists *resources);
static void interrupt_event_handler(struct controller *ctrl);
@@ -64,7 +64,7 @@ static void long_delay(int delay)
/* FIXME: The following line needs to be somewhere else... */
#define WRONG_BUS_FREQUENCY 0x07
-static u8 handle_switch_change(u8 change, struct controller * ctrl)
+static u8 handle_switch_change(u8 change, struct controller *ctrl)
{
int hp_slot;
u8 rc = 0;
@@ -138,7 +138,7 @@ static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device)
}
-static u8 handle_presence_change(u16 change, struct controller * ctrl)
+static u8 handle_presence_change(u16 change, struct controller *ctrl)
{
int hp_slot;
u8 rc = 0;
@@ -232,7 +232,7 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
}
-static u8 handle_power_fault(u8 change, struct controller * ctrl)
+static u8 handle_power_fault(u8 change, struct controller *ctrl)
{
int hp_slot;
u8 rc = 0;
@@ -709,7 +709,8 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
temp = temp->next;
}
- temp->next = max->next;
+ if (temp)
+ temp->next = max->next;
}
max->next = NULL;
@@ -996,7 +997,7 @@ struct pci_func *cpqhp_slot_create(u8 busnumber)
*
* Returns %0 if successful, !0 otherwise.
*/
-static int slot_remove(struct pci_func * old_slot)
+static int slot_remove(struct pci_func *old_slot)
{
struct pci_func *next;
@@ -1108,7 +1109,7 @@ struct pci_func *cpqhp_slot_find(u8 bus, u8 device, u8 index)
/* DJZ: I don't think is_bridge will work as is.
* FIXME */
-static int is_bridge(struct pci_func * func)
+static int is_bridge(struct pci_func *func)
{
/* Check the header type */
if (((func->config_space[0x03] >> 16) & 0xFF) == 0x01)
@@ -1231,7 +1232,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
/* Only if mode change...*/
if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
- ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
+ ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
@@ -1624,7 +1625,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
* @replace_flag: whether replacing or adding a new device
* @ctrl: target controller
*/
-static u32 remove_board(struct pci_func * func, u32 replace_flag, struct controller * ctrl)
+static u32 remove_board(struct pci_func *func, u32 replace_flag, struct controller *ctrl)
{
int index;
u8 skip = 0;
@@ -1741,7 +1742,7 @@ static void pushbutton_helper_thread(unsigned long data)
/* this is the main worker thread */
-static int event_thread(void* data)
+static int event_thread(void *data)
{
struct controller *ctrl;
@@ -1828,7 +1829,7 @@ static void interrupt_event_handler(struct controller *ctrl)
if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) {
dbg("button pressed\n");
- } else if (ctrl->event_queue[loop].event_type ==
+ } else if (ctrl->event_queue[loop].event_type ==
INT_BUTTON_CANCEL) {
dbg("button cancel\n");
del_timer(&p_slot->task_event);
@@ -1900,8 +1901,7 @@ static void interrupt_event_handler(struct controller *ctrl)
dbg("power fault\n");
} else {
/* refresh notification */
- if (p_slot)
- update_slot_info(ctrl, p_slot);
+ update_slot_info(ctrl, p_slot);
}
ctrl->event_queue[loop].event_type = 0;
@@ -1992,7 +1992,7 @@ int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
u16 temp_word;
u32 tempdword;
int rc;
- struct slot* p_slot;
+ struct slot *p_slot;
int physical_slot = 0;
tempdword = 0;
@@ -2088,7 +2088,7 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
u8 replace_flag;
u32 rc = 0;
unsigned int devfn;
- struct slot* p_slot;
+ struct slot *p_slot;
struct pci_bus *pci_bus = ctrl->pci_bus;
int physical_slot=0;
@@ -2270,8 +2270,8 @@ int cpqhp_hardware_test(struct controller *ctrl, int test_num)
*
* Returns 0 if success.
*/
-static u32 configure_new_device(struct controller * ctrl, struct pci_func * func,
- u8 behind_bridge, struct resource_lists * resources)
+static u32 configure_new_device(struct controller *ctrl, struct pci_func *func,
+ u8 behind_bridge, struct resource_lists *resources)
{
u8 temp_byte, function, max_functions, stop_it;
int rc;
@@ -2412,11 +2412,11 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
if (rc)
return rc;
- /* find range of busses to use */
+ /* find range of buses to use */
dbg("find ranges of buses to use\n");
bus_node = get_max_resource(&(resources->bus_head), 1);
- /* If we don't have any busses to allocate, we can't continue */
+ /* If we don't have any buses to allocate, we can't continue */
if (!bus_node)
return -ENOMEM;
@@ -2520,44 +2520,28 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
/* If we have IO resources copy them and fill in the bridge's
* IO range registers */
- if (io_node) {
- memcpy(hold_IO_node, io_node, sizeof(struct pci_resource));
- io_node->next = NULL;
-
- /* set IO base and Limit registers */
- temp_byte = io_node->base >> 8;
- rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_BASE, temp_byte);
-
- temp_byte = (io_node->base + io_node->length - 1) >> 8;
- rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte);
- } else {
- kfree(hold_IO_node);
- hold_IO_node = NULL;
- }
+ memcpy(hold_IO_node, io_node, sizeof(struct pci_resource));
+ io_node->next = NULL;
- /* If we have memory resources copy them and fill in the
- * bridge's memory range registers. Otherwise, fill in the
- * range registers with values that disable them. */
- if (mem_node) {
- memcpy(hold_mem_node, mem_node, sizeof(struct pci_resource));
- mem_node->next = NULL;
+ /* set IO base and Limit registers */
+ temp_byte = io_node->base >> 8;
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_BASE, temp_byte);
- /* set Mem base and Limit registers */
- temp_word = mem_node->base >> 16;
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word);
+ temp_byte = (io_node->base + io_node->length - 1) >> 8;
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte);
- temp_word = (mem_node->base + mem_node->length - 1) >> 16;
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
- } else {
- temp_word = 0xFFFF;
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word);
+ /* Copy the memory resources and fill in the bridge's memory
+ * range registers.
+ */
+ memcpy(hold_mem_node, mem_node, sizeof(struct pci_resource));
+ mem_node->next = NULL;
- temp_word = 0x0000;
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
+ /* set Mem base and Limit registers */
+ temp_word = mem_node->base >> 16;
+ rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word);
- kfree(hold_mem_node);
- hold_mem_node = NULL;
- }
+ temp_word = (mem_node->base + mem_node->length - 1) >> 16;
+ rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
memcpy(hold_p_mem_node, p_mem_node, sizeof(struct pci_resource));
p_mem_node->next = NULL;
@@ -2627,7 +2611,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
/* Return unused bus resources
* First use the temporary node to store information for
* the board */
- if (hold_bus_node && bus_node && temp_resources.bus_head) {
+ if (bus_node && temp_resources.bus_head) {
hold_bus_node->length = bus_node->base - hold_bus_node->base;
hold_bus_node->next = func->bus_head;
@@ -2751,7 +2735,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
}
/* If we have prefetchable memory space available and there
* is some left at the end, return the unused portion */
- if (hold_p_mem_node && temp_resources.p_mem_head) {
+ if (temp_resources.p_mem_head) {
p_mem_node = do_pre_bridge_resource_split(&(temp_resources.p_mem_head),
&hold_p_mem_node, 0x100000);
@@ -2890,27 +2874,8 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
func->mem_head = mem_node;
} else
return -ENOMEM;
- } else if ((temp_register & 0x0BL) == 0x04) {
- /* Map memory */
- base = temp_register & 0xFFFFFFF0;
- base = ~base + 1;
-
- dbg("CND: length = 0x%x\n", base);
- mem_node = get_resource(&(resources->mem_head), base);
-
- /* allocate the resource to the board */
- if (mem_node) {
- base = mem_node->base;
-
- mem_node->next = func->mem_head;
- func->mem_head = mem_node;
- } else
- return -ENOMEM;
- } else if ((temp_register & 0x0BL) == 0x06) {
- /* Those bits are reserved, we can't handle this */
- return 1;
} else {
- /* Requesting space below 1M */
+ /* Reserved bits or requesting space below 1M */
return NOT_ENOUGH_RESOURCES;
}
@@ -2936,7 +2901,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
/* If this function needs an interrupt and we are behind
* a bridge and the pin is tied to something that's
- * alread mapped, set this one the same */
+ * already mapped, set this one the same */
if (temp_byte && resources->irqs &&
(resources->irqs->valid_INT &
(0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) {
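
Among the cpqphp_ctrl.c changes, get_max_resource() gains a NULL check before unlinking: if the walk never finds the node preceding "max", temp ends up NULL and the old code dereferenced it. The guarded unlink on a generic singly linked list, as a sketch with hypothetical names:

    struct example_node {
            struct example_node *next;
    };

    static void example_unlink(struct example_node **head,
                               struct example_node *max)
    {
            struct example_node *temp = *head;

            if (temp == max) {
                    *head = max->next;
            } else {
                    while (temp && temp->next != max)
                            temp = temp->next;
                    if (temp)       /* guard: max may be absent */
                            temp->next = max->next;
            }
            max->next = NULL;
    }
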
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index 76ba8a1c774..0968a9bcb34 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -34,7 +34,6 @@
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
-#include <linux/init.h>
#include <asm/uaccess.h>
#include "cpqphp.h"
#include "cpqphp_nvram.h"
@@ -108,7 +107,7 @@ static spinlock_t int15_lock;
*/
-static u32 add_byte( u32 **p_buffer, u8 value, u32 *used, u32 *avail)
+static u32 add_byte(u32 **p_buffer, u8 value, u32 *used, u32 *avail)
{
u8 **tByte;
@@ -123,7 +122,7 @@ static u32 add_byte( u32 **p_buffer, u8 value, u32 *used, u32 *avail)
}
-static u32 add_dword( u32 **p_buffer, u32 value, u32 *used, u32 *avail)
+static u32 add_dword(u32 **p_buffer, u32 value, u32 *used, u32 *avail)
{
if ((*used + 4) > *avail)
return(1);
@@ -268,12 +267,12 @@ static u32 store_HRT (void __iomem *rom_start)
ctrl = cpqhp_ctrl_list;
/* The revision of this structure */
- rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available);
+ rc = add_byte(&pFill, 1 + ctrl->push_flag, &usedbytes, &available);
if (rc)
return(rc);
/* The number of controllers */
- rc = add_byte( &pFill, 1, &usedbytes, &available);
+ rc = add_byte(&pFill, 1, &usedbytes, &available);
if (rc)
return(rc);
@@ -283,22 +282,22 @@ static u32 store_HRT (void __iomem *rom_start)
numCtrl++;
/* The bus number */
- rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available);
+ rc = add_byte(&pFill, ctrl->bus, &usedbytes, &available);
if (rc)
return(rc);
/* The device Number */
- rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
+ rc = add_byte(&pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
/* The function Number */
- rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
+ rc = add_byte(&pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
/* Skip the number of available entries */
- rc = add_dword( &pFill, 0, &usedbytes, &available);
+ rc = add_dword(&pFill, 0, &usedbytes, &available);
if (rc)
return(rc);
@@ -312,12 +311,12 @@ static u32 store_HRT (void __iomem *rom_start)
loop ++;
/* base */
- rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
/* length */
- rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -337,12 +336,12 @@ static u32 store_HRT (void __iomem *rom_start)
loop ++;
/* base */
- rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
/* length */
- rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -362,12 +361,12 @@ static u32 store_HRT (void __iomem *rom_start)
loop ++;
/* base */
- rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
/* length */
- rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -387,12 +386,12 @@ static u32 store_HRT (void __iomem *rom_start)
loop ++;
/* base */
- rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
/* length */
- rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
diff --git a/drivers/pci/hotplug/cpqphp_nvram.h b/drivers/pci/hotplug/cpqphp_nvram.h
index e89c0702119..34e4e54fcf1 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.h
+++ b/drivers/pci/hotplug/cpqphp_nvram.h
@@ -30,26 +30,26 @@
#ifndef CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM
-static inline void compaq_nvram_init (void __iomem *rom_start)
+static inline void compaq_nvram_init(void __iomem *rom_start)
{
return;
}
-static inline int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
+static inline int compaq_nvram_load(void __iomem *rom_start, struct controller *ctrl)
{
return 0;
}
-static inline int compaq_nvram_store (void __iomem *rom_start)
+static inline int compaq_nvram_store(void __iomem *rom_start)
{
return 0;
}
#else
-extern void compaq_nvram_init (void __iomem *rom_start);
-extern int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl);
-extern int compaq_nvram_store (void __iomem *rom_start);
+void compaq_nvram_init(void __iomem *rom_start);
+int compaq_nvram_load(void __iomem *rom_start, struct controller *ctrl);
+int compaq_nvram_store(void __iomem *rom_start);
#endif
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 6173b9a4544..1c8c2f130d3 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -81,12 +81,13 @@ static void __iomem *detect_HRT_floating_pointer(void __iomem *begin, void __iom
}
-int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
+int cpqhp_configure_device (struct controller *ctrl, struct pci_func *func)
{
- unsigned char bus;
struct pci_bus *child;
int num;
+ pci_lock_rescan_remove();
+
if (func->pci_dev == NULL)
func->pci_dev = pci_get_bus_and_slot(func->bus,PCI_DEVFN(func->device, func->function));
@@ -101,35 +102,40 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
if (func->pci_dev == NULL) {
dbg("ERROR: pci_dev still null\n");
- return 0;
+ goto out;
}
}
if (func->pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- pci_read_config_byte(func->pci_dev, PCI_SECONDARY_BUS, &bus);
- child = (struct pci_bus*) pci_add_new_bus(func->pci_dev->bus, (func->pci_dev), bus);
- pci_do_scan_bus(child);
+ pci_hp_add_bridge(func->pci_dev);
+ child = func->pci_dev->subordinate;
+ if (child)
+ pci_bus_add_devices(child);
}
pci_dev_put(func->pci_dev);
+ out:
+ pci_unlock_rescan_remove();
return 0;
}
-int cpqhp_unconfigure_device(struct pci_func* func)
+int cpqhp_unconfigure_device(struct pci_func *func)
{
int j;
dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function);
+ pci_lock_rescan_remove();
for (j=0; j<8 ; j++) {
- struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
+ struct pci_dev *temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
if (temp) {
pci_dev_put(temp);
- pci_remove_bus_device(temp);
+ pci_stop_and_remove_bus_device(temp);
}
}
+ pci_unlock_rescan_remove();
return 0;
}
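
Two things change in cpqhp_configure_device()/cpqhp_unconfigure_device() above: the hand-rolled bridge scan (reading PCI_SECONDARY_BUS, then pci_add_new_bus()/pci_do_scan_bus()) becomes pci_hp_add_bridge() followed by pci_bus_add_devices(), and both the add and remove paths are bracketed by the PCI rescan/remove lock so they cannot race with other hotplug operations. A condensed sketch of the resulting pattern, assuming `dev` was already looked up:

    /* Sketch only: hot-add of a possible bridge under the rescan/remove lock. */
    pci_lock_rescan_remove();
    if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
            pci_hp_add_bridge(dev);                 /* scan the secondary bus */
            if (dev->subordinate)
                    pci_bus_add_devices(dev->subordinate);  /* bind drivers */
    }
    pci_unlock_rescan_remove();

Hot-remove mirrors it: pci_stop_and_remove_bus_device() (replacing the older pci_remove_bus_device()) under the same lock.
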
@@ -197,7 +203,7 @@ int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
}
-static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num)
+static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_num)
{
u16 tdevice;
u32 work;
@@ -274,7 +280,7 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
}
-int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot)
+int cpqhp_get_bus_dev (struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot)
{
/* plain (bridges allowed) */
return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0);
@@ -291,7 +297,7 @@ int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 s
*
* Reads configuration for all slots in a PCI bus and saves info.
*
- * Note: For non-hot plug busses, the slot # saved is the device #
+ * Note: For non-hot plug buses, the slot # saved is the device #
*
* returns 0 if success
*/
@@ -455,11 +461,11 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
* cpqhp_save_slot_config
*
* Saves configuration info for all PCI devices in a given slot
- * including subordinate busses.
+ * including subordinate buses.
*
* returns 0 if success
*/
-int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
+int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func *new_slot)
{
long rc;
u8 class_code;
@@ -543,7 +549,7 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
*
* returns 0 if success
*/
-int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
+int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func *func)
{
u8 cloop;
u8 header_type;
@@ -680,7 +686,7 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
*
* returns 0 if success
*/
-int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
+int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func *func)
{
u8 cloop;
u8 header_type;
@@ -943,7 +949,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
*
* returns 0 if success
*/
-int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
+int cpqhp_configure_board(struct controller *ctrl, struct pci_func *func)
{
int cloop;
u8 header_type;
@@ -1021,7 +1027,7 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
*
* returns 0 if the board is the same nonzero otherwise
*/
-int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
+int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func)
{
u8 cloop;
u8 header_type;
@@ -1413,7 +1419,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
*
* returns 0 if success
*/
-int cpqhp_return_board_resources(struct pci_func * func, struct resource_lists * resources)
+int cpqhp_return_board_resources(struct pci_func *func, struct resource_lists *resources)
{
int rc = 0;
struct pci_resource *node;
@@ -1469,7 +1475,7 @@ int cpqhp_return_board_resources(struct pci_func * func, struct resource_lists *
*
* Puts node back in the resource list pointed to by head
*/
-void cpqhp_destroy_resource_list (struct resource_lists * resources)
+void cpqhp_destroy_resource_list (struct resource_lists *resources)
{
struct pci_resource *res, *tres;
@@ -1516,7 +1522,7 @@ void cpqhp_destroy_resource_list (struct resource_lists * resources)
*
* Puts node back in the resource list pointed to by head
*/
-void cpqhp_destroy_board_resources (struct pci_func * func)
+void cpqhp_destroy_board_resources (struct pci_func *func)
{
struct pci_resource *res, *tres;
@@ -1556,4 +1562,3 @@ void cpqhp_destroy_board_resources (struct pci_func * func)
kfree(tres);
}
}
-
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index 4cb30447a48..4a392c44e3d 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -79,7 +79,7 @@ static int show_ctrl (struct controller *ctrl, char *buf)
static int show_dev (struct controller *ctrl, char *buf)
{
- char * out = buf;
+ char *out = buf;
int index;
struct pci_resource *res;
struct pci_func *new_slot;
@@ -167,26 +167,8 @@ exit:
static loff_t lseek(struct file *file, loff_t off, int whence)
{
- struct ctrl_dbg *dbg;
- loff_t new = -1;
-
- mutex_lock(&cpqphp_mutex);
- dbg = file->private_data;
-
- switch (whence) {
- case 0:
- new = off;
- break;
- case 1:
- new = file->f_pos + off;
- break;
- }
- if (new < 0 || new > dbg->size) {
- mutex_unlock(&cpqphp_mutex);
- return -EINVAL;
- }
- mutex_unlock(&cpqphp_mutex);
- return (file->f_pos = new);
+ struct ctrl_dbg *dbg = file->private_data;
+ return fixed_size_llseek(file, off, whence, dbg->size);
}
static ssize_t read(struct file *file, char __user *buf,
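
The lseek() rewrite above replaces an open-coded SEEK_SET/SEEK_CUR switch, a bounds check against dbg->size, and a driver-private mutex with one call to fixed_size_llseek(), the generic helper for files whose size is fixed and known. A minimal usage sketch with a hypothetical file, assuming <linux/fs.h>:

    /* Sketch: delegate all whence handling and clamping to the core helper. */
    static loff_t example_llseek(struct file *file, loff_t off, int whence)
    {
            return fixed_size_llseek(file, off, whence, EXAMPLE_BUF_SIZE);
    }

fixed_size_llseek() also handles SEEK_END, which the removed open-coded version did not.
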
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
deleted file mode 100644
index 17d10e2e8fb..00000000000
--- a/drivers/pci/hotplug/fakephp.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/* Works like the fakephp driver used to, except a little better.
- *
- * - It's possible to remove devices with subordinate busses.
- * - New PCI devices that appear via any method, not just a fakephp triggered
- * rescan, will be noticed.
- * - Devices that are removed via any method, not just a fakephp triggered
- * removal, will also be noticed.
- *
- * Uses nothing from the pci-hotplug subsystem.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/kobject.h>
-#include <linux/sysfs.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include "../pci.h"
-
-struct legacy_slot {
- struct kobject kobj;
- struct pci_dev *dev;
- struct list_head list;
-};
-
-static LIST_HEAD(legacy_list);
-
-static ssize_t legacy_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
- strcpy(buf, "1\n");
- return 2;
-}
-
-static void remove_callback(void *data)
-{
- pci_remove_bus_device((struct pci_dev *)data);
-}
-
-static ssize_t legacy_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t len)
-{
- struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
- unsigned long val;
-
- if (strict_strtoul(buf, 0, &val) < 0)
- return -EINVAL;
-
- if (val)
- pci_rescan_bus(slot->dev->bus);
- else
- sysfs_schedule_callback(&slot->dev->dev.kobj, remove_callback,
- slot->dev, THIS_MODULE);
- return len;
-}
-
-static struct attribute *legacy_attrs[] = {
- &(struct attribute){ .name = "power", .mode = 0644 },
- NULL,
-};
-
-static void legacy_release(struct kobject *kobj)
-{
- struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
-
- pci_dev_put(slot->dev);
- kfree(slot);
-}
-
-static struct kobj_type legacy_ktype = {
- .sysfs_ops = &(const struct sysfs_ops){
- .store = legacy_store, .show = legacy_show
- },
- .release = &legacy_release,
- .default_attrs = legacy_attrs,
-};
-
-static int legacy_add_slot(struct pci_dev *pdev)
-{
- struct legacy_slot *slot = kzalloc(sizeof(*slot), GFP_KERNEL);
-
- if (!slot)
- return -ENOMEM;
-
- if (kobject_init_and_add(&slot->kobj, &legacy_ktype,
- &pci_slots_kset->kobj, "%s",
- dev_name(&pdev->dev))) {
- dev_warn(&pdev->dev, "Failed to created legacy fake slot\n");
- return -EINVAL;
- }
- slot->dev = pci_dev_get(pdev);
-
- list_add(&slot->list, &legacy_list);
-
- return 0;
-}
-
-static int legacy_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct pci_dev *pdev = to_pci_dev(data);
-
- if (action == BUS_NOTIFY_ADD_DEVICE) {
- legacy_add_slot(pdev);
- } else if (action == BUS_NOTIFY_DEL_DEVICE) {
- struct legacy_slot *slot;
-
- list_for_each_entry(slot, &legacy_list, list)
- if (slot->dev == pdev)
- goto found;
-
- dev_warn(&pdev->dev, "Missing legacy fake slot?");
- return -ENODEV;
-found:
- kobject_del(&slot->kobj);
- list_del(&slot->list);
- kobject_put(&slot->kobj);
- }
-
- return 0;
-}
-
-static struct notifier_block legacy_notifier = {
- .notifier_call = legacy_notify
-};
-
-static int __init init_legacy(void)
-{
- struct pci_dev *pdev = NULL;
-
- /* Add existing devices */
- for_each_pci_dev(pdev)
- legacy_add_slot(pdev);
-
- /* Be alerted of any new ones */
- bus_register_notifier(&pci_bus_type, &legacy_notifier);
- return 0;
-}
-module_init(init_legacy);
-
-static void __exit remove_legacy(void)
-{
- struct legacy_slot *slot, *tmp;
-
- bus_unregister_notifier(&pci_bus_type, &legacy_notifier);
-
- list_for_each_entry_safe(slot, tmp, &legacy_list, list) {
- list_del(&slot->list);
- kobject_del(&slot->kobj);
- kobject_put(&slot->kobj);
- }
-}
-module_exit(remove_legacy);
-
-
-MODULE_AUTHOR("Trent Piepho <xyzzy@speakeasy.org>");
-MODULE_DESCRIPTION("Legacy version of the fakephp interface");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index a8d391a4957..e3e46a7b3ee 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -59,7 +59,7 @@ extern int ibmphp_debug;
/************************************************************
-* RESOURE TYPE *
+* RESOURCE TYPE *
************************************************************/
#define EBDA_RSRC_TYPE_MASK 0x03
@@ -103,7 +103,7 @@ extern int ibmphp_debug;
//--------------------------------------------------------------
struct rio_table_hdr {
- u8 ver_num;
+ u8 ver_num;
u8 scal_count;
u8 riodev_count;
u16 offset;
@@ -127,7 +127,7 @@ struct scal_detail {
};
//--------------------------------------------------------------
-// RIO DETAIL
+// RIO DETAIL
//--------------------------------------------------------------
struct rio_detail {
@@ -152,7 +152,7 @@ struct opt_rio {
u8 first_slot_num;
u8 middle_num;
struct list_head opt_rio_list;
-};
+};
struct opt_rio_lo {
u8 rio_type;
@@ -161,7 +161,7 @@ struct opt_rio_lo {
u8 middle_num;
u8 pack_count;
struct list_head opt_rio_lo_list;
-};
+};
/****************************************************************
* HPC DESCRIPTOR NODE *
@@ -275,17 +275,17 @@ extern struct list_head ibmphp_slot_head;
* FUNCTION PROTOTYPES *
***********************************************************/
-extern void ibmphp_free_ebda_hpc_queue (void);
-extern int ibmphp_access_ebda (void);
-extern struct slot *ibmphp_get_slot_from_physical_num (u8);
-extern int ibmphp_get_total_hp_slots (void);
-extern void ibmphp_free_ibm_slot (struct slot *);
-extern void ibmphp_free_bus_info_queue (void);
-extern void ibmphp_free_ebda_pci_rsrc_queue (void);
-extern struct bus_info *ibmphp_find_same_bus_num (u32);
-extern int ibmphp_get_bus_index (u8);
-extern u16 ibmphp_get_total_controllers (void);
-extern int ibmphp_register_pci (void);
+void ibmphp_free_ebda_hpc_queue(void);
+int ibmphp_access_ebda(void);
+struct slot *ibmphp_get_slot_from_physical_num(u8);
+int ibmphp_get_total_hp_slots(void);
+void ibmphp_free_ibm_slot(struct slot *);
+void ibmphp_free_bus_info_queue(void);
+void ibmphp_free_ebda_pci_rsrc_queue(void);
+struct bus_info *ibmphp_find_same_bus_num(u32);
+int ibmphp_get_bus_index(u8);
+u16 ibmphp_get_total_controllers(void);
+int ibmphp_register_pci(void);
/* passed parameters */
#define MEM 0
@@ -381,24 +381,24 @@ struct res_needed {
/* functions */
-extern int ibmphp_rsrc_init (void);
-extern int ibmphp_add_resource (struct resource_node *);
-extern int ibmphp_remove_resource (struct resource_node *);
-extern int ibmphp_find_resource (struct bus_node *, u32, struct resource_node **, int);
-extern int ibmphp_check_resource (struct resource_node *, u8);
-extern int ibmphp_remove_bus (struct bus_node *, u8);
-extern void ibmphp_free_resources (void);
-extern int ibmphp_add_pfmem_from_mem (struct resource_node *);
-extern struct bus_node *ibmphp_find_res_bus (u8);
-extern void ibmphp_print_test (void); /* for debugging purposes */
+int ibmphp_rsrc_init(void);
+int ibmphp_add_resource(struct resource_node *);
+int ibmphp_remove_resource(struct resource_node *);
+int ibmphp_find_resource(struct bus_node *, u32, struct resource_node **, int);
+int ibmphp_check_resource(struct resource_node *, u8);
+int ibmphp_remove_bus(struct bus_node *, u8);
+void ibmphp_free_resources(void);
+int ibmphp_add_pfmem_from_mem(struct resource_node *);
+struct bus_node *ibmphp_find_res_bus(u8);
+void ibmphp_print_test(void); /* for debugging purposes */
-extern void ibmphp_hpc_initvars (void);
-extern int ibmphp_hpc_readslot (struct slot *, u8, u8 *);
-extern int ibmphp_hpc_writeslot (struct slot *, u8);
-extern void ibmphp_lock_operations (void);
-extern void ibmphp_unlock_operations (void);
-extern int ibmphp_hpc_start_poll_thread (void);
-extern void ibmphp_hpc_stop_poll_thread (void);
+void ibmphp_hpc_initvars(void);
+int ibmphp_hpc_readslot(struct slot *, u8, u8 *);
+int ibmphp_hpc_writeslot(struct slot *, u8);
+void ibmphp_lock_operations(void);
+void ibmphp_unlock_operations(void);
+int ibmphp_hpc_start_poll_thread(void);
+void ibmphp_hpc_stop_poll_thread(void);
//----------------------------------------------------------------------------
@@ -574,7 +574,7 @@ extern void ibmphp_hpc_stop_poll_thread (void);
#define HPC_CTLR_IRQ_PENDG 0x80
//----------------------------------------------------------------------------
-// HPC_CTLR_WROKING status return codes
+// HPC_CTLR_WORKING status return codes
//----------------------------------------------------------------------------
#define HPC_CTLR_WORKING_NO 0x00
#define HPC_CTLR_WORKING_YES 0x01
@@ -749,11 +749,11 @@ struct controller {
/* Functions */
-extern int ibmphp_init_devno (struct slot **); /* This function is called from EBDA, so we need it not be static */
-extern int ibmphp_do_disable_slot (struct slot *slot_cur);
-extern int ibmphp_update_slot_info (struct slot *); /* This function is called from HPC, so we need it to not be be static */
-extern int ibmphp_configure_card (struct pci_func *, u8);
-extern int ibmphp_unconfigure_card (struct slot **, int);
+int ibmphp_init_devno(struct slot **); /* This function is called from EBDA, so we need it to not be static */
+int ibmphp_do_disable_slot(struct slot *slot_cur);
+int ibmphp_update_slot_info(struct slot *); /* This function is called from HPC, so we need it to not be static */
+int ibmphp_configure_card(struct pci_func *, u8);
+int ibmphp_unconfigure_card(struct slot **, int);
extern struct hotplug_slot_ops ibmphp_hotplug_slot_ops;
#endif //__IBMPHP_H
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index d934dd4fa87..f7b8684a773 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -49,7 +49,7 @@
int ibmphp_debug;
-static int debug;
+static bool debug;
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC (debug, "Debugging mode enabled or not");
MODULE_LICENSE ("GPL");
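
The debug flag above changes from int to bool because module_param(debug, bool, ...) expects the backing variable to really be a bool; since the module-param type tightening in the early 3.x kernels, backing a bool parameter with an int triggers a build warning. The resulting idiom:

    /* Sketch: a writable boolean module parameter. */
    static bool debug;
    module_param(debug, bool, S_IRUGO | S_IWUSR);
    MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
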
@@ -58,7 +58,7 @@ MODULE_DESCRIPTION (DRIVER_DESC);
struct pci_bus *ibmphp_pci_bus;
static int max_slots;
-static int irqs[16]; /* PIC mode IRQ's we're using so far (in case MPS
+static int irqs[16]; /* PIC mode IRQs we're using so far (in case MPS
* tables don't provide default info for empty slots */
static int init_flag;
@@ -71,20 +71,20 @@ static inline int get_max_adapter_speed (struct hotplug_slot *hs, u8 *value)
return get_max_adapter_speed_1 (hs, value, 1);
}
*/
-static inline int get_cur_bus_info(struct slot **sl)
+static inline int get_cur_bus_info(struct slot **sl)
{
int rc = 1;
- struct slot * slot_cur = *sl;
+ struct slot *slot_cur = *sl;
debug("options = %x\n", slot_cur->ctrl->options);
- debug("revision = %x\n", slot_cur->ctrl->revision);
+ debug("revision = %x\n", slot_cur->ctrl->revision);
- if (READ_BUS_STATUS(slot_cur->ctrl))
+ if (READ_BUS_STATUS(slot_cur->ctrl))
rc = ibmphp_hpc_readslot(slot_cur, READ_BUSSTATUS, NULL);
-
- if (rc)
+
+ if (rc)
return rc;
-
+
slot_cur->bus_on->current_speed = CURRENT_BUS_SPEED(slot_cur->busstatus);
if (READ_BUS_MODE(slot_cur->ctrl))
slot_cur->bus_on->current_bus_mode =
@@ -96,7 +96,7 @@ static inline int get_cur_bus_info(struct slot **sl)
slot_cur->busstatus,
slot_cur->bus_on->current_speed,
slot_cur->bus_on->current_bus_mode);
-
+
*sl = slot_cur;
return 0;
}
@@ -104,8 +104,8 @@ static inline int get_cur_bus_info(struct slot **sl)
static inline int slot_update(struct slot **sl)
{
int rc;
- rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL);
- if (rc)
+ rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL);
+ if (rc)
return rc;
if (!init_flag)
rc = get_cur_bus_info(sl);
@@ -114,8 +114,8 @@ static inline int slot_update(struct slot **sl)
static int __init get_max_slots (void)
{
- struct slot * slot_cur;
- struct list_head * tmp;
+ struct slot *slot_cur;
+ struct list_head *tmp;
u8 slot_count = 0;
list_for_each(tmp, &ibmphp_slot_head) {
@@ -172,7 +172,7 @@ int ibmphp_init_devno(struct slot **cur_slot)
debug("(*cur_slot)->irq[3] = %x\n",
(*cur_slot)->irq[3]);
- debug("rtable->exlusive_irqs = %x\n",
+ debug("rtable->exclusive_irqs = %x\n",
rtable->exclusive_irqs);
debug("rtable->slots[loop].irq[0].bitmap = %x\n",
rtable->slots[loop].irq[0].bitmap);
@@ -271,7 +271,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
else
rc = -ENODEV;
}
- } else
+ } else
rc = -ENODEV;
ibmphp_unlock_operations();
@@ -280,7 +280,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
return rc;
}
-static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int rc = -ENODEV;
struct slot *pslot;
@@ -288,7 +288,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value)
debug("get_attention_status - Entry hotplug_slot[%lx] pvalue[%lx]\n",
(ulong) hotplug_slot, (ulong) value);
-
+
ibmphp_lock_operations();
if (hotplug_slot) {
pslot = hotplug_slot->private;
@@ -311,7 +311,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value)
return rc;
}
-static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int rc = -ENODEV;
struct slot *pslot;
@@ -338,7 +338,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 * value)
}
-static int get_power_status(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int rc = -ENODEV;
struct slot *pslot;
@@ -364,7 +364,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 * value)
return rc;
}
-static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 *value)
{
int rc = -ENODEV;
struct slot *pslot;
@@ -406,14 +406,14 @@ static int get_max_bus_speed(struct slot *slot)
ibmphp_lock_operations();
mode = slot->supported_bus_mode;
- speed = slot->supported_speed;
+ speed = slot->supported_speed;
ibmphp_unlock_operations();
switch (speed) {
case BUS_SPEED_33:
break;
case BUS_SPEED_66:
- if (mode == BUS_MODE_PCIX)
+ if (mode == BUS_MODE_PCIX)
speed += 0x01;
break;
case BUS_SPEED_100:
@@ -433,7 +433,7 @@ static int get_max_bus_speed(struct slot *slot)
}
/*
-static int get_max_adapter_speed_1(struct hotplug_slot *hotplug_slot, u8 * value, u8 flag)
+static int get_max_adapter_speed_1(struct hotplug_slot *hotplug_slot, u8 *value, u8 flag)
{
int rc = -ENODEV;
struct slot *pslot;
@@ -471,7 +471,7 @@ static int get_max_adapter_speed_1(struct hotplug_slot *hotplug_slot, u8 * value
return rc;
}
-static int get_bus_name(struct hotplug_slot *hotplug_slot, char * value)
+static int get_bus_name(struct hotplug_slot *hotplug_slot, char *value)
{
int rc = -ENODEV;
struct slot *pslot = NULL;
@@ -515,13 +515,13 @@ static int __init init_ops(void)
debug("BEFORE GETTING SLOT STATUS, slot # %x\n",
slot_cur->number);
- if (slot_cur->ctrl->revision == 0xFF)
+ if (slot_cur->ctrl->revision == 0xFF)
if (get_ctrl_revision(slot_cur,
&slot_cur->ctrl->revision))
return -1;
- if (slot_cur->bus_on->current_speed == 0xFF)
- if (get_cur_bus_info(&slot_cur))
+ if (slot_cur->bus_on->current_speed == 0xFF)
+ if (get_cur_bus_info(&slot_cur))
return -1;
get_max_bus_speed(slot_cur);
@@ -539,8 +539,8 @@ static int __init init_ops(void)
debug("SLOT_PRESENT = %x\n", SLOT_PRESENT(slot_cur->status));
debug("SLOT_LATCH = %x\n", SLOT_LATCH(slot_cur->status));
- if ((SLOT_PWRGD(slot_cur->status)) &&
- !(SLOT_PRESENT(slot_cur->status)) &&
+ if ((SLOT_PWRGD(slot_cur->status)) &&
+ !(SLOT_PRESENT(slot_cur->status)) &&
!(SLOT_LATCH(slot_cur->status))) {
debug("BEFORE POWER OFF COMMAND\n");
rc = power_off(slot_cur);
@@ -581,13 +581,13 @@ static int validate(struct slot *slot_cur, int opn)
switch (opn) {
case ENABLE:
- if (!(SLOT_PWRGD(slot_cur->status)) &&
- (SLOT_PRESENT(slot_cur->status)) &&
+ if (!(SLOT_PWRGD(slot_cur->status)) &&
+ (SLOT_PRESENT(slot_cur->status)) &&
!(SLOT_LATCH(slot_cur->status)))
return 0;
break;
case DISABLE:
- if ((SLOT_PWRGD(slot_cur->status)) &&
+ if ((SLOT_PWRGD(slot_cur->status)) &&
(SLOT_PRESENT(slot_cur->status)) &&
!(SLOT_LATCH(slot_cur->status)))
return 0;
@@ -617,7 +617,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
err("out of system memory\n");
return -ENOMEM;
}
-
+
info->power_status = SLOT_PWRGD(slot_cur->status);
info->attention_status = SLOT_ATTN(slot_cur->status,
slot_cur->ext_status);
@@ -638,7 +638,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
case BUS_SPEED_33:
break;
case BUS_SPEED_66:
- if (mode == BUS_MODE_PCIX)
+ if (mode == BUS_MODE_PCIX)
bus_speed += 0x01;
else if (mode == BUS_MODE_PCI)
;
@@ -654,8 +654,8 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
}
bus->cur_bus_speed = bus_speed;
- // To do: bus_names
-
+ // To do: bus_names
+
rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info);
kfree(info);
return rc;
@@ -671,7 +671,7 @@ static struct pci_func *ibm_slot_find(u8 busno, u8 device, u8 function)
{
struct pci_func *func_cur;
struct slot *slot_cur;
- struct list_head * tmp;
+ struct list_head *tmp;
list_for_each(tmp, &ibmphp_slot_head) {
slot_cur = list_entry(tmp, struct slot, ibm_slot_list);
if (slot_cur->func) {
@@ -696,8 +696,8 @@ static struct pci_func *ibm_slot_find(u8 busno, u8 device, u8 function)
static void free_slots(void)
{
struct slot *slot_cur;
- struct list_head * tmp;
- struct list_head * next;
+ struct list_head *tmp;
+ struct list_head *next;
debug("%s -- enter\n", __func__);
@@ -718,19 +718,24 @@ static void ibm_unconfigure_device(struct pci_func *func)
func->device, func->function);
debug("func->device << 3 | 0x0 = %x\n", func->device << 3 | 0x0);
+ pci_lock_rescan_remove();
+
for (j = 0; j < 0x08; j++) {
temp = pci_get_bus_and_slot(func->busno, (func->device << 3) | j);
if (temp) {
- pci_remove_bus_device(temp);
+ pci_stop_and_remove_bus_device(temp);
pci_dev_put(temp);
}
}
+
pci_dev_put(func->dev);
+
+ pci_unlock_rescan_remove();
}
/*
- * The following function is to fix kernel bug regarding
- * getting bus entries, here we manually add those primary
+ * The following function is to fix kernel bug regarding
+ * getting bus entries, here we manually add those primary
* bus entries to kernel bus structure whenever apply
*/
static u8 bus_structure_fixup(u8 busno)
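
The loop in ibm_unconfigure_device() above walks all eight possible functions of the slot's device: devfn packs the device number in the upper five bits and the function number in the lower three, which is what `(func->device << 3) | j` computes. The same idiom, written with the standard macro:

    /* Sketch: remove every function of one device (fn 0..7 of a devfn). */
    for (fn = 0; fn < 8; fn++) {
            dev = pci_get_bus_and_slot(busno, PCI_DEVFN(device, fn));
            if (dev) {
                    pci_stop_and_remove_bus_device(dev);
                    pci_dev_put(dev);       /* drop the reference from the lookup */
            }
    }
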
@@ -760,7 +765,7 @@ static u8 bus_structure_fixup(u8 busno)
for (dev->devfn = 0; dev->devfn < 256; dev->devfn += 8) {
if (!pci_read_config_word(dev, PCI_VENDOR_ID, &l) &&
(l != 0x0000) && (l != 0xffff)) {
- debug("%s - Inside bus_struture_fixup()\n",
+ debug("%s - Inside bus_structure_fixup()\n",
__func__);
pci_scan_bus(busno, ibmphp_pci_bus->ops, NULL);
break;
@@ -775,12 +780,13 @@ static u8 bus_structure_fixup(u8 busno)
static int ibm_configure_device(struct pci_func *func)
{
- unsigned char bus;
struct pci_bus *child;
int num;
int flag = 0; /* this is to make sure we don't double scan the bus,
for bridged devices primarily */
+ pci_lock_rescan_remove();
+
if (!(bus_structure_fixup(func->busno)))
flag = 1;
if (func->dev == NULL)
@@ -790,7 +796,7 @@ static int ibm_configure_device(struct pci_func *func)
if (func->dev == NULL) {
struct pci_bus *bus = pci_find_bus(0, func->busno);
if (!bus)
- return 0;
+ goto out;
num = pci_scan_slot(bus,
PCI_DEVFN(func->device, func->function));
@@ -801,25 +807,28 @@ static int ibm_configure_device(struct pci_func *func)
PCI_DEVFN(func->device, func->function));
if (func->dev == NULL) {
err("ERROR... : pci_dev still NULL\n");
- return 0;
+ goto out;
}
}
if (!(flag) && (func->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
- pci_read_config_byte(func->dev, PCI_SECONDARY_BUS, &bus);
- child = pci_add_new_bus(func->dev->bus, func->dev, bus);
- pci_do_scan_bus(child);
+ pci_hp_add_bridge(func->dev);
+ child = func->dev->subordinate;
+ if (child)
+ pci_bus_add_devices(child);
}
+ out:
+ pci_unlock_rescan_remove();
return 0;
}
/*******************************************************
- * Returns whether the bus is empty or not
+ * Returns whether the bus is empty or not
*******************************************************/
-static int is_bus_empty(struct slot * slot_cur)
+static int is_bus_empty(struct slot *slot_cur)
{
int rc;
- struct slot * tmp_slot;
+ struct slot *tmp_slot;
u8 i = slot_cur->bus_on->slot_min;
while (i <= slot_cur->bus_on->slot_max) {
@@ -842,12 +851,12 @@ static int is_bus_empty(struct slot * slot_cur)
}
/***********************************************************
- * If the HPC permits and the bus currently empty, tries to set the
+ * If the HPC permits and the bus currently empty, tries to set the
* bus speed and mode at the maximum card and bus capability
* Parameters: slot
* Returns: bus is set (0) or error code
***********************************************************/
-static int set_bus(struct slot * slot_cur)
+static int set_bus(struct slot *slot_cur)
{
int rc;
u8 speed;
@@ -856,7 +865,7 @@ static int set_bus(struct slot * slot_cur)
static struct pci_device_id ciobx[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 0x0101) },
{ },
- };
+ };
debug("%s - entry slot # %d\n", __func__, slot_cur->number);
if (SET_BUS_STATUS(slot_cur->ctrl) && is_bus_empty(slot_cur)) {
@@ -877,7 +886,7 @@ static int set_bus(struct slot * slot_cur)
else if (!SLOT_BUS_MODE(slot_cur->ext_status))
/* if max slot/bus capability is 66 pci
and there's no bus mode mismatch, then
- the adapter supports 66 pci */
+ the adapter supports 66 pci */
cmd = HPC_BUS_66CONVMODE;
else
cmd = HPC_BUS_33CONVMODE;
@@ -930,7 +939,7 @@ static int set_bus(struct slot * slot_cur)
return -EIO;
}
}
- /* This is for x440, once Brandon fixes the firmware,
+ /* This is for x440, once Brandon fixes the firmware,
will not need this delay */
msleep(1000);
debug("%s -Exit\n", __func__);
@@ -938,16 +947,16 @@ static int set_bus(struct slot * slot_cur)
}
/* This routine checks the bus limitations that the slot is on from the BIOS.
- * This is used in deciding whether or not to power up the slot.
+ * This is used in deciding whether or not to power up the slot.
* (electrical/spec limitations. For example, >1 133 MHz or >2 66 PCI cards on
- * same bus)
+ * same bus)
* Parameters: slot
* Returns: 0 = no limitations, -EINVAL = exceeded limitations on the bus
*/
static int check_limitations(struct slot *slot_cur)
{
u8 i;
- struct slot * tmp_slot;
+ struct slot *tmp_slot;
u8 count = 0;
u8 limitation = 0;
@@ -986,7 +995,7 @@ static int check_limitations(struct slot *slot_cur)
static inline void print_card_capability(struct slot *slot_cur)
{
info("capability of the card is ");
- if ((slot_cur->ext_status & CARD_INFO) == PCIX133)
+ if ((slot_cur->ext_status & CARD_INFO) == PCIX133)
info(" 133 MHz PCI-X\n");
else if ((slot_cur->ext_status & CARD_INFO) == PCIX66)
info(" 66 MHz PCI-X\n");
@@ -1020,7 +1029,7 @@ static int enable_slot(struct hotplug_slot *hs)
}
attn_LED_blink(slot_cur);
-
+
rc = set_bus(slot_cur);
if (rc) {
err("was not able to set the bus\n");
@@ -1036,8 +1045,7 @@ static int enable_slot(struct hotplug_slot *hs)
rc = check_limitations(slot_cur);
if (rc) {
err("Adding this card exceeds the limitations of this bus.\n");
- err("(i.e., >1 133MHz cards running on same bus, or "
- ">2 66 PCI cards running on same bus.\n");
+ err("(i.e., >1 133MHz cards running on same bus, or >2 66 PCI cards running on same bus.\n");
err("Try hot-adding into another bus\n");
rc = -EINVAL;
goto error_nopower;
@@ -1061,12 +1069,10 @@ static int enable_slot(struct hotplug_slot *hs)
!(SLOT_PWRGD(slot_cur->status)))
err("power fault occurred trying to power up\n");
else if (SLOT_BUS_SPEED(slot_cur->status)) {
- err("bus speed mismatch occurred. please check "
- "current bus speed and card capability\n");
+ err("bus speed mismatch occurred. please check current bus speed and card capability\n");
print_card_capability(slot_cur);
} else if (SLOT_BUS_MODE(slot_cur->ext_status)) {
- err("bus mode mismatch occurred. please check "
- "current bus mode and card capability\n");
+ err("bus mode mismatch occurred. please check current bus mode and card capability\n");
print_card_capability(slot_cur);
}
ibmphp_update_slot_info(slot_cur);
@@ -1082,18 +1088,17 @@ static int enable_slot(struct hotplug_slot *hs)
rc = slot_update(&slot_cur);
if (rc)
goto error_power;
-
+
rc = -EINVAL;
if (SLOT_POWER(slot_cur->status) && !(SLOT_PWRGD(slot_cur->status))) {
err("power fault occurred trying to power up...\n");
goto error_power;
}
if (SLOT_POWER(slot_cur->status) && (SLOT_BUS_SPEED(slot_cur->status))) {
- err("bus speed mismatch occurred. please check current bus "
- "speed and card capability\n");
+ err("bus speed mismatch occurred. please check current bus speed and card capability\n");
print_card_capability(slot_cur);
goto error_power;
- }
+ }
/* Don't think this case will happen after above checks...
* but just in case, for paranoia sake */
if (!(SLOT_POWER(slot_cur->status))) {
@@ -1144,7 +1149,7 @@ static int enable_slot(struct hotplug_slot *hs)
ibmphp_print_test();
rc = ibmphp_update_slot_info(slot_cur);
exit:
- ibmphp_unlock_operations();
+ ibmphp_unlock_operations();
return rc;
error_nopower:
@@ -1180,7 +1185,7 @@ static int ibmphp_disable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
int rc;
-
+
ibmphp_lock_operations();
rc = ibmphp_do_disable_slot(slot);
ibmphp_unlock_operations();
@@ -1192,12 +1197,12 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
int rc;
u8 flag;
- debug("DISABLING SLOT...\n");
-
+ debug("DISABLING SLOT...\n");
+
if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) {
return -ENODEV;
}
-
+
flag = slot_cur->flag;
slot_cur->flag = 1;
@@ -1210,7 +1215,7 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
attn_LED_blink(slot_cur);
if (slot_cur->func == NULL) {
- /* We need this for fncs's that were there on bootup */
+ /* We need this for functions that were there on bootup */
slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL);
if (!slot_cur->func) {
err("out of system memory\n");
@@ -1222,12 +1227,13 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
}
ibm_unconfigure_device(slot_cur->func);
-
- /* If we got here from latch suddenly opening on operating card or
- a power fault, there's no power to the card, so cannot
- read from it to determine what resources it occupied. This operation
- is forbidden anyhow. The best we can do is remove it from kernel
- lists at least */
+
+ /*
+ * If we got here from latch suddenly opening on operating card or
+ * a power fault, there's no power to the card, so cannot
+ * read from it to determine what resources it occupied. This operation
+ * is forbidden anyhow. The best we can do is remove it from kernel
+ * lists at least */
if (!flag) {
attn_off(slot_cur);
@@ -1264,7 +1270,7 @@ error:
rc = -EFAULT;
goto exit;
}
- if (flag)
+ if (flag)
ibmphp_update_slot_info(slot_cur);
goto exit;
}
@@ -1339,7 +1345,7 @@ static int __init ibmphp_init(void)
debug("AFTER Resource & EBDA INITIALIZATIONS\n");
max_slots = get_max_slots();
-
+
if ((rc = ibmphp_register_pci()))
goto error;
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 5becbdee402..0f65ac55543 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -123,7 +123,7 @@ static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void)
static void __init print_bus_info (void)
{
struct bus_info *ptr;
-
+
list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
debug ("%s - slot_min = %x\n", __func__, ptr->slot_min);
debug ("%s - slot_max = %x\n", __func__, ptr->slot_max);
@@ -131,7 +131,7 @@ static void __init print_bus_info (void)
debug ("%s - bus# = %x\n", __func__, ptr->busno);
debug ("%s - current_speed = %x\n", __func__, ptr->current_speed);
debug ("%s - controller_id = %x\n", __func__, ptr->controller_id);
-
+
debug ("%s - slots_at_33_conv = %x\n", __func__, ptr->slots_at_33_conv);
debug ("%s - slots_at_66_conv = %x\n", __func__, ptr->slots_at_66_conv);
debug ("%s - slots_at_66_pcix = %x\n", __func__, ptr->slots_at_66_pcix);
@@ -144,7 +144,7 @@ static void __init print_bus_info (void)
static void print_lo_info (void)
{
struct rio_detail *ptr;
- debug ("print_lo_info ----\n");
+ debug ("print_lo_info ----\n");
list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) {
debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
@@ -176,7 +176,7 @@ static void __init print_ebda_pci_rsrc (void)
struct ebda_pci_rsrc *ptr;
list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) {
- debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
+ debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
__func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr);
}
}
@@ -259,7 +259,7 @@ int __init ibmphp_access_ebda (void)
ebda_seg = readw (io_mem);
iounmap (io_mem);
debug ("returned ebda segment: %x\n", ebda_seg);
-
+
io_mem = ioremap(ebda_seg<<4, 1);
if (!io_mem)
return -ENOMEM;
@@ -276,6 +276,12 @@ int __init ibmphp_access_ebda (void)
for (;;) {
offset = next_offset;
+
+ /* Make sure what we read is still in the mapped section */
+ if (WARN(offset > (ebda_sz * 1024 - 4),
+ "ibmphp_ebda: next read is beyond ebda_sz\n"))
+ break;
+
next_offset = readw (io_mem + offset); /* offset of next blk */
offset += 2;
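
The guard added above keeps the EBDA block-chain walk from reading past the end of the region that was actually ioremap'ed; `ebda_sz * 1024 - 4` appears to leave four bytes of headroom for the 16-bit reads that follow. The general shape of the pattern, with a hypothetical map_size:

    /* Sketch: validate an untrusted offset before reading through an
     * ioremap'ed window; WARN() prints a backtrace and evaluates to the
     * condition, so it can gate the bail-out directly. */
    if (WARN(offset > map_size - 2 * sizeof(u16),
             "next read is beyond the mapped region\n"))
            break;
    next_offset = readw(io_mem + offset);
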
@@ -304,7 +310,7 @@ int __init ibmphp_access_ebda (void)
re = readw (io_mem + sub_addr); /* next sub blk */
sub_addr += 2;
- rc_id = readw (io_mem + sub_addr); /* sub blk id */
+ rc_id = readw (io_mem + sub_addr); /* sub blk id */
sub_addr += 2;
if (rc_id != 0x5243)
@@ -324,7 +330,7 @@ int __init ibmphp_access_ebda (void)
debug ("info about hpc descriptor---\n");
debug ("hot blk format: %x\n", format);
debug ("num of controller: %x\n", num_ctlrs);
- debug ("offset of hpc data structure enteries: %x\n ", sub_addr);
+ debug ("offset of hpc data structure entries: %x\n ", sub_addr);
sub_addr = base + re; /* re sub blk */
/* FIXME: rc is never used/checked */
@@ -353,7 +359,7 @@ int __init ibmphp_access_ebda (void)
debug ("info about rsrc descriptor---\n");
debug ("format: %x\n", format);
debug ("num of rsrc: %x\n", num_entries);
- debug ("offset of rsrc data structure enteries: %x\n ", sub_addr);
+ debug ("offset of rsrc data structure entries: %x\n ", sub_addr);
hs_complete = 1;
} else {
@@ -362,13 +368,15 @@ int __init ibmphp_access_ebda (void)
debug ("rio blk id: %x\n", blk_id);
rio_table_ptr = kzalloc(sizeof(struct rio_table_hdr), GFP_KERNEL);
- if (!rio_table_ptr)
- return -ENOMEM;
+ if (!rio_table_ptr) {
+ rc = -ENOMEM;
+ goto out;
+ }
rio_table_ptr->ver_num = readb (io_mem + offset);
rio_table_ptr->scal_count = readb (io_mem + offset + 1);
rio_table_ptr->riodev_count = readb (io_mem + offset + 2);
rio_table_ptr->offset = offset +3 ;
-
+
debug("info about rio table hdr ---\n");
debug("ver_num: %x\nscal_count: %x\nriodev_count: %x\noffset of rio table: %x\n ",
rio_table_ptr->ver_num, rio_table_ptr->scal_count,
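
The allocation-failure path above switches from a bare `return -ENOMEM` to `goto out`, routing the error through the function's common exit so that resources taken earlier in ibmphp_access_ebda() — in particular the ioremap'ed EBDA window — can be released on the way out. A sketch of the idiom, with hypothetical labels and cleanup rather than the literal function:

    /* Sketch: single-exit error handling around an ioremap'ed region. */
    io_mem = ioremap(base, len);
    if (!io_mem)
            return -ENOMEM;         /* nothing to clean up yet */

    ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
    if (!ptr) {
            rc = -ENOMEM;
            goto out;               /* unmap before returning */
    }
    /* ... */
    out:
            iounmap(io_mem);
            return rc;
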
@@ -432,12 +440,12 @@ static int __init ebda_rio_table (void)
rio_detail_ptr->chassis_num = readb (io_mem + offset + 14);
// debug ("rio_node_id: %x\nbbar: %x\nrio_type: %x\nowner_id: %x\nport0_node: %x\nport0_port: %x\nport1_node: %x\nport1_port: %x\nfirst_slot_num: %x\nstatus: %x\n", rio_detail_ptr->rio_node_id, rio_detail_ptr->bbar, rio_detail_ptr->rio_type, rio_detail_ptr->owner_id, rio_detail_ptr->port0_node_connect, rio_detail_ptr->port0_port_connect, rio_detail_ptr->port1_node_connect, rio_detail_ptr->port1_port_connect, rio_detail_ptr->first_slot_num, rio_detail_ptr->status);
//create linked list of chassis
- if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5)
+ if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5)
list_add (&rio_detail_ptr->rio_detail_list, &rio_vg_head);
- //create linked list of expansion box
- else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7)
+ //create linked list of expansion box
+ else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7)
list_add (&rio_detail_ptr->rio_detail_list, &rio_lo_head);
- else
+ else
// not in my concern
kfree (rio_detail_ptr);
offset += 15;
@@ -448,7 +456,7 @@ static int __init ebda_rio_table (void)
}
/*
- * reorganizing linked list of chassis
+ * reorganizing linked list of chassis
*/
static struct opt_rio *search_opt_vg (u8 chassis_num)
{
@@ -456,7 +464,7 @@ static struct opt_rio *search_opt_vg (u8 chassis_num)
list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) {
if (ptr->chassis_num == chassis_num)
return ptr;
- }
+ }
return NULL;
}
@@ -464,7 +472,7 @@ static int __init combine_wpg_for_chassis (void)
{
struct opt_rio *opt_rio_ptr = NULL;
struct rio_detail *rio_detail_ptr = NULL;
-
+
list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) {
opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num);
if (!opt_rio_ptr) {
@@ -476,14 +484,14 @@ static int __init combine_wpg_for_chassis (void)
opt_rio_ptr->first_slot_num = rio_detail_ptr->first_slot_num;
opt_rio_ptr->middle_num = rio_detail_ptr->first_slot_num;
list_add (&opt_rio_ptr->opt_rio_list, &opt_vg_head);
- } else {
+ } else {
opt_rio_ptr->first_slot_num = min (opt_rio_ptr->first_slot_num, rio_detail_ptr->first_slot_num);
opt_rio_ptr->middle_num = max (opt_rio_ptr->middle_num, rio_detail_ptr->first_slot_num);
- }
+ }
}
print_opt_vg ();
- return 0;
-}
+ return 0;
+}
/*
* reorganizing linked list of expansion box
@@ -494,7 +502,7 @@ static struct opt_rio_lo *search_opt_lo (u8 chassis_num)
list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) {
if (ptr->chassis_num == chassis_num)
return ptr;
- }
+ }
return NULL;
}
@@ -502,7 +510,7 @@ static int combine_wpg_for_expansion (void)
{
struct opt_rio_lo *opt_rio_lo_ptr = NULL;
struct rio_detail *rio_detail_ptr = NULL;
-
+
list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) {
opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num);
if (!opt_rio_lo_ptr) {
@@ -514,22 +522,22 @@ static int combine_wpg_for_expansion (void)
opt_rio_lo_ptr->first_slot_num = rio_detail_ptr->first_slot_num;
opt_rio_lo_ptr->middle_num = rio_detail_ptr->first_slot_num;
opt_rio_lo_ptr->pack_count = 1;
-
+
list_add (&opt_rio_lo_ptr->opt_rio_lo_list, &opt_lo_head);
- } else {
+ } else {
opt_rio_lo_ptr->first_slot_num = min (opt_rio_lo_ptr->first_slot_num, rio_detail_ptr->first_slot_num);
opt_rio_lo_ptr->middle_num = max (opt_rio_lo_ptr->middle_num, rio_detail_ptr->first_slot_num);
opt_rio_lo_ptr->pack_count = 2;
- }
+ }
}
- return 0;
+ return 0;
}
-
+
/* Since we don't know the max slot number per each chassis, hence go
* through the list of all chassis to find out the range
- * Arguments: slot_num, 1st slot number of the chassis we think we are on,
- * var (0 = chassis, 1 = expansion box)
+ * Arguments: slot_num, 1st slot number of the chassis we think we are on,
+ * var (0 = chassis, 1 = expansion box)
*/
static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
{
@@ -539,7 +547,7 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
if (!var) {
list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
- if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) {
+ if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) {
rc = -ENODEV;
break;
}
@@ -555,25 +563,25 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
return rc;
}
-static struct opt_rio_lo * find_rxe_num (u8 slot_num)
+static struct opt_rio_lo *find_rxe_num (u8 slot_num)
{
struct opt_rio_lo *opt_lo_ptr;
list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) {
//check to see if this slot_num belongs to expansion box
- if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1)))
+ if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1)))
return opt_lo_ptr;
}
return NULL;
}
-static struct opt_rio * find_chassis_num (u8 slot_num)
+static struct opt_rio *find_chassis_num (u8 slot_num)
{
struct opt_rio *opt_vg_ptr;
list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
- //check to see if this slot_num belongs to chassis
- if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0)))
+ //check to see if this slot_num belongs to chassis
+ if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0)))
return opt_vg_ptr;
}
return NULL;
@@ -585,21 +593,21 @@ static struct opt_rio * find_chassis_num (u8 slot_num)
static u8 calculate_first_slot (u8 slot_num)
{
u8 first_slot = 1;
- struct slot * slot_cur;
-
+ struct slot *slot_cur;
+
list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) {
if (slot_cur->ctrl) {
- if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num))
+ if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num))
first_slot = slot_cur->ctrl->ending_slot_num;
}
- }
+ }
return first_slot + 1;
}
#define SLOT_NAME_SIZE 30
-static char *create_file_name (struct slot * slot_cur)
+static char *create_file_name (struct slot *slot_cur)
{
struct opt_rio *opt_vg_ptr = NULL;
struct opt_rio_lo *opt_lo_ptr = NULL;
@@ -614,11 +622,11 @@ static char *create_file_name (struct slot * slot_cur)
err ("Structure passed is empty\n");
return NULL;
}
-
+
slot_num = slot_cur->number;
memset (str, 0, sizeof(str));
-
+
if (rio_table_ptr) {
if (rio_table_ptr->ver_num == 3) {
opt_vg_ptr = find_chassis_num (slot_num);
@@ -652,7 +660,7 @@ static char *create_file_name (struct slot * slot_cur)
/* if both NULL and we DO have correct RIO table in BIOS */
return NULL;
}
- }
+ }
if (!flag) {
if (slot_cur->ctrl->ctlr_type == 4) {
first_slot = calculate_first_slot (slot_num);
@@ -776,7 +784,7 @@ static int __init ebda_rsrc_controller (void)
hpc_ptr->ctlr_relative_id = ctlr;
hpc_ptr->slot_count = slot_num;
hpc_ptr->bus_count = bus_num;
- debug ("now enter ctlr data struture ---\n");
+ debug ("now enter ctlr data structure ---\n");
debug ("ctlr id: %x\n", ctlr_id);
debug ("ctlr_relative_id: %x\n", hpc_ptr->ctlr_relative_id);
debug ("count of slots controlled by this ctlr: %x\n", slot_num);
@@ -790,7 +798,7 @@ static int __init ebda_rsrc_controller (void)
slot_ptr->ctl_index = readb (io_mem + addr_slot + 2*slot_num);
slot_ptr->slot_cap = readb (io_mem + addr_slot + 3*slot_num);
- // create bus_info lined list --- if only one slot per bus: slot_min = slot_max
+ // create bus_info lined list --- if only one slot per bus: slot_min = slot_max
bus_info_ptr2 = ibmphp_find_same_bus_num (slot_ptr->slot_bus_num);
if (!bus_info_ptr2) {
@@ -806,9 +814,9 @@ static int __init ebda_rsrc_controller (void)
bus_info_ptr1->index = bus_index++;
bus_info_ptr1->current_speed = 0xff;
bus_info_ptr1->current_bus_mode = 0xff;
-
+
bus_info_ptr1->controller_id = hpc_ptr->ctlr_id;
-
+
list_add_tail (&bus_info_ptr1->bus_info_list, &bus_info_head);
} else {
@@ -843,7 +851,7 @@ static int __init ebda_rsrc_controller (void)
bus_info_ptr2->slots_at_66_conv = bus_ptr->slots_at_66_conv;
bus_info_ptr2->slots_at_66_pcix = bus_ptr->slots_at_66_pcix;
bus_info_ptr2->slots_at_100_pcix = bus_ptr->slots_at_100_pcix;
- bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix;
+ bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix;
}
bus_ptr++;
}
@@ -856,7 +864,7 @@ static int __init ebda_rsrc_controller (void)
hpc_ptr->u.pci_ctlr.dev_fun = readb (io_mem + addr + 1);
hpc_ptr->irq = readb (io_mem + addr + 2);
addr += 3;
- debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n",
+ debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n",
hpc_ptr->u.pci_ctlr.bus,
hpc_ptr->u.pci_ctlr.dev_fun, hpc_ptr->irq);
break;
@@ -924,7 +932,7 @@ static int __init ebda_rsrc_controller (void)
tmp_slot->supported_speed = 2;
else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_66_MAX) == EBDA_SLOT_66_MAX)
tmp_slot->supported_speed = 1;
-
+
if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_PCIX_CAP) == EBDA_SLOT_PCIX_CAP)
tmp_slot->supported_bus_mode = 1;
else
@@ -992,7 +1000,7 @@ error_no_hpc:
return rc;
}
-/*
+/*
* map info (bus, devfun, start addr, end addr..) of i/o, memory,
* pfm from the physical addr to a list of resource.
*/
@@ -1049,7 +1057,7 @@ static int __init ebda_rsrc_rsrc (void)
addr += 10;
debug ("rsrc from mem or pfm ---\n");
- debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
+ debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
rsrc_ptr->rsrc_type, rsrc_ptr->bus_num, rsrc_ptr->dev_fun, rsrc_ptr->start_addr, rsrc_ptr->end_addr);
list_add (&rsrc_ptr->ebda_pci_rsrc_list, &ibmphp_ebda_pci_rsrc_head);
@@ -1088,7 +1096,7 @@ struct bus_info *ibmphp_find_same_bus_num (u32 num)
struct bus_info *ptr;
list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
- if (ptr->busno == num)
+ if (ptr->busno == num)
return ptr;
}
return NULL;
@@ -1102,7 +1110,7 @@ int ibmphp_get_bus_index (u8 num)
struct bus_info *ptr;
list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
- if (ptr->busno == num)
+ if (ptr->busno == num)
return ptr->index;
}
return -ENODEV;
@@ -1160,7 +1168,7 @@ static struct pci_device_id id_table[] = {
.subdevice = HPC_SUBSYSTEM_ID,
.class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
}, {}
-};
+};
MODULE_DEVICE_TABLE(pci, id_table);
@@ -1184,12 +1192,12 @@ int ibmphp_register_pci (void)
}
return rc;
}
-static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids)
+static int ibmphp_probe (struct pci_dev *dev, const struct pci_device_id *ids)
{
struct controller *ctrl;
debug ("inside ibmphp_probe\n");
-
+
list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
if (ctrl->ctlr_type == 1) {
if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) {
@@ -1202,4 +1210,3 @@ static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids)
}
return -ENODEV;
}
-
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index f59ed30512b..a936022956e 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -258,7 +258,7 @@ static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8
{
u8 rc;
void __iomem *wpg_addr; // base addr + offset
- unsigned long wpg_data; // data to/from WPG LOHI format
+ unsigned long wpg_data; // data to/from WPG LOHI format
unsigned long ultemp;
unsigned long data; // actual data HILO format
int i;
@@ -351,7 +351,7 @@ static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8
}
//------------------------------------------------------------
-// Read from ISA type HPC
+// Read from ISA type HPC
//------------------------------------------------------------
static u8 isa_ctrl_read (struct controller *ctlr_ptr, u8 offset)
{
@@ -372,7 +372,7 @@ static void isa_ctrl_write (struct controller *ctlr_ptr, u8 offset, u8 data)
{
u16 start_address;
u16 port_address;
-
+
start_address = ctlr_ptr->u.isa_ctlr.io_start;
port_address = start_address + (u16) offset;
outb (data, port_address);
@@ -533,7 +533,7 @@ static u8 hpc_readcmdtoindex (u8 cmd, u8 index)
*
* Return 0 or error codes
*---------------------------------------------------------------------*/
-int ibmphp_hpc_readslot (struct slot * pslot, u8 cmd, u8 * pstatus)
+int ibmphp_hpc_readslot (struct slot *pslot, u8 cmd, u8 *pstatus)
{
void __iomem *wpg_bbar = NULL;
struct controller *ctlr_ptr;
@@ -656,11 +656,11 @@ int ibmphp_hpc_readslot (struct slot * pslot, u8 cmd, u8 * pstatus)
//--------------------------------------------------------------------
// cleanup
//--------------------------------------------------------------------
-
+
// remove physical to logical address mapping
if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4))
iounmap (wpg_bbar);
-
+
free_hpc_access ();
debug_polling ("%s - Exit rc[%d]\n", __func__, rc);
@@ -672,7 +672,7 @@ int ibmphp_hpc_readslot (struct slot * pslot, u8 cmd, u8 * pstatus)
*
* Action: issue a WRITE command to HPC
*---------------------------------------------------------------------*/
-int ibmphp_hpc_writeslot (struct slot * pslot, u8 cmd)
+int ibmphp_hpc_writeslot (struct slot *pslot, u8 cmd)
{
void __iomem *wpg_bbar = NULL;
struct controller *ctlr_ptr;
@@ -835,7 +835,7 @@ static int poll_hpc(void *data)
down (&semOperations);
switch (poll_state) {
- case POLL_LATCH_REGISTER:
+ case POLL_LATCH_REGISTER:
oldlatchlow = curlatchlow;
ctrl_count = 0x00;
list_for_each (pslotlist, &ibmphp_slot_head) {
@@ -892,16 +892,16 @@ static int poll_hpc(void *data)
if (kthread_should_stop())
goto out_sleep;
-
+
down (&semOperations);
-
+
if (poll_count >= POLL_LATCH_CNT) {
poll_count = 0;
poll_state = POLL_SLOTS;
} else
poll_state = POLL_LATCH_REGISTER;
break;
- }
+ }
/* give up the hardware semaphore */
up (&semOperations);
/* sleep for a short time just for good measure */
@@ -958,7 +958,7 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot)
// bit 5 - HPC_SLOT_PWRGD
if ((pslot->status & 0x20) != (poldslot->status & 0x20))
// OFF -> ON: ignore, ON -> OFF: disable slot
- if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status)))
+ if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status)))
disable = 1;
// bit 6 - HPC_SLOT_BUS_SPEED
@@ -980,7 +980,7 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot)
pslot->status &= ~HPC_SLOT_POWER;
}
}
- // CLOSE -> OPEN
+ // CLOSE -> OPEN
else if ((SLOT_PWRGD (poldslot->status) == HPC_SLOT_PWRGD_GOOD)
&& (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) {
disable = 1;
@@ -1075,7 +1075,7 @@ void __exit ibmphp_hpc_stop_poll_thread (void)
debug ("before locking operations \n");
ibmphp_lock_operations ();
debug ("after locking operations \n");
-
+
// wait for poll thread to exit
debug ("before sem_exit down \n");
down (&sem_exit);
@@ -1102,7 +1102,7 @@ void __exit ibmphp_hpc_stop_poll_thread (void)
* Value:
*---------------------------------------------------------------------*/
static int hpc_wait_ctlr_notworking (int timeout, struct controller *ctlr_ptr, void __iomem *wpg_bbar,
- u8 * pstatus)
+ u8 *pstatus)
{
int rc = 0;
u8 done = 0;
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index 7b09e16173a..2fd296706ce 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -1,8 +1,8 @@
/*
* IBM Hot Plug Controller Driver
- *
+ *
* Written By: Irene Zubarev, IBM Corporation
- *
+ *
* Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2001,2002 IBM Corp.
*
@@ -42,12 +42,12 @@ static u8 find_sec_number (u8 primary_busno, u8 slotno);
/*
* NOTE..... If BIOS doesn't provide default routing, we assign:
- * 9 for SCSI, 10 for LAN adapters, and 11 for everything else.
+ * 9 for SCSI, 10 for LAN adapters, and 11 for everything else.
* If adapter is bridged, then we assign 11 to it and devices behind it.
* We also assign the same irq numbers for multi function devices.
* These are PIC mode, so shouldn't matter n.e.ways (hopefully)
*/
-static void assign_alt_irq (struct pci_func * cur_func, u8 class_code)
+static void assign_alt_irq (struct pci_func *cur_func, u8 class_code)
{
int j;
for (j = 0; j < 4; j++) {
@@ -71,11 +71,11 @@ static void assign_alt_irq (struct pci_func * cur_func, u8 class_code)
* Configures the device to be added (will allocate needed resources if it
* can), the device can be a bridge or a regular pci device, can also be
* multi-functional
- *
+ *
* Input: function to be added
- *
+ *
* TO DO: The error case with Multifunction device or multi function bridge,
- * if there is an error, will need to go through all previous functions and
+ * if there is an error, will need to go through all previous functions and
* unconfigure....or can add some code into unconfigure_card....
*/
int ibmphp_configure_card (struct pci_func *func, u8 slotno)
@@ -98,7 +98,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
cur_func = func;
/* We only get bus and device from IRQ routing table. So at this point,
- * func->busno is correct, and func->device contains only device (at the 5
+ * func->busno is correct, and func->device contains only device (at the 5
* highest bits)
*/
@@ -109,7 +109,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
cur_func->function = function;
- debug ("inside the loop, cur_func->busno = %x, cur_func->device = %x, cur_func->funcion = %x\n",
+ debug ("inside the loop, cur_func->busno = %x, cur_func->device = %x, cur_func->function = %x\n",
cur_func->busno, cur_func->device, cur_func->function);
pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_VENDOR_ID, &vendor_id);
@@ -137,8 +137,8 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
"Please choose another device.\n", cur_func->device);
return -ENODEV;
} else if (class == PCI_CLASS_DISPLAY_VGA) {
- err ("The device %x is not supported for hot plugging. "
- "Please choose another device.\n", cur_func->device);
+ err ("The device %x is not supported for hot plugging. Please choose another device.\n",
+ cur_func->device);
return -ENODEV;
}
switch (hdr_type) {
@@ -151,7 +151,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
cur_func->device, cur_func->busno);
cleanup_count = 6;
goto error;
- }
+ }
cur_func->next = NULL;
function = 0x8;
break;
@@ -179,8 +179,8 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
case PCI_HEADER_TYPE_MULTIBRIDGE:
class >>= 8;
if (class != PCI_CLASS_BRIDGE_PCI) {
- err ("This %x is not PCI-to-PCI bridge, and as is not supported for hot-plugging. "
- "Please insert another card.\n", cur_func->device);
+ err ("This %x is not PCI-to-PCI bridge, and as is not supported for hot-plugging. Please insert another card.\n",
+ cur_func->device);
return -ENODEV;
}
assign_alt_irq (cur_func, class_code);
@@ -247,8 +247,8 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
class >>= 8;
debug ("class now is %x\n", class);
if (class != PCI_CLASS_BRIDGE_PCI) {
- err ("This %x is not PCI-to-PCI bridge, and as is not supported for hot-plugging. "
- "Please insert another card.\n", cur_func->device);
+ err ("This %x is not PCI-to-PCI bridge, and as is not supported for hot-plugging. Please insert another card.\n",
+ cur_func->device);
return -ENODEV;
}
@@ -339,7 +339,7 @@ error:
}
/*
- * This function configures the pci BARs of a single device.
+ * This function configures the pci BARs of a single device.
* Input: pointer to the pci_func
* Output: configured PCI, 0, or error
*/
@@ -371,17 +371,17 @@ static int configure_device (struct pci_func *func)
for (count = 0; address[count]; count++) { /* for 6 BARs */
- /* not sure if i need this. per scott, said maybe need smth like this
+ /* not sure if i need this. per scott, said maybe need something like this
if devices don't adhere 100% to the spec, so don't want to write
to the reserved bits
- pcibios_read_config_byte(cur_func->busno, cur_func->device,
+ pcibios_read_config_byte(cur_func->busno, cur_func->device,
PCI_BASE_ADDRESS_0 + 4 * count, &tmp);
if (tmp & 0x01) // IO
- pcibios_write_config_dword(cur_func->busno, cur_func->device,
+ pcibios_write_config_dword(cur_func->busno, cur_func->device,
PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFD);
else // Memory
- pcibios_write_config_dword(cur_func->busno, cur_func->device,
+ pcibios_write_config_dword(cur_func->busno, cur_func->device,
PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFF);
*/
pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF);
@@ -421,8 +421,8 @@ static int configure_device (struct pci_func *func)
return -EIO;
}
pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->io[count]->start);
-
- /* _______________This is for debugging purposes only_____________________ */
+
+ /* _______________This is for debugging purposes only_____________________ */
debug ("b4 writing, the IO address is %x\n", func->io[count]->start);
pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
debug ("after writing.... the start address is %x\n", bar[count]);
@@ -484,7 +484,7 @@ static int configure_device (struct pci_func *func)
pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->pfmem[count]->start);
- /*_______________This is for debugging purposes only______________________________*/
+ /*_______________This is for debugging purposes only______________________________*/
debug ("b4 writing, start address is %x\n", func->pfmem[count]->start);
pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
debug ("after writing, start address is %x\n", bar[count]);
@@ -559,7 +559,7 @@ static int configure_device (struct pci_func *func)
/******************************************************************************
* This routine configures a PCI-2-PCI bridge and the functions behind it
* Parameters: pci_func
- * Returns:
+ * Returns:
******************************************************************************/
static int configure_bridge (struct pci_func **func_passed, u8 slotno)
{
@@ -622,7 +622,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
debug ("AFTER FIND_SEC_NUMBER, func->busno IS %x\n", func->busno);
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, sec_number);
-
+
/* __________________For debugging purposes only __________________________________
pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number);
debug ("sec_number after write/read is %x\n", sec_number);
@@ -644,7 +644,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
/* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !!!!!!!!!!!!!!!NEED TO ADD!!! FAST BACK-TO-BACK ENABLE!!!!!!!!!!!!!!!!!!!!
+ !!!!!!!!!!!!!!!NEED TO ADD!!! FAST BACK-TO-BACK ENABLE!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
@@ -670,7 +670,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
debug ("len[count] in IO = %x\n", len[count]);
bus_io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
-
+
if (!bus_io[count]) {
err ("out of system memory\n");
retval = -ENOMEM;
@@ -735,7 +735,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
ibmphp_add_pfmem_from_mem (bus_pfmem[count]);
func->pfmem[count] = bus_pfmem[count];
} else {
- err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n",
+ err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n",
func->busno, func->device, len[count]);
kfree (mem_tmp);
kfree (bus_pfmem[count]);
@@ -805,7 +805,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
debug ("amount_needed->mem = %x\n", amount_needed->mem);
debug ("amount_needed->pfmem = %x\n", amount_needed->pfmem);
- if (amount_needed->not_correct) {
+ if (amount_needed->not_correct) {
debug ("amount_needed is not correct\n");
for (count = 0; address[count]; count++) {
/* for 2 BARs */
@@ -830,7 +830,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
} else {
debug ("it wants %x IO behind the bridge\n", amount_needed->io);
io = kzalloc(sizeof(*io), GFP_KERNEL);
-
+
if (!io) {
err ("out of system memory\n");
retval = -ENOMEM;
@@ -959,7 +959,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
if (bus->noIORanges) {
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, 0x00 | bus->rangeIO->start >> 8);
- pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8);
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8);
/* _______________This is for debugging purposes only ____________________
pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, &temp);
@@ -980,7 +980,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
if (bus->noMemRanges) {
pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, 0x0000 | bus->rangeMem->start >> 16);
pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, 0x0000 | bus->rangeMem->end >> 16);
-
+
/* ____________________This is for debugging purposes only ________________________
pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &temp);
debug ("mem_base = %x\n", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16);
@@ -1017,7 +1017,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_PIN, &irq);
if ((irq > 0x00) && (irq < 0x05))
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_LINE, func->irq[irq - 1]);
- /*
+ /*
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, ctrl);
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_PARITY);
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_SERR);
@@ -1071,9 +1071,9 @@ error:
* This function adds up the amount of resources needed behind the PPB bridge
* and passes it to the configure_bridge function
* Input: bridge function
- * Ouput: amount of resources needed
+ * Output: amount of resources needed
*****************************************************************************/
-static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno)
+static struct res_needed *scan_behind_bridge (struct pci_func *func, u8 busno)
{
int count, len[6];
u16 vendor_id;
@@ -1125,13 +1125,11 @@ static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno)
class >>= 8; /* to take revision out, class = class.subclass.prog i/f */
if (class == PCI_CLASS_NOT_DEFINED_VGA) {
- err ("The device %x is VGA compatible and as is not supported for hot plugging. "
- "Please choose another device.\n", device);
+ err ("The device %x is VGA compatible and as is not supported for hot plugging. Please choose another device.\n", device);
amount->not_correct = 1;
return amount;
} else if (class == PCI_CLASS_DISPLAY_VGA) {
- err ("The device %x is not supported for hot plugging. "
- "Please choose another device.\n", device);
+ err ("The device %x is not supported for hot plugging. Please choose another device.\n", device);
amount->not_correct = 1;
return amount;
}
@@ -1204,9 +1202,9 @@ static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno)
return amount;
}
-/* The following 3 unconfigure_boot_ routines deal with the case when we had the card
- * upon bootup in the system, since we don't allocate func to such case, we need to read
- * the start addresses from pci config space and then find the corresponding entries in
+/* The following 3 unconfigure_boot_ routines deal with the case when we had the card
+ * upon bootup in the system, since we don't allocate func to such case, we need to read
+ * the start addresses from pci config space and then find the corresponding entries in
* our resource lists. The functions return either 0, -ENODEV, or -1 (general failure)
* Change: we also call these functions even if we configured the card ourselves (i.e., not
* the bootup case), since it should work same way
@@ -1483,12 +1481,10 @@ static int unconfigure_boot_card (struct slot *slot_cur)
debug ("hdr_type %x, class %x\n", hdr_type, class);
class >>= 8; /* to take revision out, class = class.subclass.prog i/f */
if (class == PCI_CLASS_NOT_DEFINED_VGA) {
- err ("The device %x function %x is VGA compatible and is not supported for hot removing. "
- "Please choose another device.\n", device, function);
+ err ("The device %x function %x is VGA compatible and is not supported for hot removing. Please choose another device.\n", device, function);
return -ENODEV;
} else if (class == PCI_CLASS_DISPLAY_VGA) {
- err ("The device %x function %x is not supported for hot removing. "
- "Please choose another device.\n", device, function);
+ err ("The device %x function %x is not supported for hot removing. Please choose another device.\n", device, function);
return -ENODEV;
}
@@ -1513,9 +1509,7 @@ static int unconfigure_boot_card (struct slot *slot_cur)
case PCI_HEADER_TYPE_BRIDGE:
class >>= 8;
if (class != PCI_CLASS_BRIDGE_PCI) {
- err ("This device %x function %x is not PCI-to-PCI bridge, "
- "and is not supported for hot-removing. "
- "Please try another card.\n", device, function);
+ err ("This device %x function %x is not PCI-to-PCI bridge, and is not supported for hot-removing. Please try another card.\n", device, function);
return -ENODEV;
}
rc = unconfigure_boot_bridge (busno, device, function);
@@ -1529,9 +1523,7 @@ static int unconfigure_boot_card (struct slot *slot_cur)
case PCI_HEADER_TYPE_MULTIBRIDGE:
class >>= 8;
if (class != PCI_CLASS_BRIDGE_PCI) {
- err ("This device %x function %x is not PCI-to-PCI bridge, "
- "and is not supported for hot-removing. "
- "Please try another card.\n", device, function);
+ err ("This device %x function %x is not PCI-to-PCI bridge, and is not supported for hot-removing. Please try another card.\n", device, function);
return -ENODEV;
}
rc = unconfigure_boot_bridge (busno, device, function);
@@ -1561,8 +1553,8 @@ static int unconfigure_boot_card (struct slot *slot_cur)
* unconfiguring the device
* TO DO: will probably need to add some code in case there was some resource,
* to remove it... this is from when we have errors in the configure_card...
- * !!!!!!!!!!!!!!!!!!!!!!!!!FOR BUSES!!!!!!!!!!!!
- * Returns: 0, -1, -ENODEV
+ * !!!!!!!!!!!!!!!!!!!!!!!!!FOR BUSES!!!!!!!!!!!!
+ * Returns: 0, -1, -ENODEV
*/
int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end)
{
@@ -1634,7 +1626,7 @@ int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end)
* Input: bus and the amount of resources needed (we know we can assign those,
* since they've been checked already
* Output: bus added to the correct spot
- * 0, -1, error
+ * 0, -1, error
*/
static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct resource_node *mem, struct resource_node *pfmem, u8 parent_busno)
{
@@ -1650,7 +1642,7 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r
err ("strange, cannot find bus which is supposed to be at the system... something is terribly wrong...\n");
return -ENODEV;
}
-
+
list_add (&bus->bus_list, &cur_bus->bus_list);
}
if (io) {
@@ -1679,7 +1671,7 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r
}
if (pfmem) {
pfmem_range = kzalloc(sizeof(*pfmem_range), GFP_KERNEL);
- if (!pfmem_range) {
+ if (!pfmem_range) {
err ("out of system memory\n");
return -ENOMEM;
}
@@ -1726,4 +1718,3 @@ static u8 find_sec_number (u8 primary_busno, u8 slotno)
return busno;
return 0xff;
}
-
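The configure_device() hunks above lean on the classic BAR-sizing handshake: write all-ones to a Base Address Register (the pci_bus_write_config_dword(..., 0xFFFFFFFF) call), read it back, mask off the low type bits, and the surviving address bits encode the region size as a power of two. A minimal user-space sketch of that arithmetic, using a fabricated readback value rather than a real config-space access:

#include <stdint.h>
#include <stdio.h>

/* Decode a 32-bit memory BAR after 0xFFFFFFFF has been written to it.
 * The low four bits are type/prefetch flags; every writable address bit
 * reads back as set, so negating the masked value yields the size. */
static uint32_t bar_size(uint32_t readback)
{
    uint32_t mask = readback & ~0xfu;   /* drop the flag bits */

    return ~mask + 1;                   /* two's complement = size */
}

int main(void)
{
    /* hypothetical readback for a 1 MiB, 32-bit, non-prefetchable BAR */
    uint32_t readback = 0xfff00000u;

    printf("BAR size: %u bytes\n", (unsigned)bar_size(readback));
    return 0;
}

The same decoding applies to the I/O BARs in these hunks, except the flag field is two bits wide and bit 0 is what distinguishes an I/O BAR from a memory BAR.
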
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index e2dc289f767..f34745abd5b 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -46,9 +46,9 @@ static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8);
static LIST_HEAD(gbuses);
-static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc * curr, u8 busno, int flag)
+static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc *curr, u8 busno, int flag)
{
- struct bus_node * newbus;
+ struct bus_node *newbus;
if (!(curr) && !(flag)) {
err ("NULL pointer passed\n");
@@ -69,10 +69,10 @@ static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc * curr, u8
return newbus;
}
-static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc * curr)
+static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc *curr)
{
struct resource_node *rs;
-
+
if (!curr) {
err ("NULL passed to allocate\n");
return NULL;
@@ -93,7 +93,7 @@ static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc * cur
static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node **new_range, struct ebda_pci_rsrc *curr, int flag, u8 first_bus)
{
- struct bus_node * newbus;
+ struct bus_node *newbus;
struct range_node *newrange;
u8 num_ranges = 0;
@@ -128,7 +128,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
}
newrange->start = curr->start_addr;
newrange->end = curr->end_addr;
-
+
if (first_bus || (!num_ranges))
newrange->rangeno = 1;
else {
@@ -162,7 +162,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
newbus->rangePFMem = newrange;
if (first_bus)
newbus->noPFMemRanges = 1;
- else {
+ else {
debug ("1st PFMemory Primary on Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
++newbus->noPFMemRanges;
fix_resources (newbus);
@@ -190,7 +190,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
* This is the Resource Management initialization function. It will go through
* the Resource list taken from EBDA and fill in this module's data structures
*
- * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES,
+ * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES,
* SINCE WE'RE GOING TO ASSUME FOR NOW WE DON'T HAVE THOSE ON OUR BUSES FOR NOW
*
* Input: ptr to the head of the resource list from EBDA
@@ -382,7 +382,7 @@ int __init ibmphp_rsrc_init (void)
* pci devices' resources for the appropriate resource
*
* Input: type of the resource, range to add, current bus
- * Output: 0 or -1, bus and range ptrs
+ * Output: 0 or -1, bus and range ptrs
********************************************************************************/
static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur)
{
@@ -466,7 +466,7 @@ static void update_resources (struct bus_node *bus_cur, int type, int rangeno)
switch (type) {
case MEM:
- if (bus_cur->firstMem)
+ if (bus_cur->firstMem)
res = bus_cur->firstMem;
break;
case PFMEM:
@@ -583,7 +583,7 @@ static void fix_resources (struct bus_node *bus_cur)
}
/*******************************************************************************
- * This routine adds a resource to the list of resources to the appropriate bus
+ * This routine adds a resource to the list of resources to the appropriate bus
* based on their resource type and sorted by their starting addresses. It assigns
* the ptrs to next and nextRange if needed.
*
@@ -605,11 +605,11 @@ int ibmphp_add_resource (struct resource_node *res)
err ("NULL passed to add\n");
return -ENODEV;
}
-
+
bus_cur = find_bus_wprev (res->busno, NULL, 0);
-
+
if (!bus_cur) {
- /* didn't find a bus, smth's wrong!!! */
+ /* didn't find a bus, something's wrong!!! */
debug ("no bus in the system, either pci_dev's wrong or allocation failed\n");
return -ENODEV;
}
@@ -648,7 +648,7 @@ int ibmphp_add_resource (struct resource_node *res)
if (!range_cur) {
switch (res->type) {
case IO:
- ++bus_cur->needIOUpdate;
+ ++bus_cur->needIOUpdate;
break;
case MEM:
++bus_cur->needMemUpdate;
@@ -659,13 +659,13 @@ int ibmphp_add_resource (struct resource_node *res)
}
res->rangeno = -1;
}
-
+
debug ("The range is %d\n", res->rangeno);
if (!res_start) {
/* no first{IO,Mem,Pfmem} on the bus, 1st IO/Mem/Pfmem resource ever */
switch (res->type) {
case IO:
- bus_cur->firstIO = res;
+ bus_cur->firstIO = res;
break;
case MEM:
bus_cur->firstMem = res;
@@ -673,7 +673,7 @@ int ibmphp_add_resource (struct resource_node *res)
case PFMEM:
bus_cur->firstPFMem = res;
break;
- }
+ }
res->next = NULL;
res->nextRange = NULL;
} else {
@@ -770,7 +770,7 @@ int ibmphp_add_resource (struct resource_node *res)
* This routine will remove the resource from the list of resources
*
* Input: io, mem, and/or pfmem resource to be deleted
- * Ouput: modified resource list
+ * Output: modified resource list
* 0 or error code
****************************************************************************/
int ibmphp_remove_resource (struct resource_node *res)
@@ -789,8 +789,7 @@ int ibmphp_remove_resource (struct resource_node *res)
bus_cur = find_bus_wprev (res->busno, NULL, 0);
if (!bus_cur) {
- err ("cannot find corresponding bus of the io resource to remove "
- "bailing out...\n");
+ err ("cannot find corresponding bus of the io resource to remove bailing out...\n");
return -ENODEV;
}
@@ -825,7 +824,7 @@ int ibmphp_remove_resource (struct resource_node *res)
if (!res_cur) {
if (res->type == PFMEM) {
- /*
+ /*
* case where pfmem might be in the PFMemFromMem list
* so will also need to remove the corresponding mem
* entry
@@ -934,9 +933,9 @@ int ibmphp_remove_resource (struct resource_node *res)
return 0;
}
-static struct range_node * find_range (struct bus_node *bus_cur, struct resource_node * res)
+static struct range_node *find_range (struct bus_node *bus_cur, struct resource_node *res)
{
- struct range_node * range = NULL;
+ struct range_node *range = NULL;
switch (res->type) {
case IO:
@@ -961,12 +960,12 @@ static struct range_node * find_range (struct bus_node *bus_cur, struct resource
}
/*****************************************************************************
- * This routine will check to make sure the io/mem/pfmem->len that the device asked for
+ * This routine will check to make sure the io/mem/pfmem->len that the device asked for
* can fit w/i our list of available IO/MEM/PFMEM resources. If cannot, returns -EINVAL,
* otherwise, returns 0
*
* Input: resource
- * Ouput: the correct start and end address are inputted into the resource node,
+ * Output: the correct start and end address are inputted into the resource node,
* 0 or -EINVAL
*****************************************************************************/
int ibmphp_check_resource (struct resource_node *res, u8 bridge)
@@ -996,7 +995,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
bus_cur = find_bus_wprev (res->busno, NULL, 0);
if (!bus_cur) {
- /* didn't find a bus, smth's wrong!!! */
+ /* didn't find a bus, something's wrong!!! */
debug ("no bus in the system, either pci_dev's wrong or allocation failed\n");
return -EINVAL;
}
@@ -1066,7 +1065,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
break;
}
}
-
+
if (flag && len_cur == res->len) {
debug ("but we are not here, right?\n");
res->start = start_cur;
@@ -1118,10 +1117,10 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
if (res_prev) {
if (res_prev->rangeno != res_cur->rangeno) {
/* 1st device on this range */
- if ((res_cur->start != range->start) &&
+ if ((res_cur->start != range->start) &&
((len_tmp = res_cur->start - 1 - range->start) >= res->len)) {
if ((len_tmp < len_cur) || (len_cur == 0)) {
- if ((range->start % tmp_divide) == 0) {
+ if ((range->start % tmp_divide) == 0) {
/* just perfect, starting address is divisible by length */
flag = 1;
len_cur = len_tmp;
@@ -1344,7 +1343,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
* This routine is called from remove_card if the card contained PPB.
* It will remove all the resources on the bus as well as the bus itself
* Input: Bus
- * Ouput: 0, -ENODEV
+ * Output: 0, -ENODEV
********************************************************************************/
int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno)
{
@@ -1353,7 +1352,7 @@ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno)
struct bus_node *prev_bus;
int rc;
- prev_bus = find_bus_wprev (parent_busno, NULL, 0);
+ prev_bus = find_bus_wprev (parent_busno, NULL, 0);
if (!prev_bus) {
debug ("something terribly wrong. Cannot find parent bus to the one to remove\n");
@@ -1424,7 +1423,7 @@ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno)
}
/******************************************************************************
- * This routine deletes the ranges from a given bus, and the entries from the
+ * This routine deletes the ranges from a given bus, and the entries from the
* parent's bus in the resources
* Input: current bus, previous bus
* Output: 0, -EINVAL
@@ -1453,7 +1452,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev)
if (bus_cur->noMemRanges) {
range_cur = bus_cur->rangeMem;
for (i = 0; i < bus_cur->noMemRanges; i++) {
- if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0)
+ if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0)
return -EINVAL;
ibmphp_remove_resource (res);
@@ -1467,7 +1466,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev)
if (bus_cur->noPFMemRanges) {
range_cur = bus_cur->rangePFMem;
for (i = 0; i < bus_cur->noPFMemRanges; i++) {
- if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0)
+ if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0)
return -EINVAL;
ibmphp_remove_resource (res);
@@ -1482,7 +1481,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev)
}
/*
- * find the resource node in the bus
+ * find the resource node in the bus
* Input: Resource needed, start address of the resource, type of resource
*/
int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resource_node **res, int flag)
@@ -1512,7 +1511,7 @@ int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resour
err ("wrong type of flag\n");
return -EINVAL;
}
-
+
while (res_cur) {
if (res_cur->start == start_address) {
*res = res_cur;
@@ -1718,7 +1717,7 @@ static int __init once_over (void)
} /* end for pfmem */
} /* end if */
} /* end list_for_each bus */
- return 0;
+ return 0;
}
int ibmphp_add_pfmem_from_mem (struct resource_node *pfmem)
@@ -1760,9 +1759,9 @@ static struct bus_node *find_bus_wprev (u8 bus_number, struct bus_node **prev, u
list_for_each (tmp, &gbuses) {
tmp_prev = tmp->prev;
bus_cur = list_entry (tmp, struct bus_node, bus_list);
- if (flag)
+ if (flag)
*prev = list_entry (tmp_prev, struct bus_node, bus_list);
- if (bus_cur->busno == bus_number)
+ if (bus_cur->busno == bus_number)
return bus_cur;
}
@@ -1776,7 +1775,7 @@ void ibmphp_print_test (void)
struct range_node *range;
struct resource_node *res;
struct list_head *tmp;
-
+
debug_pci ("*****************START**********************\n");
if ((!list_empty(&gbuses)) && flags) {
@@ -1906,7 +1905,7 @@ static int range_exists_already (struct range_node * range, struct bus_node * bu
return 1;
range_cur = range_cur->next;
}
-
+
return 0;
}
@@ -1920,7 +1919,7 @@ static int range_exists_already (struct range_node * range, struct bus_node * bu
* Returns: none
* Note: this function doesn't take into account IO restrictions etc,
* so will only work for bridges with no video/ISA devices behind them It
- * also will not work for onboard PPB's that can have more than 1 *bus
+ * also will not work for onboard PPBs that can have more than 1 bus
* behind them All these are TO DO.
* Also need to add more error checkings... (from fnc returns etc)
*/
@@ -1963,7 +1962,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
case PCI_HEADER_TYPE_BRIDGE:
function = 0x8;
case PCI_HEADER_TYPE_MULTIBRIDGE:
- /* We assume here that only 1 bus behind the bridge
+ /* We assume here that only 1 bus is behind the bridge
TO DO: add functionality for several:
temp = secondary;
while (temp < subordinate) {
@@ -1972,7 +1971,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
}
*/
pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_busno);
- bus_sec = find_bus_wprev (sec_busno, NULL, 0);
+ bus_sec = find_bus_wprev (sec_busno, NULL, 0);
/* this bus structure doesn't exist yet, PPB was configured during previous loading of ibmphp */
if (!bus_sec) {
bus_sec = alloc_error_bus (NULL, sec_busno, 1);
@@ -2028,7 +2027,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
io->len = io->end - io->start + 1;
ibmphp_add_resource (io);
}
- }
+ }
pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &start_mem_address);
pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, &end_mem_address);
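ibmphp_check_resource() above keeps testing candidate starting addresses with (range->start % tmp_divide) == 0 because PCI regions must be naturally aligned: a resource of length N, always a power of two, has to begin on an N-byte boundary. A small self-contained sketch of rounding a candidate up to that alignment and verifying it still fits in a free gap (names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Round gap_start up to the next multiple of len (a power of two, as
 * PCI BAR sizes always are), then check the region still fits.
 * Returns the aligned start, or 0 when the gap is too small. */
static uint32_t fit_aligned(uint32_t gap_start, uint32_t gap_end, uint32_t len)
{
    uint32_t start = (gap_start + len - 1) & ~(len - 1);

    if (start + len - 1 > gap_end)
        return 0;   /* gap too small once aligned */
    return start;
}

int main(void)
{
    /* hypothetical free gap and a 0x1000-byte request */
    uint32_t start = fit_aligned(0x8200, 0x9fff, 0x1000);

    if (start)
        printf("place resource at %#x\n", (unsigned)start);
    return 0;
}
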
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 6d2eea93298..56d8486dc16 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -41,6 +41,7 @@
#include <linux/pci_hotplug.h>
#include <asm/uaccess.h>
#include "../pci.h"
+#include "cpci_hotplug.h"
#define MY_NAME "pci_hotplug"
@@ -51,29 +52,19 @@
/* local variables */
-static int debug;
+static bool debug;
#define DRIVER_VERSION "0.5"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Scott Murray <scottm@somanetworks.com>"
#define DRIVER_DESC "PCI Hot Plug PCI Core"
-//////////////////////////////////////////////////////////////////
-
static LIST_HEAD(pci_hotplug_slot_list);
static DEFINE_MUTEX(pci_hp_mutex);
-#ifdef CONFIG_HOTPLUG_PCI_CPCI
-extern int cpci_hotplug_init(int debug);
-extern void cpci_hotplug_exit(void);
-#else
-static inline int cpci_hotplug_init(int debug) { return 0; }
-static inline void cpci_hotplug_exit(void) { }
-#endif
-
/* Weee, fun with macros... */
-#define GET_STATUS(name,type) \
-static int get_##name (struct hotplug_slot *slot, type *value) \
+#define GET_STATUS(name, type) \
+static int get_##name(struct hotplug_slot *slot, type *value) \
{ \
struct hotplug_slot_ops *ops = slot->ops; \
int retval = 0; \
@@ -99,46 +90,45 @@ static ssize_t power_read_file(struct pci_slot *slot, char *buf)
retval = get_power_status(slot->hotplug, &value);
if (retval)
- goto exit;
- retval = sprintf (buf, "%d\n", value);
-exit:
- return retval;
+ return retval;
+
+ return sprintf(buf, "%d\n", value);
}
static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
- size_t count)
+ size_t count)
{
struct hotplug_slot *slot = pci_slot->hotplug;
unsigned long lpower;
u8 power;
int retval = 0;
- lpower = simple_strtoul (buf, NULL, 10);
+ lpower = simple_strtoul(buf, NULL, 10);
power = (u8)(lpower & 0xff);
- dbg ("power = %d\n", power);
+ dbg("power = %d\n", power);
if (!try_module_get(slot->ops->owner)) {
retval = -ENODEV;
goto exit;
}
switch (power) {
- case 0:
- if (slot->ops->disable_slot)
- retval = slot->ops->disable_slot(slot);
- break;
-
- case 1:
- if (slot->ops->enable_slot)
- retval = slot->ops->enable_slot(slot);
- break;
-
- default:
- err ("Illegal value specified for power\n");
- retval = -EINVAL;
+ case 0:
+ if (slot->ops->disable_slot)
+ retval = slot->ops->disable_slot(slot);
+ break;
+
+ case 1:
+ if (slot->ops->enable_slot)
+ retval = slot->ops->enable_slot(slot);
+ break;
+
+ default:
+ err("Illegal value specified for power\n");
+ retval = -EINVAL;
}
module_put(slot->ops->owner);
-exit:
+exit:
if (retval)
return retval;
return count;
@@ -157,24 +147,22 @@ static ssize_t attention_read_file(struct pci_slot *slot, char *buf)
retval = get_attention_status(slot->hotplug, &value);
if (retval)
- goto exit;
- retval = sprintf(buf, "%d\n", value);
+ return retval;
-exit:
- return retval;
+ return sprintf(buf, "%d\n", value);
}
static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
- size_t count)
+ size_t count)
{
struct hotplug_slot_ops *ops = slot->hotplug->ops;
unsigned long lattention;
u8 attention;
int retval = 0;
- lattention = simple_strtoul (buf, NULL, 10);
+ lattention = simple_strtoul(buf, NULL, 10);
attention = (u8)(lattention & 0xff);
- dbg (" - attention = %d\n", attention);
+ dbg(" - attention = %d\n", attention);
if (!try_module_get(ops->owner)) {
retval = -ENODEV;
@@ -184,7 +172,7 @@ static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
retval = ops->set_attention_status(slot->hotplug, attention);
module_put(ops->owner);
-exit:
+exit:
if (retval)
return retval;
return count;
@@ -203,11 +191,9 @@ static ssize_t latch_read_file(struct pci_slot *slot, char *buf)
retval = get_latch_status(slot->hotplug, &value);
if (retval)
- goto exit;
- retval = sprintf (buf, "%d\n", value);
+ return retval;
-exit:
- return retval;
+ return sprintf(buf, "%d\n", value);
}
static struct pci_slot_attribute hotplug_slot_attr_latch = {
@@ -222,11 +208,9 @@ static ssize_t presence_read_file(struct pci_slot *slot, char *buf)
retval = get_adapter_status(slot->hotplug, &value);
if (retval)
- goto exit;
- retval = sprintf (buf, "%d\n", value);
+ return retval;
-exit:
- return retval;
+ return sprintf(buf, "%d\n", value);
}
static struct pci_slot_attribute hotplug_slot_attr_presence = {
@@ -235,7 +219,7 @@ static struct pci_slot_attribute hotplug_slot_attr_presence = {
};
static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
- size_t count)
+ size_t count)
{
struct hotplug_slot *slot = pci_slot->hotplug;
unsigned long ltest;
@@ -244,7 +228,7 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
ltest = simple_strtoul (buf, NULL, 10);
test = (u32)(ltest & 0xffffffff);
- dbg ("test = %d\n", test);
+ dbg("test = %d\n", test);
if (!try_module_get(slot->ops->owner)) {
retval = -ENODEV;
@@ -254,7 +238,7 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
retval = slot->ops->hardware_test(slot, test);
module_put(slot->ops->owner);
-exit:
+exit:
if (retval)
return retval;
return count;
@@ -268,6 +252,7 @@ static struct pci_slot_attribute hotplug_slot_attr_test = {
static bool has_power_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
+
if ((!slot) || (!slot->ops))
return false;
if ((slot->ops->enable_slot) ||
@@ -280,6 +265,7 @@ static bool has_power_file(struct pci_slot *pci_slot)
static bool has_attention_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
+
if ((!slot) || (!slot->ops))
return false;
if ((slot->ops->set_attention_status) ||
@@ -291,6 +277,7 @@ static bool has_attention_file(struct pci_slot *pci_slot)
static bool has_latch_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
+
if ((!slot) || (!slot->ops))
return false;
if (slot->ops->get_latch_status)
@@ -301,6 +288,7 @@ static bool has_latch_file(struct pci_slot *pci_slot)
static bool has_adapter_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
+
if ((!slot) || (!slot->ops))
return false;
if (slot->ops->get_adapter_status)
@@ -311,6 +299,7 @@ static bool has_adapter_file(struct pci_slot *pci_slot)
static bool has_test_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
+
if ((!slot) || (!slot->ops))
return false;
if (slot->ops->hardware_test)
@@ -404,13 +393,13 @@ static void fs_remove_slot(struct pci_slot *slot)
pci_hp_remove_module_link(slot);
}
-static struct hotplug_slot *get_slot_from_name (const char *name)
+static struct hotplug_slot *get_slot_from_name(const char *name)
{
struct hotplug_slot *slot;
struct list_head *tmp;
- list_for_each (tmp, &pci_hotplug_slot_list) {
- slot = list_entry (tmp, struct hotplug_slot, slot_list);
+ list_for_each(tmp, &pci_hotplug_slot_list) {
+ slot = list_entry(tmp, struct hotplug_slot, slot_list);
if (strcmp(hotplug_slot_name(slot), name) == 0)
return slot;
}
@@ -443,8 +432,7 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
if ((slot->info == NULL) || (slot->ops == NULL))
return -EINVAL;
if (slot->release == NULL) {
- dbg("Why are you trying to register a hotplug slot "
- "without a proper release function?\n");
+ dbg("Why are you trying to register a hotplug slot without a proper release function?\n");
return -EINVAL;
}
@@ -475,6 +463,7 @@ out:
mutex_unlock(&pci_hp_mutex);
return result;
}
+EXPORT_SYMBOL_GPL(__pci_hp_register);
/**
* pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem
@@ -513,47 +502,45 @@ int pci_hp_deregister(struct hotplug_slot *hotplug)
return 0;
}
+EXPORT_SYMBOL_GPL(pci_hp_deregister);
/**
* pci_hp_change_slot_info - changes the slot's information structure in the core
* @hotplug: pointer to the slot whose info has changed
* @info: pointer to the info copy into the slot's info structure
*
- * @slot must have been registered with the pci
+ * @slot must have been registered with the pci
* hotplug subsystem previously with a call to pci_hp_register().
*
* Returns 0 if successful, anything else for an error.
*/
-int __must_check pci_hp_change_slot_info(struct hotplug_slot *hotplug,
- struct hotplug_slot_info *info)
+int pci_hp_change_slot_info(struct hotplug_slot *hotplug,
+ struct hotplug_slot_info *info)
{
- struct pci_slot *slot;
if (!hotplug || !info)
return -ENODEV;
- slot = hotplug->pci_slot;
memcpy(hotplug->info, info, sizeof(struct hotplug_slot_info));
return 0;
}
+EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
-static int __init pci_hotplug_init (void)
+static int __init pci_hotplug_init(void)
{
int result;
result = cpci_hotplug_init(debug);
if (result) {
- err ("cpci_hotplug_init with error %d\n", result);
- goto err_cpci;
+ err("cpci_hotplug_init with error %d\n", result);
+ return result;
}
- info (DRIVER_DESC " version: " DRIVER_VERSION "\n");
-
-err_cpci:
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return result;
}
-static void __exit pci_hotplug_exit (void)
+static void __exit pci_hotplug_exit(void)
{
cpci_hotplug_exit();
}
@@ -566,7 +553,3 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-
-EXPORT_SYMBOL_GPL(__pci_hp_register);
-EXPORT_SYMBOL_GPL(pci_hp_deregister);
-EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
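The GET_STATUS(name, type) macro reworked above is a token-pasting generator: each expansion stamps out a get_<name>() accessor that dispatches to the matching ops->get_<name> hook. A compilable sketch of the same pattern with an invented ops structure (the kernel version additionally pins the providing module with try_module_get() around the call):

#include <stdio.h>

struct ops {
    int (*get_power)(int *value);
    int (*get_latch)(int *value);
};

static int power_hook(int *value) { *value = 1; return 0; }

static struct ops slot_ops = { .get_power = power_hook };

/* One expansion per attribute: ## pastes the name into both the
 * generated function identifier and the ops field it calls. */
#define GET_STATUS(name)                                \
static int get_##name(struct ops *ops, int *value)      \
{                                                       \
    if (ops->get_##name)                                \
        return ops->get_##name(value);                  \
    *value = 0;     /* no hook: report a default */     \
    return 0;                                           \
}

GET_STATUS(power)
GET_STATUS(latch)

int main(void)
{
    int v;

    get_power(&slot_ops, &v);
    printf("power = %d\n", v);
    get_latch(&slot_ops, &v);
    printf("latch = %d\n", v);
    return 0;
}
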
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 838f571027b..8e9012dca45 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -40,12 +40,9 @@
#define MY_NAME "pciehp"
-extern int pciehp_poll_mode;
+extern bool pciehp_poll_mode;
extern int pciehp_poll_time;
-extern int pciehp_debug;
-extern int pciehp_force;
-extern struct workqueue_struct *pciehp_wq;
-extern struct workqueue_struct *pciehp_ordered_wq;
+extern bool pciehp_debug;
#define dbg(format, arg...) \
do { \
@@ -79,6 +76,8 @@ struct slot {
struct hotplug_slot *hotplug_slot;
struct delayed_work work; /* work for button event */
struct mutex lock;
+ struct mutex hotplug_lock;
+ struct workqueue_struct *wq;
};
struct event_info {
@@ -111,6 +110,8 @@ struct controller {
#define INT_BUTTON_PRESS 7
#define INT_BUTTON_RELEASE 8
#define INT_BUTTON_CANCEL 9
+#define INT_LINK_UP 10
+#define INT_LINK_DOWN 11
#define STATIC_STATE 0
#define BLINKINGON_STATE 1
@@ -126,40 +127,39 @@ struct controller {
#define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_HPS)
#define EMI(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_EIP)
#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS)
-#define PSN(ctrl) ((ctrl)->slot_cap >> 19)
-
-extern int pciehp_sysfs_enable_slot(struct slot *slot);
-extern int pciehp_sysfs_disable_slot(struct slot *slot);
-extern u8 pciehp_handle_attention_button(struct slot *p_slot);
-extern u8 pciehp_handle_switch_change(struct slot *p_slot);
-extern u8 pciehp_handle_presence_change(struct slot *p_slot);
-extern u8 pciehp_handle_power_fault(struct slot *p_slot);
-extern int pciehp_configure_device(struct slot *p_slot);
-extern int pciehp_unconfigure_device(struct slot *p_slot);
-extern void pciehp_queue_pushbutton_work(struct work_struct *work);
+#define PSN(ctrl) (((ctrl)->slot_cap & PCI_EXP_SLTCAP_PSN) >> 19)
+
+int pciehp_sysfs_enable_slot(struct slot *slot);
+int pciehp_sysfs_disable_slot(struct slot *slot);
+u8 pciehp_handle_attention_button(struct slot *p_slot);
+u8 pciehp_handle_switch_change(struct slot *p_slot);
+u8 pciehp_handle_presence_change(struct slot *p_slot);
+u8 pciehp_handle_power_fault(struct slot *p_slot);
+void pciehp_handle_linkstate_change(struct slot *p_slot);
+int pciehp_configure_device(struct slot *p_slot);
+int pciehp_unconfigure_device(struct slot *p_slot);
+void pciehp_queue_pushbutton_work(struct work_struct *work);
struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
int pciehp_enable_slot(struct slot *p_slot);
int pciehp_disable_slot(struct slot *p_slot);
-int pcie_enable_notification(struct controller *ctrl);
+void pcie_enable_notification(struct controller *ctrl);
int pciehp_power_on_slot(struct slot *slot);
-int pciehp_power_off_slot(struct slot *slot);
-int pciehp_get_power_status(struct slot *slot, u8 *status);
-int pciehp_get_attention_status(struct slot *slot, u8 *status);
-
-int pciehp_set_attention_status(struct slot *slot, u8 status);
-int pciehp_get_latch_status(struct slot *slot, u8 *status);
-int pciehp_get_adapter_status(struct slot *slot, u8 *status);
-int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *speed);
-int pciehp_get_max_link_width(struct slot *slot, enum pcie_link_width *val);
-int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *speed);
-int pciehp_get_cur_link_width(struct slot *slot, enum pcie_link_width *val);
+void pciehp_power_off_slot(struct slot *slot);
+void pciehp_get_power_status(struct slot *slot, u8 *status);
+void pciehp_get_attention_status(struct slot *slot, u8 *status);
+
+void pciehp_set_attention_status(struct slot *slot, u8 status);
+void pciehp_get_latch_status(struct slot *slot, u8 *status);
+void pciehp_get_adapter_status(struct slot *slot, u8 *status);
int pciehp_query_power_fault(struct slot *slot);
void pciehp_green_led_on(struct slot *slot);
void pciehp_green_led_off(struct slot *slot);
void pciehp_green_led_blink(struct slot *slot);
int pciehp_check_link_status(struct controller *ctrl);
+bool pciehp_check_link_active(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
+int pciehp_reset_slot(struct slot *slot, int probe);
static inline const char *slot_name(struct slot *slot)
{
@@ -167,12 +167,10 @@ static inline const char *slot_name(struct slot *slot)
}
#ifdef CONFIG_ACPI
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
#include <linux/pci-acpi.h>
-extern void __init pciehp_acpi_slot_detection_init(void);
-extern int pciehp_acpi_slot_detection_check(struct pci_dev *dev);
+void __init pciehp_acpi_slot_detection_init(void);
+int pciehp_acpi_slot_detection_check(struct pci_dev *dev);
static inline void pciehp_firmware_init(void)
{
@@ -184,5 +182,5 @@ static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
{
return 0;
}
-#endif /* CONFIG_ACPI */
+#endif /* CONFIG_ACPI */
#endif /* _PCIEHP_H */
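The PSN() rework above swaps a bare shift for mask-then-shift. Since the Physical Slot Number occupies bits 31:19 of the 32-bit Slot Capabilities register, the two forms currently compute the same value; masking with PCI_EXP_SLTCAP_PSN first documents the field width and stays correct if the expression is ever applied to a wider type. A standalone sketch with a fabricated register value:

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_SLTCAP_PSN  0xfff80000u /* Physical Slot Number, bits 31:19 */

static unsigned int slot_psn(uint32_t slot_cap)
{
    /* mask first so the field boundaries are explicit, then shift down */
    return (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19;
}

int main(void)
{
    uint32_t slot_cap = (42u << 19) | 0x0007ffffu;  /* fabricated SLTCAP */

    printf("physical slot number = %u\n", slot_psn(slot_cap));
    return 0;
}
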
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 2574700db46..93cc9266e8c 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "pciehp.h"
#define PCIEHP_DETECT_PCIE (0)
@@ -53,7 +54,7 @@ int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
{
if (slot_detection_mode != PCIEHP_DETECT_ACPI)
return 0;
- if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev)))
+ if (acpi_pci_detect_ejectable(ACPI_HANDLE(&dev->dev)))
return 0;
return -ENODEV;
}
@@ -77,45 +78,43 @@ static int __initdata dup_slot_id;
static int __initdata acpi_slot_detected;
static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
-/* Dummy driver for dumplicate name detection */
+/* Dummy driver for duplicate name detection */
static int __init dummy_probe(struct pcie_device *dev)
{
- int pos;
u32 slot_cap;
acpi_handle handle;
struct dummy_slot *slot, *tmp;
struct pci_dev *pdev = dev->port;
- pos = pci_pcie_cap(pdev);
- if (!pos)
- return -ENODEV;
- pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
+ pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
- slot->number = slot_cap >> 19;
+ slot->number = (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19;
list_for_each_entry(tmp, &dummy_slots, list) {
if (tmp->number == slot->number)
dup_slot_id++;
}
list_add_tail(&slot->list, &dummy_slots);
- handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ handle = ACPI_HANDLE(&pdev->dev);
if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle))
acpi_slot_detected = 1;
return -ENODEV; /* dummy driver always returns error */
}
static struct pcie_port_service_driver __initdata dummy_driver = {
- .name = "pciehp_dummy",
+ .name = "pciehp_dummy",
.port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_HP,
- .probe = dummy_probe,
+ .probe = dummy_probe,
};
static int __init select_detection_mode(void)
{
struct dummy_slot *slot, *tmp;
- pcie_port_service_register(&dummy_driver);
+
+ if (pcie_port_service_register(&dummy_driver))
+ return PCIEHP_DETECT_ACPI;
pcie_port_service_unregister(&dummy_driver);
list_for_each_entry_safe(slot, tmp, &dummy_slots, list) {
list_del(&slot->list);
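dummy_probe() above drops the open-coded lookup (pci_pcie_cap() to find the capability offset, then pci_read_config_dword() at pos + PCI_EXP_SLTCAP) in favor of pcie_capability_read_dword(), which hides the offset arithmetic and fails gracefully when the capability is absent. A toy user-space mock of why such an accessor makes for a nicer call site (the config-space layout and offsets here are invented):

#include <stdint.h>
#include <stdio.h>

#define EXP_CAP_BASE    0x40    /* invented capability offset */
#define EXP_SLTCAP      0x14    /* register offset within the capability */

static uint32_t cfg_space[64] = {
    [(EXP_CAP_BASE + EXP_SLTCAP) / 4] = 42u << 19,  /* fabricated SLTCAP */
};

/* Mimics the shape of pcie_capability_read_dword(): resolve the
 * capability base, bail out cleanly if it is absent, return the
 * register through *val. */
static int cap_read_dword(int has_cap, int where, uint32_t *val)
{
    *val = 0;
    if (!has_cap)
        return -1;  /* device has no PCIe capability */
    *val = cfg_space[(EXP_CAP_BASE + where) / 4];
    return 0;
}

int main(void)
{
    uint32_t sltcap;

    if (cap_read_dword(1, EXP_SLTCAP, &sltcap) == 0)
        printf("slot number %u\n", (unsigned)(sltcap >> 19));
    return 0;
}
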
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 7ac8358df8f..a2297db8081 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -38,12 +38,10 @@
#include <linux/time.h>
/* Global variables */
-int pciehp_debug;
-int pciehp_poll_mode;
+bool pciehp_debug;
+bool pciehp_poll_mode;
int pciehp_poll_time;
-int pciehp_force;
-struct workqueue_struct *pciehp_wq;
-struct workqueue_struct *pciehp_ordered_wq;
+static bool pciehp_force;
#define DRIVER_VERSION "0.4"
#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -71,6 +69,7 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
static int get_attention_status (struct hotplug_slot *slot, u8 *value);
static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
+static int reset_slot (struct hotplug_slot *slot, int probe);
/**
* release_slot - free up the memory used by a slot
@@ -109,10 +108,12 @@ static int init_slot(struct controller *ctrl)
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
goto out;
+
ops->enable_slot = enable_slot;
ops->disable_slot = disable_slot;
ops->get_power_status = get_power_status;
ops->get_adapter_status = get_adapter_status;
+ ops->reset_slot = reset_slot;
if (MRL_SENS(ctrl))
ops->get_latch_status = get_latch_status;
if (ATTN_LED(ctrl)) {
@@ -160,7 +161,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- return pciehp_set_attention_status(slot, status);
+ pciehp_set_attention_status(slot, status);
+ return 0;
}
@@ -192,7 +194,8 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- return pciehp_get_power_status(slot, value);
+ pciehp_get_power_status(slot, value);
+ return 0;
}
static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
@@ -202,7 +205,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- return pciehp_get_attention_status(slot, value);
+ pciehp_get_attention_status(slot, value);
+ return 0;
}
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
@@ -212,7 +216,8 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- return pciehp_get_latch_status(slot, value);
+ pciehp_get_latch_status(slot, value);
+ return 0;
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
@@ -222,7 +227,18 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- return pciehp_get_adapter_status(slot, value);
+ pciehp_get_adapter_status(slot, value);
+ return 0;
+}
+
+static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ return pciehp_reset_slot(slot, probe);
}
static int pciehp_probe(struct pcie_device *dev)
@@ -250,8 +266,7 @@ static int pciehp_probe(struct pcie_device *dev)
rc = init_slot(ctrl);
if (rc) {
if (rc == -EBUSY)
- ctrl_warn(ctrl, "Slot already registered by another "
- "hotplug driver\n");
+ ctrl_warn(ctrl, "Slot already registered by another hotplug driver\n");
else
ctrl_err(ctrl, "Slot initialization failed\n");
goto err_out_release_ctlr;
@@ -268,8 +283,11 @@ static int pciehp_probe(struct pcie_device *dev)
slot = ctrl->slot;
pciehp_get_adapter_status(slot, &occupied);
pciehp_get_power_status(slot, &poweron);
- if (occupied && pciehp_force)
+ if (occupied && pciehp_force) {
+ mutex_lock(&slot->hotplug_lock);
pciehp_enable_slot(slot);
+ mutex_unlock(&slot->hotplug_lock);
+ }
/* If empty slot's power status is on, turn power off */
if (!occupied && poweron && POWER_CTRL(ctrl))
pciehp_power_off_slot(slot);
@@ -293,32 +311,32 @@ static void pciehp_remove(struct pcie_device *dev)
}
#ifdef CONFIG_PM
-static int pciehp_suspend (struct pcie_device *dev)
+static int pciehp_suspend(struct pcie_device *dev)
{
- dev_info(&dev->device, "%s ENTRY\n", __func__);
return 0;
}
-static int pciehp_resume (struct pcie_device *dev)
+static int pciehp_resume(struct pcie_device *dev)
{
- dev_info(&dev->device, "%s ENTRY\n", __func__);
- if (pciehp_force) {
- struct controller *ctrl = get_service_data(dev);
- struct slot *slot;
- u8 status;
+ struct controller *ctrl;
+ struct slot *slot;
+ u8 status;
- /* reinitialize the chipset's event detection logic */
- pcie_enable_notification(ctrl);
+ ctrl = get_service_data(dev);
- slot = ctrl->slot;
+ /* reinitialize the chipset's event detection logic */
+ pcie_enable_notification(ctrl);
- /* Check if slot is occupied */
- pciehp_get_adapter_status(slot, &status);
- if (status)
- pciehp_enable_slot(slot);
- else
- pciehp_disable_slot(slot);
- }
+ slot = ctrl->slot;
+
+ /* Check if slot is occupied */
+ pciehp_get_adapter_status(slot, &status);
+ mutex_lock(&slot->hotplug_lock);
+ if (status)
+ pciehp_enable_slot(slot);
+ else
+ pciehp_disable_slot(slot);
+ mutex_unlock(&slot->hotplug_lock);
return 0;
}
#endif /* PM */
@@ -341,33 +359,19 @@ static int __init pcied_init(void)
{
int retval = 0;
- pciehp_wq = alloc_workqueue("pciehp", 0, 0);
- if (!pciehp_wq)
- return -ENOMEM;
-
- pciehp_ordered_wq = alloc_ordered_workqueue("pciehp_ordered", 0);
- if (!pciehp_ordered_wq) {
- destroy_workqueue(pciehp_wq);
- return -ENOMEM;
- }
-
pciehp_firmware_init();
retval = pcie_port_service_register(&hpdriver_portdrv);
- dbg("pcie_port_service_register = %d\n", retval);
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
- if (retval) {
- destroy_workqueue(pciehp_ordered_wq);
- destroy_workqueue(pciehp_wq);
+ dbg("pcie_port_service_register = %d\n", retval);
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+ if (retval)
dbg("Failure to register service\n");
- }
+
return retval;
}
static void __exit pcied_cleanup(void)
{
dbg("unload_pciehpd()\n");
- destroy_workqueue(pciehp_ordered_wq);
- destroy_workqueue(pciehp_wq);
pcie_port_service_unregister(&hpdriver_portdrv);
info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
}
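The pcied_init()/pcied_cleanup() hunks retire the two module-global workqueues (pciehp_wq, pciehp_ordered_wq); combined with the new per-slot wq member added to struct slot in pciehp.h, each slot now owns its own queue, so one controller's events cannot stall behind another's. A minimal kernel-style sketch of that ownership pattern (struct and function names are invented; this sketches the idiom, not the driver's actual init path):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct myslot {
    struct workqueue_struct *wq;    /* owned by this slot alone */
    struct work_struct event_work;
};

static void event_fn(struct work_struct *work)
{
    /* struct myslot *slot = container_of(work, struct myslot, event_work); */
}

static struct myslot *myslot_create(const char *name)
{
    struct myslot *slot = kzalloc(sizeof(*slot), GFP_KERNEL);

    if (!slot)
        return NULL;
    /* one ordered queue per slot: events stay serialized within a
     * slot, but separate slots no longer share a global queue */
    slot->wq = alloc_ordered_workqueue("%s", 0, name);
    if (!slot->wq) {
        kfree(slot);
        return NULL;
    }
    INIT_WORK(&slot->event_work, event_fn);
    return slot;
}

static void myslot_destroy(struct myslot *slot)
{
    destroy_workqueue(slot->wq);    /* drains pending work first */
    kfree(slot);
}

The pciehp_ctrl.c diff below shows the consumer side: queue_work(p_slot->wq, ...) replaces every queue_work(pciehp_wq, ...) and queue_work(pciehp_ordered_wq, ...) call.
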
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 085dbb5fc16..ff32e85e1de 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -49,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
info->p_slot = p_slot;
INIT_WORK(&info->work, interrupt_event_handler);
- queue_work(pciehp_wq, &info->work);
+ queue_work(p_slot->wq, &info->work);
return 0;
}
@@ -150,19 +150,37 @@ u8 pciehp_handle_power_fault(struct slot *p_slot)
return 1;
}
+void pciehp_handle_linkstate_change(struct slot *p_slot)
+{
+ u32 event_type;
+ struct controller *ctrl = p_slot->ctrl;
+
+ /* Link Status Change */
+ ctrl_dbg(ctrl, "Data Link Layer State change\n");
+
+ if (pciehp_check_link_active(ctrl)) {
+ ctrl_info(ctrl, "slot(%s): Link Up event\n",
+ slot_name(p_slot));
+ event_type = INT_LINK_UP;
+ } else {
+ ctrl_info(ctrl, "slot(%s): Link Down event\n",
+ slot_name(p_slot));
+ event_type = INT_LINK_DOWN;
+ }
+
+ queue_interrupt_event(p_slot, event_type);
+}
+
/* The following routines constitute the bulk of the
hotplug controller logic
*/
-static void set_slot_off(struct controller *ctrl, struct slot * pslot)
+static void set_slot_off(struct controller *ctrl, struct slot *pslot)
{
/* turn off slot, turn on Amber LED, turn off Green LED if supported*/
if (POWER_CTRL(ctrl)) {
- if (pciehp_power_off_slot(pslot)) {
- ctrl_err(ctrl,
- "Issue of Slot Power Off command failed\n");
- return;
- }
+ pciehp_power_off_slot(pslot);
+
/*
* After turning power off, we must wait for at least 1 second
* before taking any action that relies on power having been
@@ -171,16 +189,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
msleep(1000);
}
- if (PWR_LED(ctrl))
- pciehp_green_led_off(pslot);
-
- if (ATTN_LED(ctrl)) {
- if (pciehp_set_attention_status(pslot, 1)) {
- ctrl_err(ctrl,
- "Issue of Set Attention Led command failed\n");
- return;
- }
- }
+ pciehp_green_led_off(pslot);
+ pciehp_set_attention_status(pslot, 1);
}
/**
@@ -203,8 +213,7 @@ static int board_added(struct slot *p_slot)
return retval;
}
- if (PWR_LED(ctrl))
- pciehp_green_led_blink(p_slot);
+ pciehp_green_led_blink(p_slot);
/* Check link training status */
retval = pciehp_check_link_status(ctrl);
@@ -224,12 +233,11 @@ static int board_added(struct slot *p_slot)
if (retval) {
ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
pci_domain_nr(parent), parent->number);
- goto err_exit;
+ if (retval != -EEXIST)
+ goto err_exit;
}
- if (PWR_LED(ctrl))
- pciehp_green_led_on(p_slot);
-
+ pciehp_green_led_on(p_slot);
return 0;
err_exit:
@@ -243,7 +251,7 @@ err_exit:
*/
static int remove_board(struct slot *p_slot)
{
- int retval = 0;
+ int retval;
struct controller *ctrl = p_slot->ctrl;
retval = pciehp_unconfigure_device(p_slot);
@@ -251,13 +259,8 @@ static int remove_board(struct slot *p_slot)
return retval;
if (POWER_CTRL(ctrl)) {
- /* power off slot */
- retval = pciehp_power_off_slot(p_slot);
- if (retval) {
- ctrl_err(ctrl,
- "Issue of Slot Disable command failed\n");
- return retval;
- }
+ pciehp_power_off_slot(p_slot);
+
/*
* After turning power off, we must wait for at least 1 second
* before taking any action that relies on power having been
@@ -267,15 +270,16 @@ static int remove_board(struct slot *p_slot)
}
/* turn off Green LED */
- if (PWR_LED(ctrl))
- pciehp_green_led_off(p_slot);
-
+ pciehp_green_led_off(p_slot);
return 0;
}
struct power_work_info {
struct slot *p_slot;
struct work_struct work;
+ unsigned int req;
+#define DISABLE_REQ 0
+#define ENABLE_REQ 1
};
/**
@@ -290,30 +294,38 @@ static void pciehp_power_thread(struct work_struct *work)
struct power_work_info *info =
container_of(work, struct power_work_info, work);
struct slot *p_slot = info->p_slot;
+ int ret;
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
- case POWEROFF_STATE:
- mutex_unlock(&p_slot->lock);
+ switch (info->req) {
+ case DISABLE_REQ:
ctrl_dbg(p_slot->ctrl,
"Disabling domain:bus:device=%04x:%02x:00\n",
pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
p_slot->ctrl->pcie->port->subordinate->number);
+ mutex_lock(&p_slot->hotplug_lock);
pciehp_disable_slot(p_slot);
+ mutex_unlock(&p_slot->hotplug_lock);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
- break;
- case POWERON_STATE:
mutex_unlock(&p_slot->lock);
- if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl))
+ break;
+ case ENABLE_REQ:
+ ctrl_dbg(p_slot->ctrl,
+ "Enabling domain:bus:device=%04x:%02x:00\n",
+ pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
+ p_slot->ctrl->pcie->port->subordinate->number);
+ mutex_lock(&p_slot->hotplug_lock);
+ ret = pciehp_enable_slot(p_slot);
+ mutex_unlock(&p_slot->hotplug_lock);
+ if (ret)
pciehp_green_led_off(p_slot);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
+ mutex_unlock(&p_slot->lock);
break;
default:
break;
}
- mutex_unlock(&p_slot->lock);
kfree(info);
}
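pciehp_power_thread() above recovers its power_work_info from the embedded work_struct with container_of(); the same pointer walk is expressible in plain C via offsetof. A standalone sketch with invented types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct power_work_info {
    int req;
    struct work work;   /* embedded, not a pointer */
};

static void worker(struct work *w)
{
    /* walk back from the embedded member to the enclosing struct */
    struct power_work_info *info =
        container_of(w, struct power_work_info, work);

    printf("req = %d\n", info->req);
}

int main(void)
{
    struct power_work_info info = { .req = 1 };

    worker(&info.work);
    return 0;
}

Embedding the work item, rather than pointing at it, is what makes the walk-back legal: the member's offset inside the enclosing structure is a compile-time constant.
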
@@ -336,15 +348,17 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
switch (p_slot->state) {
case BLINKINGOFF_STATE:
p_slot->state = POWEROFF_STATE;
+ info->req = DISABLE_REQ;
break;
case BLINKINGON_STATE:
p_slot->state = POWERON_STATE;
+ info->req = ENABLE_REQ;
break;
default:
kfree(info);
goto out;
}
- queue_work(pciehp_ordered_wq, &info->work);
+ queue_work(p_slot->wq, &info->work);
out:
mutex_unlock(&p_slot->lock);
}
@@ -362,22 +376,17 @@ static void handle_button_press_event(struct slot *p_slot)
pciehp_get_power_status(p_slot, &getstatus);
if (getstatus) {
p_slot->state = BLINKINGOFF_STATE;
- ctrl_info(ctrl,
- "PCI slot #%s - powering off due to button "
- "press.\n", slot_name(p_slot));
+ ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n",
+ slot_name(p_slot));
} else {
p_slot->state = BLINKINGON_STATE;
- ctrl_info(ctrl,
- "PCI slot #%s - powering on due to button "
- "press.\n", slot_name(p_slot));
+ ctrl_info(ctrl, "PCI slot #%s - powering on due to button press\n",
+ slot_name(p_slot));
}
/* blink green LED and turn off amber */
- if (PWR_LED(ctrl))
- pciehp_green_led_blink(p_slot);
- if (ATTN_LED(ctrl))
- pciehp_set_attention_status(p_slot, 0);
-
- queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ);
+ pciehp_green_led_blink(p_slot);
+ pciehp_set_attention_status(p_slot, 0);
+ queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
break;
case BLINKINGOFF_STATE:
case BLINKINGON_STATE:
@@ -388,17 +397,13 @@ static void handle_button_press_event(struct slot *p_slot)
*/
ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
cancel_delayed_work(&p_slot->work);
- if (p_slot->state == BLINKINGOFF_STATE) {
- if (PWR_LED(ctrl))
- pciehp_green_led_on(p_slot);
- } else {
- if (PWR_LED(ctrl))
- pciehp_green_led_off(p_slot);
- }
- if (ATTN_LED(ctrl))
- pciehp_set_attention_status(p_slot, 0);
- ctrl_info(ctrl, "PCI slot #%s - action canceled "
- "due to button press\n", slot_name(p_slot));
+ if (p_slot->state == BLINKINGOFF_STATE)
+ pciehp_green_led_on(p_slot);
+ else
+ pciehp_green_led_off(p_slot);
+ pciehp_set_attention_status(p_slot, 0);
+ ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n",
+ slot_name(p_slot));
p_slot->state = STATIC_STATE;
break;
case POWEROFF_STATE:
@@ -434,12 +439,79 @@ static void handle_surprise_event(struct slot *p_slot)
INIT_WORK(&info->work, pciehp_power_thread);
pciehp_get_adapter_status(p_slot, &getstatus);
- if (!getstatus)
+ if (!getstatus) {
p_slot->state = POWEROFF_STATE;
- else
+ info->req = DISABLE_REQ;
+ } else {
p_slot->state = POWERON_STATE;
+ info->req = ENABLE_REQ;
+ }
+
+ queue_work(p_slot->wq, &info->work);
+}
+
+/*
+ * Note: This function must be called with slot->lock held
+ */
+static void handle_link_event(struct slot *p_slot, u32 event)
+{
+ struct controller *ctrl = p_slot->ctrl;
+ struct power_work_info *info;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
+ __func__);
+ return;
+ }
+ info->p_slot = p_slot;
+ info->req = event == INT_LINK_UP ? ENABLE_REQ : DISABLE_REQ;
+ INIT_WORK(&info->work, pciehp_power_thread);
- queue_work(pciehp_ordered_wq, &info->work);
+ switch (p_slot->state) {
+ case BLINKINGON_STATE:
+ case BLINKINGOFF_STATE:
+ cancel_delayed_work(&p_slot->work);
+ /* Fall through */
+ case STATIC_STATE:
+ p_slot->state = event == INT_LINK_UP ?
+ POWERON_STATE : POWEROFF_STATE;
+ queue_work(p_slot->wq, &info->work);
+ break;
+ case POWERON_STATE:
+ if (event == INT_LINK_UP) {
+ ctrl_info(ctrl,
+ "Link Up event ignored on slot(%s): already powering on\n",
+ slot_name(p_slot));
+ kfree(info);
+ } else {
+ ctrl_info(ctrl,
+ "Link Down event queued on slot(%s): currently getting powered on\n",
+ slot_name(p_slot));
+ p_slot->state = POWEROFF_STATE;
+ queue_work(p_slot->wq, &info->work);
+ }
+ break;
+ case POWEROFF_STATE:
+ if (event == INT_LINK_UP) {
+ ctrl_info(ctrl,
+ "Link Up event queued on slot(%s): currently getting powered off\n",
+ slot_name(p_slot));
+ p_slot->state = POWERON_STATE;
+ queue_work(p_slot->wq, &info->work);
+ } else {
+ ctrl_info(ctrl,
+ "Link Down event ignored on slot(%s): already powering off\n",
+ slot_name(p_slot));
+ kfree(info);
+ }
+ break;
+ default:
+ ctrl_err(ctrl, "Not a valid state on slot(%s)\n",
+ slot_name(p_slot));
+ kfree(info);
+ break;
+ }
}
static void interrupt_event_handler(struct work_struct *work)
@@ -456,18 +528,27 @@ static void interrupt_event_handler(struct work_struct *work)
case INT_POWER_FAULT:
if (!POWER_CTRL(ctrl))
break;
- if (ATTN_LED(ctrl))
- pciehp_set_attention_status(p_slot, 1);
- if (PWR_LED(ctrl))
- pciehp_green_led_off(p_slot);
+ pciehp_set_attention_status(p_slot, 1);
+ pciehp_green_led_off(p_slot);
break;
case INT_PRESENCE_ON:
- case INT_PRESENCE_OFF:
if (!HP_SUPR_RM(ctrl))
break;
+ ctrl_dbg(ctrl, "Surprise Insertion\n");
+ handle_surprise_event(p_slot);
+ break;
+ case INT_PRESENCE_OFF:
+ /*
+ * Regardless of surprise capability, we must remove a
+ * card that has been pulled out of the slot.
+ */
ctrl_dbg(ctrl, "Surprise Removal\n");
handle_surprise_event(p_slot);
break;
+ case INT_LINK_UP:
+ case INT_LINK_DOWN:
+ handle_link_event(p_slot, info->event_type);
+ break;
default:
break;
}
@@ -476,20 +557,23 @@ static void interrupt_event_handler(struct work_struct *work)
kfree(info);
}
+/*
+ * Note: This function must be called with slot->hotplug_lock held
+ */
int pciehp_enable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
int rc;
struct controller *ctrl = p_slot->ctrl;
- rc = pciehp_get_adapter_status(p_slot, &getstatus);
- if (rc || !getstatus) {
+ pciehp_get_adapter_status(p_slot, &getstatus);
+ if (!getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
return -ENODEV;
}
if (MRL_SENS(p_slot->ctrl)) {
- rc = pciehp_get_latch_status(p_slot, &getstatus);
- if (rc || getstatus) {
+ pciehp_get_latch_status(p_slot, &getstatus);
+ if (getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n",
slot_name(p_slot));
return -ENODEV;
@@ -497,8 +581,8 @@ int pciehp_enable_slot(struct slot *p_slot)
}
if (POWER_CTRL(p_slot->ctrl)) {
- rc = pciehp_get_power_status(p_slot, &getstatus);
- if (rc || getstatus) {
+ pciehp_get_power_status(p_slot, &getstatus);
+ if (getstatus) {
ctrl_info(ctrl, "Already enabled on slot(%s)\n",
slot_name(p_slot));
return -EINVAL;
@@ -508,43 +592,26 @@ int pciehp_enable_slot(struct slot *p_slot)
pciehp_get_latch_status(p_slot, &getstatus);
rc = board_added(p_slot);
- if (rc) {
+ if (rc)
pciehp_get_latch_status(p_slot, &getstatus);
- }
+
return rc;
}
-
+/*
+ * Note: This function must be called with slot->hotplug_lock held
+ */
int pciehp_disable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
- int ret = 0;
struct controller *ctrl = p_slot->ctrl;
if (!p_slot->ctrl)
return 1;
- if (!HP_SUPR_RM(p_slot->ctrl)) {
- ret = pciehp_get_adapter_status(p_slot, &getstatus);
- if (ret || !getstatus) {
- ctrl_info(ctrl, "No adapter on slot(%s)\n",
- slot_name(p_slot));
- return -ENODEV;
- }
- }
-
- if (MRL_SENS(p_slot->ctrl)) {
- ret = pciehp_get_latch_status(p_slot, &getstatus);
- if (ret || getstatus) {
- ctrl_info(ctrl, "Latch open on slot(%s)\n",
- slot_name(p_slot));
- return -ENODEV;
- }
- }
-
if (POWER_CTRL(p_slot->ctrl)) {
- ret = pciehp_get_power_status(p_slot, &getstatus);
- if (ret || !getstatus) {
+ pciehp_get_power_status(p_slot, &getstatus);
+ if (!getstatus) {
ctrl_info(ctrl, "Already disabled on slot(%s)\n",
slot_name(p_slot));
return -EINVAL;
@@ -566,7 +633,9 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
case STATIC_STATE:
p_slot->state = POWERON_STATE;
mutex_unlock(&p_slot->lock);
+ mutex_lock(&p_slot->hotplug_lock);
retval = pciehp_enable_slot(p_slot);
+ mutex_unlock(&p_slot->hotplug_lock);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
break;
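
The pciehp_ctrl.c hunks above move the driver from the global pciehp_wq/pciehp_ordered_wq pair to one workqueue per slot, and wrap enable/disable in a new hotplug_lock. A minimal sketch of that per-slot pattern, assuming kernel context; my_slot and my_slot_init are hypothetical names standing in for the patch's struct slot and pcie_init_slot():

	struct my_slot {
		struct workqueue_struct *wq;	/* serializes this slot's events */
		struct work_struct work;
		struct mutex hotplug_lock;	/* serializes enable/disable */
	};

	static int my_slot_init(struct my_slot *slot, unsigned int psn)
	{
		/* one queue per slot, named after its physical slot number,
		 * mirroring the alloc_workqueue() call in pcie_init_slot() */
		slot->wq = alloc_workqueue("pciehp-%u", 0, 0, psn);
		if (!slot->wq)
			return -ENOMEM;
		mutex_init(&slot->hotplug_lock);
		return 0;
	}

With a dedicated queue per slot, queue_work(p_slot->wq, ...) keeps each slot's power work serialized without a single driver-wide ordered queue.
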
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 50a23da5d24..42914e04d11 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -41,34 +41,11 @@
#include "../pci.h"
#include "pciehp.h"
-static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
+static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
{
- struct pci_dev *dev = ctrl->pcie->port;
- return pci_read_config_word(dev, pci_pcie_cap(dev) + reg, value);
+ return ctrl->pcie->port;
}
-static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
-{
- struct pci_dev *dev = ctrl->pcie->port;
- return pci_read_config_dword(dev, pci_pcie_cap(dev) + reg, value);
-}
-
-static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
-{
- struct pci_dev *dev = ctrl->pcie->port;
- return pci_write_config_word(dev, pci_pcie_cap(dev) + reg, value);
-}
-
-static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
-{
- struct pci_dev *dev = ctrl->pcie->port;
- return pci_write_config_dword(dev, pci_pcie_cap(dev) + reg, value);
-}
-
-/* Power Control Command */
-#define POWER_ON 0
-#define POWER_OFF PCI_EXP_SLTCTL_PCC
-
static irqreturn_t pcie_isr(int irq, void *dev_id);
static void start_int_poll_timer(struct controller *ctrl, int sec);
@@ -92,7 +69,7 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
{
/* Clamp to sane value */
if ((sec <= 0) || (sec > 60))
- sec = 2;
+ sec = 2;
ctrl->poll_timer.function = &int_poll_timeout;
ctrl->poll_timer.data = (unsigned long)ctrl;
@@ -129,20 +106,23 @@ static inline void pciehp_free_irq(struct controller *ctrl)
static int pcie_poll_cmd(struct controller *ctrl)
{
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
- int err, timeout = 1000;
+ int timeout = 1000;
- err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) {
- pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC);
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (slot_status & PCI_EXP_SLTSTA_CC) {
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_CC);
return 1;
}
while (timeout > 0) {
msleep(10);
timeout -= 10;
- err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) {
- pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC);
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (slot_status & PCI_EXP_SLTSTA_CC) {
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_CC);
return 1;
}
}
@@ -169,22 +149,18 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll)
* @cmd: command value written to slot control register
* @mask: bitmask of slot control register to be modified
*/
-static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
+static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
{
- int retval = 0;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
u16 slot_ctrl;
mutex_lock(&ctrl->ctrl_lock);
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
- __func__);
- goto out;
- }
-
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
if (slot_status & PCI_EXP_SLTSTA_CC) {
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_CC);
if (!ctrl->no_cmd_complete) {
/*
* After 1 sec and CMD_COMPLETED still not set, just
@@ -194,37 +170,28 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n");
} else if (!NO_CMD_CMPL(ctrl)) {
/*
- * This controller semms to notify of command completed
+ * This controller seems to notify of command completed
* event even though it supports none of power
* controller, attention led, power led and EMI.
*/
- ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Need to "
- "wait for command completed event.\n");
+ ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Need to wait for command completed event\n");
ctrl->no_cmd_complete = 0;
} else {
- ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Maybe "
- "the controller is broken.\n");
+ ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Maybe the controller is broken\n");
}
}
- retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
- goto out;
- }
-
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
slot_ctrl &= ~mask;
slot_ctrl |= (cmd & mask);
ctrl->cmd_busy = 1;
smp_mb();
- retval = pciehp_writew(ctrl, PCI_EXP_SLTCTL, slot_ctrl);
- if (retval)
- ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n");
+ pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
/*
* Wait for command completion.
*/
- if (!retval && !ctrl->no_cmd_complete) {
+ if (!ctrl->no_cmd_complete) {
int poll = 0;
/*
* if hotplug interrupt is not enabled or command
@@ -234,328 +201,313 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
if (!(slot_ctrl & PCI_EXP_SLTCTL_HPIE) ||
!(slot_ctrl & PCI_EXP_SLTCTL_CCIE))
poll = 1;
- pcie_wait_cmd(ctrl, poll);
+ pcie_wait_cmd(ctrl, poll);
}
- out:
mutex_unlock(&ctrl->ctrl_lock);
- return retval;
}
-static inline int check_link_active(struct controller *ctrl)
+bool pciehp_check_link_active(struct controller *ctrl)
{
- u16 link_status;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ u16 lnk_status;
+ bool ret;
- if (pciehp_readw(ctrl, PCI_EXP_LNKSTA, &link_status))
- return 0;
- return !!(link_status & PCI_EXP_LNKSTA_DLLLA);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+
+ if (ret)
+ ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
+
+ return ret;
}
-static void pcie_wait_link_active(struct controller *ctrl)
+static void __pcie_wait_link_active(struct controller *ctrl, bool active)
{
int timeout = 1000;
- if (check_link_active(ctrl))
+ if (pciehp_check_link_active(ctrl) == active)
return;
while (timeout > 0) {
msleep(10);
timeout -= 10;
- if (check_link_active(ctrl))
+ if (pciehp_check_link_active(ctrl) == active)
return;
}
- ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n");
+ ctrl_dbg(ctrl, "Data Link Layer Link Active not %s in 1000 msec\n",
+ active ? "set" : "cleared");
+}
+
+static void pcie_wait_link_active(struct controller *ctrl)
+{
+ __pcie_wait_link_active(ctrl, true);
+}
+
+static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
+{
+ u32 l;
+ int count = 0;
+ int delay = 1000, step = 20;
+ bool found = false;
+
+ do {
+ found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
+ count++;
+
+ if (found)
+ break;
+
+ msleep(step);
+ delay -= step;
+ } while (delay > 0);
+
+ if (count > 1 && pciehp_debug)
+ printk(KERN_DEBUG "pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
+ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), count, step, l);
+
+ return found;
}
int pciehp_check_link_status(struct controller *ctrl)
{
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ bool found;
u16 lnk_status;
- int retval = 0;
-
- /*
- * Data Link Layer Link Active Reporting must be capable for
- * hot-plug capable downstream port. But old controller might
- * not implement it. In this case, we wait for 1000 ms.
- */
- if (ctrl->link_active_reporting){
- /* Wait for Data Link Layer Link Active bit to be set */
- pcie_wait_link_active(ctrl);
- /*
- * We must wait for 100 ms after the Data Link Layer
- * Link Active bit reads 1b before initiating a
- * configuration access to the hot added device.
- */
- msleep(100);
- } else
- msleep(1000);
-
- retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
- if (retval) {
- ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
- return retval;
- }
+ /*
+ * Data Link Layer Link Active Reporting must be supported by a
+ * hot-plug capable downstream port, but an old controller might
+ * not implement it. In that case, wait for 1000 ms.
+ */
+ if (ctrl->link_active_reporting)
+ pcie_wait_link_active(ctrl);
+ else
+ msleep(1000);
+
+ /* wait 100 ms before reading PCI config space; retry for up to 1 s */
+ msleep(100);
+ found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
+ PCI_DEVFN(0, 0));
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
!(lnk_status & PCI_EXP_LNKSTA_NLW)) {
- ctrl_err(ctrl, "Link Training Error occurs \n");
- retval = -1;
- return retval;
+ ctrl_err(ctrl, "Link Training Error occurs\n");
+ return -1;
}
- return retval;
+ pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
+
+ if (!found)
+ return -1;
+
+ return 0;
+}
+
+static int __pciehp_link_set(struct controller *ctrl, bool enable)
+{
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ u16 lnk_ctrl;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
+
+ if (enable)
+ lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
+ else
+ lnk_ctrl |= PCI_EXP_LNKCTL_LD;
+
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
+ ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
+ return 0;
}
-int pciehp_get_attention_status(struct slot *slot, u8 *status)
+static int pciehp_link_enable(struct controller *ctrl)
+{
+ return __pciehp_link_set(ctrl, true);
+}
+
+void pciehp_get_attention_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
- u8 atten_led_state;
- int retval = 0;
-
- retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
- return retval;
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
- atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6;
-
- switch (atten_led_state) {
- case 0:
- *status = 0xFF; /* Reserved */
- break;
- case 1:
+ switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
+ case PCI_EXP_SLTCTL_ATTN_IND_ON:
*status = 1; /* On */
break;
- case 2:
+ case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
*status = 2; /* Blink */
break;
- case 3:
+ case PCI_EXP_SLTCTL_ATTN_IND_OFF:
*status = 0; /* Off */
break;
default:
*status = 0xFF;
break;
}
-
- return 0;
}
-int pciehp_get_power_status(struct slot *slot, u8 *status)
+void pciehp_get_power_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
- u8 pwr_state;
- int retval = 0;
- retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
- return retval;
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
- pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10;
-
- switch (pwr_state) {
- case 0:
- *status = 1;
+ switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
+ case PCI_EXP_SLTCTL_PWR_ON:
+ *status = 1; /* On */
break;
- case 1:
- *status = 0;
+ case PCI_EXP_SLTCTL_PWR_OFF:
+ *status = 0; /* Off */
break;
default:
*status = 0xFF;
break;
}
-
- return retval;
}
-int pciehp_get_latch_status(struct slot *slot, u8 *status)
+void pciehp_get_latch_status(struct slot *slot, u8 *status)
{
- struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(slot->ctrl);
u16 slot_status;
- int retval;
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
- __func__);
- return retval;
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
- return 0;
}
-int pciehp_get_adapter_status(struct slot *slot, u8 *status)
+void pciehp_get_adapter_status(struct slot *slot, u8 *status)
{
- struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(slot->ctrl);
u16 slot_status;
- int retval;
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
- __func__);
- return retval;
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
*status = !!(slot_status & PCI_EXP_SLTSTA_PDS);
- return 0;
}
int pciehp_query_power_fault(struct slot *slot)
{
- struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(slot->ctrl);
u16 slot_status;
- int retval;
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "Cannot check for power fault\n");
- return retval;
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
return !!(slot_status & PCI_EXP_SLTSTA_PFD);
}
-int pciehp_set_attention_status(struct slot *slot, u8 value)
+void pciehp_set_attention_status(struct slot *slot, u8 value)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
- u16 cmd_mask;
- cmd_mask = PCI_EXP_SLTCTL_AIC;
+ if (!ATTN_LED(ctrl))
+ return;
+
switch (value) {
- case 0 : /* turn off */
- slot_cmd = 0x00C0;
+ case 0: /* turn off */
+ slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF;
break;
case 1: /* turn on */
- slot_cmd = 0x0040;
+ slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_ON;
break;
case 2: /* turn blink */
- slot_cmd = 0x0080;
+ slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_BLINK;
break;
default:
- return -EINVAL;
+ return;
}
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
- return pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
+ pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
}
void pciehp_green_led_on(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
- u16 slot_cmd;
- u16 cmd_mask;
- slot_cmd = 0x0100;
- cmd_mask = PCI_EXP_SLTCTL_PIC;
- pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
+ if (!PWR_LED(ctrl))
+ return;
+
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, PCI_EXP_SLTCTL_PIC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_IND_ON);
}
void pciehp_green_led_off(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
- u16 slot_cmd;
- u16 cmd_mask;
- slot_cmd = 0x0300;
- cmd_mask = PCI_EXP_SLTCTL_PIC;
- pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
+ if (!PWR_LED(ctrl))
+ return;
+
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_PIC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_IND_OFF);
}
void pciehp_green_led_blink(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
- u16 slot_cmd;
- u16 cmd_mask;
- slot_cmd = 0x0200;
- cmd_mask = PCI_EXP_SLTCTL_PIC;
- pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
+ if (!PWR_LED(ctrl))
+ return;
+
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, PCI_EXP_SLTCTL_PIC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_IND_BLINK);
}
-int pciehp_power_on_slot(struct slot * slot)
+int pciehp_power_on_slot(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
- u16 slot_cmd;
- u16 cmd_mask;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
- u16 lnk_status;
- int retval = 0;
+ int retval;
/* Clear sticky power-fault bit from previous power failures */
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
- __func__);
- return retval;
- }
- slot_status &= PCI_EXP_SLTSTA_PFD;
- if (slot_status) {
- retval = pciehp_writew(ctrl, PCI_EXP_SLTSTA, slot_status);
- if (retval) {
- ctrl_err(ctrl,
- "%s: Cannot write to SLOTSTATUS register\n",
- __func__);
- return retval;
- }
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (slot_status & PCI_EXP_SLTSTA_PFD)
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_PFD);
ctrl->power_fault_detected = 0;
- slot_cmd = POWER_ON;
- cmd_mask = PCI_EXP_SLTCTL_PCC;
- retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
- if (retval) {
- ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
- return retval;
- }
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_ON);
- retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n",
- __func__);
- return retval;
- }
- pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
+ retval = pciehp_link_enable(ctrl);
+ if (retval)
+ ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);
return retval;
}
-int pciehp_power_off_slot(struct slot * slot)
+void pciehp_power_off_slot(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
- u16 slot_cmd;
- u16 cmd_mask;
- int retval;
- slot_cmd = POWER_OFF;
- cmd_mask = PCI_EXP_SLTCTL_PCC;
- retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
- if (retval) {
- ctrl_err(ctrl, "Write command failed!\n");
- return retval;
- }
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
- return 0;
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_OFF);
}
static irqreturn_t pcie_isr(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
struct slot *slot = ctrl->slot;
u16 detected, intr_loc;
@@ -566,24 +518,18 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
*/
intr_loc = 0;
do {
- if (pciehp_readw(ctrl, PCI_EXP_SLTSTA, &detected)) {
- ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n",
- __func__);
- return IRQ_NONE;
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &detected);
detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
- PCI_EXP_SLTSTA_CC);
+ PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
detected &= ~intr_loc;
intr_loc |= detected;
if (!intr_loc)
return IRQ_NONE;
- if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, intr_loc)) {
- ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n",
- __func__);
- return IRQ_NONE;
- }
+ if (detected)
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ intr_loc);
} while (detected);
ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc);
@@ -615,111 +561,14 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
ctrl->power_fault_detected = 1;
pciehp_handle_power_fault(slot);
}
- return IRQ_HANDLED;
-}
-
-int pciehp_get_max_lnk_width(struct slot *slot,
- enum pcie_link_width *value)
-{
- struct controller *ctrl = slot->ctrl;
- enum pcie_link_width lnk_wdth;
- u32 lnk_cap;
- int retval = 0;
-
- retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
- return retval;
- }
-
- switch ((lnk_cap & PCI_EXP_LNKSTA_NLW) >> 4){
- case 0:
- lnk_wdth = PCIE_LNK_WIDTH_RESRV;
- break;
- case 1:
- lnk_wdth = PCIE_LNK_X1;
- break;
- case 2:
- lnk_wdth = PCIE_LNK_X2;
- break;
- case 4:
- lnk_wdth = PCIE_LNK_X4;
- break;
- case 8:
- lnk_wdth = PCIE_LNK_X8;
- break;
- case 12:
- lnk_wdth = PCIE_LNK_X12;
- break;
- case 16:
- lnk_wdth = PCIE_LNK_X16;
- break;
- case 32:
- lnk_wdth = PCIE_LNK_X32;
- break;
- default:
- lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN;
- break;
- }
-
- *value = lnk_wdth;
- ctrl_dbg(ctrl, "Max link width = %d\n", lnk_wdth);
-
- return retval;
-}
-
-int pciehp_get_cur_lnk_width(struct slot *slot,
- enum pcie_link_width *value)
-{
- struct controller *ctrl = slot->ctrl;
- enum pcie_link_width lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN;
- int retval = 0;
- u16 lnk_status;
-
- retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
- __func__);
- return retval;
- }
-
- switch ((lnk_status & PCI_EXP_LNKSTA_NLW) >> 4){
- case 0:
- lnk_wdth = PCIE_LNK_WIDTH_RESRV;
- break;
- case 1:
- lnk_wdth = PCIE_LNK_X1;
- break;
- case 2:
- lnk_wdth = PCIE_LNK_X2;
- break;
- case 4:
- lnk_wdth = PCIE_LNK_X4;
- break;
- case 8:
- lnk_wdth = PCIE_LNK_X8;
- break;
- case 12:
- lnk_wdth = PCIE_LNK_X12;
- break;
- case 16:
- lnk_wdth = PCIE_LNK_X16;
- break;
- case 32:
- lnk_wdth = PCIE_LNK_X32;
- break;
- default:
- lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN;
- break;
- }
- *value = lnk_wdth;
- ctrl_dbg(ctrl, "Current link width = %d\n", lnk_wdth);
+ if (intr_loc & PCI_EXP_SLTSTA_DLLSC)
+ pciehp_handle_linkstate_change(slot);
- return retval;
+ return IRQ_HANDLED;
}
-int pcie_enable_notification(struct controller *ctrl)
+void pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
@@ -733,9 +582,17 @@ int pcie_enable_notification(struct controller *ctrl)
* when it is cleared in the interrupt service routine, and
* next power fault detected interrupt was notified again.
*/
- cmd = PCI_EXP_SLTCTL_PDCE;
+
+ /*
+ * Always enable link events: thus link-up and link-down shall
+ * always be treated as hotplug and unplug respectively. Enable
+ * presence detect only if Attention Button is not present.
+ */
+ cmd = PCI_EXP_SLTCTL_DLLSCE;
if (ATTN_BUTTN(ctrl))
cmd |= PCI_EXP_SLTCTL_ABPE;
+ else
+ cmd |= PCI_EXP_SLTCTL_PDCE;
if (MRL_SENS(ctrl))
cmd |= PCI_EXP_SLTCTL_MRLSCE;
if (!pciehp_poll_mode)
@@ -743,34 +600,66 @@ int pcie_enable_notification(struct controller *ctrl)
mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
- PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE);
+ PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
+ PCI_EXP_SLTCTL_DLLSCE);
- if (pcie_write_cmd(ctrl, cmd, mask)) {
- ctrl_err(ctrl, "Cannot enable software notification\n");
- return -1;
- }
- return 0;
+ pcie_write_cmd(ctrl, cmd, mask);
}
static void pcie_disable_notification(struct controller *ctrl)
{
u16 mask;
+
mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
PCI_EXP_SLTCTL_DLLSCE);
- if (pcie_write_cmd(ctrl, 0, mask))
- ctrl_warn(ctrl, "Cannot disable software notification\n");
+ pcie_write_cmd(ctrl, 0, mask);
+}
+
+/*
+ * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
+ * bus reset of the bridge, but at the same time we want to ensure that it is
+ * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
+ * disable link state notification and presence detection change notification
+ * momentarily, if we see that they could interfere. Also, clear any spurious
+ * events after.
+ */
+int pciehp_reset_slot(struct slot *slot, int probe)
+{
+ struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ u16 stat_mask = 0, ctrl_mask = 0;
+
+ if (probe)
+ return 0;
+
+ if (!ATTN_BUTTN(ctrl)) {
+ ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
+ stat_mask |= PCI_EXP_SLTSTA_PDC;
+ }
+ ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
+ stat_mask |= PCI_EXP_SLTSTA_DLLSC;
+
+ pcie_write_cmd(ctrl, 0, ctrl_mask);
+ if (pciehp_poll_mode)
+ del_timer_sync(&ctrl->poll_timer);
+
+ pci_reset_bridge_secondary_bus(ctrl->pcie->port);
+
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
+ pcie_write_cmd(ctrl, ctrl_mask, ctrl_mask);
+ if (pciehp_poll_mode)
+ int_poll_timeout(ctrl->poll_timer.data);
+
+ return 0;
}
int pcie_init_notification(struct controller *ctrl)
{
if (pciehp_request_irq(ctrl))
return -1;
- if (pcie_enable_notification(ctrl)) {
- pciehp_free_irq(ctrl);
- return -1;
- }
+ pcie_enable_notification(ctrl);
ctrl->notification_enabled = 1;
return 0;
}
@@ -792,19 +681,26 @@ static int pcie_init_slot(struct controller *ctrl)
if (!slot)
return -ENOMEM;
+ slot->wq = alloc_workqueue("pciehp-%u", 0, 0, PSN(ctrl));
+ if (!slot->wq)
+ goto abort;
+
slot->ctrl = ctrl;
mutex_init(&slot->lock);
+ mutex_init(&slot->hotplug_lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
ctrl->slot = slot;
return 0;
+abort:
+ kfree(slot);
+ return -ENOMEM;
}
static void pcie_cleanup_slot(struct controller *ctrl)
{
struct slot *slot = ctrl->slot;
cancel_delayed_work(&slot->work);
- flush_workqueue(pciehp_wq);
- flush_workqueue(pciehp_ordered_wq);
+ destroy_workqueue(slot->wq);
kfree(slot);
}
@@ -852,12 +748,14 @@ static inline void dbg_ctrl(struct controller *ctrl)
EMI(ctrl) ? "yes" : "no");
ctrl_info(ctrl, " Command Completed : %3s\n",
NO_CMD_CMPL(ctrl) ? "no" : "yes");
- pciehp_readw(ctrl, PCI_EXP_SLTSTA, &reg16);
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
- pciehp_readw(ctrl, PCI_EXP_SLTCTL, &reg16);
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
}
+#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
+
struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
@@ -870,15 +768,7 @@ struct controller *pcie_init(struct pcie_device *dev)
goto abort;
}
ctrl->pcie = dev;
- if (!pci_pcie_cap(pdev)) {
- ctrl_err(ctrl, "Cannot find PCI Express capability\n");
- goto abort_ctrl;
- }
- if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) {
- ctrl_err(ctrl, "Cannot read SLOTCAP register\n");
- goto abort_ctrl;
- }
-
+ pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
ctrl->slot_cap = slot_cap;
mutex_init(&ctrl->ctrl_lock);
init_waitqueue_head(&ctrl->queue);
@@ -891,28 +781,34 @@ struct controller *pcie_init(struct pcie_device *dev)
*/
if (NO_CMD_CMPL(ctrl) ||
!(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
- ctrl->no_cmd_complete = 1;
-
- /* Check if Data Link Layer Link Active Reporting is implemented */
- if (pciehp_readl(ctrl, PCI_EXP_LNKCAP, &link_cap)) {
- ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
- goto abort_ctrl;
- }
- if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
- ctrl_dbg(ctrl, "Link Active Reporting supported\n");
- ctrl->link_active_reporting = 1;
- }
+ ctrl->no_cmd_complete = 1;
+
+ /* Check if Data Link Layer Link Active Reporting is implemented */
+ pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
+ if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
+ ctrl_dbg(ctrl, "Link Active Reporting supported\n");
+ ctrl->link_active_reporting = 1;
+ }
/* Clear all remaining event bits in Slot Status register */
- if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f))
- goto abort_ctrl;
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
+ PCI_EXP_SLTSTA_CC);
- /* Disable sotfware notification */
+ /* Disable software notification */
pcie_disable_notification(ctrl);
- ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
- pdev->vendor, pdev->device, pdev->subsystem_vendor,
- pdev->subsystem_device);
+ ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n",
+ (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
+ FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
+ FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC));
if (pcie_init_slot(ctrl))
goto abort_ctrl;
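
For reference, the pciehp_readw()/pciehp_writew() wrappers deleted above computed the PCI Express capability offset by hand; the generic pcie_capability_*() helpers they are replaced with do essentially the same lookup internally. A rough sketch of the old behaviour, simplified; the real helpers additionally cope with devices whose capability structure omits some registers:

	/* roughly what pcie_capability_read_word(pdev, reg, &val) does
	 * for a straightforward PCIe device */
	static int read_pcie_word(struct pci_dev *dev, int reg, u16 *val)
	{
		return pci_read_config_word(dev, pci_pcie_cap(dev) + reg, val);
	}

Dropping the local wrappers also lets the callers drop their error plumbing, which is why so many accessors above change from int to void.
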
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index a4031dfe938..5f871f4c4af 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -34,126 +34,102 @@
#include "../pci.h"
#include "pciehp.h"
-static int __ref pciehp_add_bridge(struct pci_dev *dev)
-{
- struct pci_bus *parent = dev->bus;
- int pass, busnr, start = parent->secondary;
- int end = parent->subordinate;
-
- for (busnr = start; busnr <= end; busnr++) {
- if (!pci_find_bus(pci_domain_nr(parent), busnr))
- break;
- }
- if (busnr-- > end) {
- err("No bus number available for hot-added bridge %s\n",
- pci_name(dev));
- return -1;
- }
- for (pass = 0; pass < 2; pass++)
- busnr = pci_scan_bridge(parent, dev, busnr, pass);
- if (!dev->subordinate)
- return -1;
-
- return 0;
-}
-
int pciehp_configure_device(struct slot *p_slot)
{
struct pci_dev *dev;
struct pci_dev *bridge = p_slot->ctrl->pcie->port;
struct pci_bus *parent = bridge->subordinate;
- int num, fn;
+ int num, ret = 0;
struct controller *ctrl = p_slot->ctrl;
+ pci_lock_rescan_remove();
+
dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
if (dev) {
- ctrl_err(ctrl, "Device %s already exists "
- "at %04x:%02x:00, cannot hot-add\n", pci_name(dev),
- pci_domain_nr(parent), parent->number);
+ ctrl_err(ctrl, "Device %s already exists at %04x:%02x:00, cannot hot-add\n",
+ pci_name(dev), pci_domain_nr(parent), parent->number);
pci_dev_put(dev);
- return -EINVAL;
+ ret = -EEXIST;
+ goto out;
}
num = pci_scan_slot(parent, PCI_DEVFN(0, 0));
if (num == 0) {
ctrl_err(ctrl, "No new device found\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
- for (fn = 0; fn < 8; fn++) {
- dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
- if (!dev)
- continue;
- if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
- (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
- pciehp_add_bridge(dev);
- }
- pci_dev_put(dev);
- }
+ list_for_each_entry(dev, &parent->devices, bus_list)
+ if (pci_is_bridge(dev))
+ pci_hp_add_bridge(dev);
pci_assign_unassigned_bridge_resources(bridge);
- for (fn = 0; fn < 8; fn++) {
- dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
- if (!dev)
- continue;
- if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- pci_dev_put(dev);
+ list_for_each_entry(dev, &parent->devices, bus_list) {
+ if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
continue;
- }
+
pci_configure_slot(dev);
- pci_dev_put(dev);
}
pci_bus_add_devices(parent);
- return 0;
+ out:
+ pci_unlock_rescan_remove();
+ return ret;
}
int pciehp_unconfigure_device(struct slot *p_slot)
{
- int ret, rc = 0;
- int j;
+ int rc = 0;
u8 bctl = 0;
u8 presence = 0;
+ struct pci_dev *dev, *temp;
struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
u16 command;
struct controller *ctrl = p_slot->ctrl;
ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n",
__func__, pci_domain_nr(parent), parent->number);
- ret = pciehp_get_adapter_status(p_slot, &presence);
- if (ret)
- presence = 0;
-
- for (j = 0; j < 8; j++) {
- struct pci_dev *temp = pci_get_slot(parent, PCI_DEVFN(0, j));
- if (!temp)
- continue;
- if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
- pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
+ pciehp_get_adapter_status(p_slot, &presence);
+
+ pci_lock_rescan_remove();
+
+ /*
+ * Stopping an SR-IOV PF device removes all the associated VFs,
+ * which will update the bus->devices list and confuse the
+ * iterator. Therefore, iterate in reverse so we remove the VFs
+ * first, then the PF. We do the same in pci_stop_bus_device().
+ */
+ list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
+ bus_list) {
+ pci_dev_get(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
+ pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
if (bctl & PCI_BRIDGE_CTL_VGA) {
ctrl_err(ctrl,
"Cannot remove display device %s\n",
- pci_name(temp));
- pci_dev_put(temp);
+ pci_name(dev));
+ pci_dev_put(dev);
rc = -EINVAL;
break;
}
}
- pci_remove_bus_device(temp);
+ pci_stop_and_remove_bus_device(dev);
/*
* Ensure that no new Requests will be generated from
* the device.
*/
if (presence) {
- pci_read_config_word(temp, PCI_COMMAND, &command);
+ pci_read_config_word(dev, PCI_COMMAND, &command);
command &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
command |= PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(temp, PCI_COMMAND, command);
+ pci_write_config_word(dev, PCI_COMMAND, command);
}
- pci_dev_put(temp);
+ pci_dev_put(dev);
}
+ pci_unlock_rescan_remove();
return rc;
}
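
The reverse iteration adopted above matters because stopping an SR-IOV PF removes its VFs from bus->devices behind the iterator's back. A condensed sketch of the same pattern, assuming kernel context; remove_all_on_bus is a hypothetical helper, not part of the patch:

	/* remove every device on a bus in reverse order, so SR-IOV VFs
	 * go before their PF, under the rescan/remove lock */
	static void remove_all_on_bus(struct pci_bus *bus)
	{
		struct pci_dev *dev, *tmp;

		pci_lock_rescan_remove();
		list_for_each_entry_safe_reverse(dev, tmp, &bus->devices,
						 bus_list) {
			pci_dev_get(dev);	/* hold across removal */
			pci_stop_and_remove_bus_device(dev);
			pci_dev_put(dev);
		}
		pci_unlock_rescan_remove();
	}
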
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index 5175d9b26f0..d062c008fc9 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -51,15 +51,15 @@ static LIST_HEAD(slot_list);
#define dbg(format, arg...) \
do { \
if (debug) \
- printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ printk(KERN_DEBUG "%s: " format "\n", \
+ MY_NAME , ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
/* local variables */
-static int debug;
+static bool debug;
static int num_slots;
#define DRIVER_VERSION "0.3"
@@ -128,18 +128,18 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
switch (status) {
- case 0:
- /*
- * Fill in code here to turn light off
- */
- break;
-
- case 1:
- default:
- /*
- * Fill in code here to turn light on
- */
- break;
+ case 0:
+ /*
+ * Fill in code here to turn light off
+ */
+ break;
+
+ case 1:
+ default:
+ /*
+ * Fill in code here to turn light on
+ */
+ break;
}
return retval;
@@ -153,12 +153,12 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
switch (value) {
- case 0:
- /* Specify a test here */
- break;
- case 1:
- /* Specify another test here */
- break;
+ case 0:
+ /* Specify a test here */
+ break;
+ case 1:
+ /* Specify another test here */
+ break;
}
return retval;
@@ -252,7 +252,7 @@ static int __init init_slots(void)
struct slot *slot;
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
- int retval = -ENOMEM;
+ int retval;
int i;
/*
@@ -261,17 +261,23 @@ static int __init init_slots(void)
*/
for (i = 0; i < num_slots; ++i) {
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
+ if (!slot) {
+ retval = -ENOMEM;
goto error;
+ }
hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot)
+ if (!hotplug_slot) {
+ retval = -ENOMEM;
goto error_slot;
+ }
slot->hotplug_slot = hotplug_slot;
info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
+ if (!info) {
+ retval = -ENOMEM;
goto error_hpslot;
+ }
hotplug_slot->info = info;
slot->number = i;
@@ -281,7 +287,7 @@ static int __init init_slots(void)
hotplug_slot->release = &release_slot;
make_slot_name(slot);
hotplug_slot->ops = &skel_hotplug_slot_ops;
-
+
/*
* Initialize the slot info structure with some known
* good values.
@@ -290,7 +296,7 @@ static int __init init_slots(void)
get_attention_status(hotplug_slot, &info->attention_status);
get_latch_status(hotplug_slot, &info->latch_status);
get_adapter_status(hotplug_slot, &info->adapter_status);
-
+
dbg("registering slot %d\n", i);
retval = pci_hp_register(slot->hotplug_slot);
if (retval) {
@@ -330,7 +336,7 @@ static void __exit cleanup_slots(void)
pci_hp_deregister(slot->hotplug_slot);
}
}
-
+
static int __init pcihp_skel_init(void)
{
int retval;
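
The debug flag above becomes a bool so that it matches module_param()'s type checking for bool parameters. A minimal sketch, assuming the skeleton declares the parameter in the usual way:

	static bool debug;
	module_param(debug, bool, 0644);
	MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
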
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index 80b461c9855..e246a10a0d2 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -24,6 +24,7 @@
*/
#include <linux/pci.h>
+#include <linux/export.h>
#include <linux/pci_hotplug.h>
static struct hpp_type0 pci_default_type0 = {
@@ -95,17 +96,11 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
int pos;
- u16 reg16;
u32 reg32;
if (!hpp)
return;
- /* Find PCI Express capability */
- pos = pci_pcie_cap(dev);
- if (!pos)
- return;
-
if (hpp->revision > 1) {
dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
hpp->revision);
@@ -113,17 +108,13 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
}
/* Initialize Device Control Register */
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
- reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or;
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
+ pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
+ ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
/* Initialize Link Control Register */
- if (dev->subordinate) {
- pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &reg16);
- reg16 = (reg16 & hpp->pci_exp_lnkctl_and)
- | hpp->pci_exp_lnkctl_or;
- pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16);
- }
+ if (dev->subordinate)
+ pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
+ ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
/* Find Advanced Error Reporting Enhanced Capability */
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
@@ -169,6 +160,8 @@ void pci_configure_slot(struct pci_dev *dev)
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
return;
+ pcie_bus_configure_settings(dev->bus);
+
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
if (ret)
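
pcie_capability_clear_and_set_word(), used in the hunks above, is a read-modify-write: it clears the bits in its third argument and then sets those in its fourth, which is why the patch passes the complement of the hpp AND-mask as the clear argument. The semantics as a self-contained userspace sketch; the register values here are made up for illustration:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t clear_and_set_word(uint16_t reg, uint16_t clear,
					   uint16_t set)
	{
		reg &= ~clear;		/* clear the requested bits ... */
		reg |= set;		/* ... then set the requested bits */
		return reg;
	}

	int main(void)
	{
		/* hpp->pci_exp_devctl_and is an AND-mask, so the bits to
		 * clear are its complement: clear = ~and_mask */
		uint16_t devctl = 0x2810, and_mask = 0xfff0, or_mask = 0x0005;

		printf("0x%04x\n", (unsigned)clear_and_set_word(
			devctl, (uint16_t)~and_mask, or_mask));
		return 0;
	}
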
diff --git a/drivers/pci/hotplug/rpadlpar.h b/drivers/pci/hotplug/rpadlpar.h
index 4a0a59b82ea..81df93931ad 100644
--- a/drivers/pci/hotplug/rpadlpar.h
+++ b/drivers/pci/hotplug/rpadlpar.h
@@ -15,10 +15,10 @@
#ifndef _RPADLPAR_IO_H_
#define _RPADLPAR_IO_H_
-extern int dlpar_sysfs_init(void);
-extern void dlpar_sysfs_exit(void);
+int dlpar_sysfs_init(void);
+void dlpar_sysfs_exit(void);
-extern int dlpar_add_slot(char *drc_name);
-extern int dlpar_remove_slot(char *drc_name);
+int dlpar_add_slot(char *drc_name);
+int dlpar_remove_slot(char *drc_name);
#endif
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 083034710fa..7660232ef46 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -18,6 +18,7 @@
#undef DEBUG
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
@@ -156,9 +157,8 @@ static void dlpar_pci_add_bus(struct device_node *dn)
}
/* Scan below the new bridge */
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
- of_scan_pci_bridge(dn, dev);
+ if (pci_is_bridge(dev))
+ of_scan_pci_bridge(dev);
/* Map IO space for child bus, which may or may not succeed */
pcibios_map_io_space(dev->subordinate);
@@ -216,7 +216,7 @@ static int dlpar_remove_phb(char *drc_name, struct device_node *dn)
if (!pcibios_find_pci_bus(dn))
return -EINVAL;
- /* If pci slot is hotplugable, use hotplug to remove it */
+ /* If pci slot is hotpluggable, use hotplug to remove it */
slot = find_php_slot(dn);
if (slot && rpaphp_deregister_slot(slot)) {
printk(KERN_ERR "%s: unable to remove hotplug slot %s\n",
@@ -353,10 +353,15 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
{
struct pci_bus *bus;
struct slot *slot;
+ int ret = 0;
+
+ pci_lock_rescan_remove();
bus = pcibios_find_pci_bus(dn);
- if (!bus)
- return -EINVAL;
+ if (!bus) {
+ ret = -EINVAL;
+ goto out;
+ }
pr_debug("PCI: Removing PCI slot below EADS bridge %s\n",
bus->self ? pci_name(bus->self) : "<!PHB!>");
@@ -370,7 +375,8 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
printk(KERN_ERR
"%s: unable to remove hotplug slot %s\n",
__func__, drc_name);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
}
@@ -381,16 +387,18 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
if (pcibios_unmap_io_space(bus)) {
printk(KERN_ERR "%s: failed to unmap bus range\n",
__func__);
- return -ERANGE;
+ ret = -ERANGE;
+ goto out;
}
/* Remove the EADS bridge device itself */
BUG_ON(!bus->self);
pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self));
- eeh_remove_bus_device(bus->self);
- pci_remove_bus_device(bus->self);
+ pci_stop_and_remove_bus_device(bus->self);
- return 0;
+ out:
+ pci_unlock_rescan_remove();
+ return ret;
}
/**
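
The dlpar_remove_pci_slot() changes above funnel every exit through a single unlock. The lock/goto-out shape, sketched with a hypothetical helper in kernel context:

	static int remove_locked(struct pci_bus *bus)
	{
		int ret = 0;

		pci_lock_rescan_remove();
		if (!bus) {
			ret = -EINVAL;
			goto out;
		}
		/* ... tear the bus down here ... */
	out:
		pci_unlock_rescan_remove();
		return ret;
	}
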
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index 419919a87b0..b2593e876a0 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -46,12 +46,12 @@
#define PRESENT 1 /* Card in slot */
#define MY_NAME "rpaphp"
-extern int rpaphp_debug;
+extern bool rpaphp_debug;
#define dbg(format, arg...) \
do { \
- if (rpaphp_debug) \
+ if (rpaphp_debug) \
printk(KERN_DEBUG "%s: " format, \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
@@ -86,18 +86,18 @@ extern struct list_head rpaphp_slot_head;
/* function prototypes */
/* rpaphp_pci.c */
-extern int rpaphp_enable_slot(struct slot *slot);
-extern int rpaphp_get_sensor_state(struct slot *slot, int *state);
+int rpaphp_enable_slot(struct slot *slot);
+int rpaphp_get_sensor_state(struct slot *slot, int *state);
/* rpaphp_core.c */
-extern int rpaphp_add_slot(struct device_node *dn);
-extern int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
+int rpaphp_add_slot(struct device_node *dn);
+int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
char **drc_name, char **drc_type, int *drc_power_domain);
/* rpaphp_slot.c */
-extern void dealloc_slot_struct(struct slot *slot);
-extern struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain);
-extern int rpaphp_register_slot(struct slot *slot);
-extern int rpaphp_deregister_slot(struct slot *slot);
-
+void dealloc_slot_struct(struct slot *slot);
+struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain);
+int rpaphp_register_slot(struct slot *slot);
+int rpaphp_deregister_slot(struct slot *slot);
+
#endif /* _PPC64PHP_H */
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index ef7411c660b..93aa29f6d39 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -37,8 +37,9 @@
/* and pci_do_scan_bus */
#include "rpaphp.h"
-int rpaphp_debug;
+bool rpaphp_debug;
LIST_HEAD(rpaphp_slot_head);
+EXPORT_SYMBOL_GPL(rpaphp_slot_head);
#define DRIVER_VERSION "0.1"
#define DRIVER_AUTHOR "Linda Xie <lxie@us.ibm.com>"
@@ -88,7 +89,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
* @hotplug_slot: slot to get status
* @value: pointer to store status
*/
-static int get_power_status(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int retval, level;
struct slot *slot = (struct slot *)hotplug_slot->private;
@@ -104,14 +105,14 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 * value)
* @hotplug_slot: slot to get status
* @value: pointer to store status
*/
-static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = (struct slot *)hotplug_slot->private;
*value = slot->hotplug_slot->info->attention_status;
return 0;
}
-static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = (struct slot *)hotplug_slot->private;
int rc, state;
@@ -223,16 +224,16 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
type_tmp = (char *) &types[1];
/* Iterate through parent properties, looking for my-drc-index */
- for (i = 0; i < indexes[0]; i++) {
+ for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
if ((unsigned int) indexes[i + 1] == *my_index) {
if (drc_name)
- *drc_name = name_tmp;
+ *drc_name = name_tmp;
if (drc_type)
*drc_type = type_tmp;
if (drc_index)
- *drc_index = *my_index;
+ *drc_index = be32_to_cpu(*my_index);
if (drc_power_domain)
- *drc_power_domain = domains[i+1];
+ *drc_power_domain = be32_to_cpu(domains[i+1]);
return 0;
}
name_tmp += (strlen(name_tmp) + 1);
@@ -241,6 +242,7 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(rpaphp_get_drc_props);
static int is_php_type(char *drc_type)
{
@@ -289,8 +291,8 @@ static int is_php_dn(struct device_node *dn, const int **indexes,
* rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem.
* @dn: device node of slot
*
- * This subroutine will register a hotplugable slot with the
- * PCI hotplug infrastructure. This routine is typicaly called
+ * This subroutine will register a hotpluggable slot with the
+ * PCI hotplug infrastructure. This routine is typically called
* during boot time, if the hotplug slots are present at boot time,
* or is called later, by the dlpar add code, if the slot is
* being dynamically added during runtime.
@@ -321,16 +323,19 @@ int rpaphp_add_slot(struct device_node *dn)
/* register PCI devices */
name = (char *) &names[1];
type = (char *) &types[1];
- for (i = 0; i < indexes[0]; i++) {
+ for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
+ int index;
- slot = alloc_slot_struct(dn, indexes[i + 1], name, power_domains[i + 1]);
+ index = be32_to_cpu(indexes[i + 1]);
+ slot = alloc_slot_struct(dn, index, name,
+ be32_to_cpu(power_domains[i + 1]));
if (!slot)
return -ENOMEM;
slot->type = simple_strtoul(type, NULL, 10);
-
+
dbg("Found drc-index:0x%x drc-name:%s drc-type:%s\n",
- indexes[i + 1], name, type);
+ index, name, type);
retval = rpaphp_enable_slot(slot);
if (!retval)
@@ -347,6 +352,7 @@ int rpaphp_add_slot(struct device_node *dn)
/* XXX FIXME: reports a failure only if last entry in loop failed */
return retval;
}
+EXPORT_SYMBOL_GPL(rpaphp_add_slot);
static void __exit cleanup_slots(void)
{
@@ -356,7 +362,7 @@ static void __exit cleanup_slots(void)
/*
* Unregister all of our slots with the pci_hotplug subsystem,
* and free up all memory that we had allocated.
- * memory will be freed in release_slot callback.
+ * memory will be freed in release_slot callback.
*/
list_for_each_safe(tmp, n, &rpaphp_slot_head) {
@@ -398,7 +404,9 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
return retval;
if (state == PRESENT) {
+ pci_lock_rescan_remove();
pcibios_add_pci_devices(slot->bus);
+ pci_unlock_rescan_remove();
slot->state = CONFIGURED;
} else if (state == EMPTY) {
slot->state = EMPTY;
@@ -418,7 +426,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
if (slot->state == NOT_CONFIGURED)
return -EINVAL;
+ pci_lock_rescan_remove();
pcibios_remove_pci_devices(slot->bus);
+ pci_unlock_rescan_remove();
vm_unmap_aliases();
slot->state = NOT_CONFIGURED;
@@ -436,7 +446,3 @@ struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
module_init(rpaphp_init);
module_exit(rpaphp_exit);
-
-EXPORT_SYMBOL_GPL(rpaphp_add_slot);
-EXPORT_SYMBOL_GPL(rpaphp_slot_head);
-EXPORT_SYMBOL_GPL(rpaphp_get_drc_props);
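
The be32_to_cpu() conversions added above are needed because Open Firmware properties such as ibm,drc-indexes are stored big-endian, so on a little-endian kernel the raw cells must be byte-swapped before use. A small sketch of the convention; first_drc_index is a hypothetical helper:

	/* the cell arrays here use element 0 as the entry count, with the
	 * entries following; every cell is big-endian on the wire */
	static u32 first_drc_index(const __be32 *indexes)
	{
		return be32_to_cpu(indexes[1]);
	}
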
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 513e1e28239..9243f3e7a1c 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -44,7 +44,7 @@ int rpaphp_get_sensor_state(struct slot *slot, int *state)
dbg("%s: slot must be power up to get sensor-state\n",
__func__);
- /* some slots have to be powered up
+ /* some slots have to be powered up
* before get-sensor will succeed.
*/
rc = rtas_set_power_level(slot->power_domain, POWER_ON,
@@ -133,4 +133,3 @@ int rpaphp_enable_slot(struct slot *slot)
return 0;
}
-
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 2ea9cf1a8d0..a6082cc263f 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -1,5 +1,5 @@
/*
- * RPA Virtual I/O device functions
+ * RPA Virtual I/O device functions
* Copyright (C) 2004 Linda Xie <lxie@us.ibm.com>
*
* All rights reserved.
@@ -24,7 +24,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/pci.h>
#include <linux/string.h>
@@ -52,27 +51,27 @@ struct slot *alloc_slot_struct(struct device_node *dn,
int drc_index, char *drc_name, int power_domain)
{
struct slot *slot;
-
+
slot = kzalloc(sizeof(struct slot), GFP_KERNEL);
if (!slot)
goto error_nomem;
slot->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
if (!slot->hotplug_slot)
- goto error_slot;
+ goto error_slot;
slot->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
GFP_KERNEL);
if (!slot->hotplug_slot->info)
goto error_hpslot;
slot->name = kstrdup(drc_name, GFP_KERNEL);
if (!slot->name)
- goto error_info;
+ goto error_info;
slot->dn = dn;
slot->index = drc_index;
slot->power_domain = power_domain;
slot->hotplug_slot->private = slot;
slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops;
slot->hotplug_slot->release = &rpaphp_release_slot;
-
+
return (slot);
error_info:
@@ -92,7 +91,7 @@ static int is_registered(struct slot *slot)
list_for_each_entry(tmp_slot, &rpaphp_slot_head, rpaphp_slot_list) {
if (!strcmp(tmp_slot->name, slot->name))
return 1;
- }
+ }
return 0;
}
@@ -105,7 +104,7 @@ int rpaphp_deregister_slot(struct slot *slot)
__func__, slot->name);
list_del(&slot->rpaphp_slot_list);
-
+
retval = pci_hp_deregister(php_slot);
if (retval)
err("Problem unregistering a slot %s\n", slot->name);
@@ -121,7 +120,7 @@ int rpaphp_register_slot(struct slot *slot)
int retval;
int slotno;
- dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n",
+ dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n",
__func__, slot->dn->full_name, slot->index, slot->name,
slot->power_domain, slot->type);
@@ -129,7 +128,7 @@ int rpaphp_register_slot(struct slot *slot)
if (is_registered(slot)) {
err("rpaphp_register_slot: slot[%s] is already registered\n", slot->name);
return -EAGAIN;
- }
+ }
if (slot->dn->child)
slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
@@ -146,4 +145,3 @@ int rpaphp_register_slot(struct slot *slot)
info("Slot [%s] registered\n", slot->name);
return 0;
}
-
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
new file mode 100644
index 00000000000..d1332d2f873
--- /dev/null
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -0,0 +1,214 @@
+/*
+ * PCI Hot Plug Controller Driver for System z
+ *
+ * Copyright 2012 IBM Corp.
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define COMPONENT "zPCI hpc"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <asm/pci_debug.h>
+#include <asm/sclp.h>
+
+#define SLOT_NAME_SIZE 10
+static LIST_HEAD(s390_hotplug_slot_list);
+
+MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com");
+MODULE_DESCRIPTION("Hot Plug PCI Controller for System z");
+MODULE_LICENSE("GPL");
+
+static int zpci_fn_configured(enum zpci_state state)
+{
+ return state == ZPCI_FN_STATE_CONFIGURED ||
+ state == ZPCI_FN_STATE_ONLINE;
+}
+
+/*
+ * struct slot - slot information for each *physical* slot
+ */
+struct slot {
+ struct list_head slot_list;
+ struct hotplug_slot *hotplug_slot;
+ struct zpci_dev *zdev;
+};
+
+static inline int slot_configure(struct slot *slot)
+{
+ int ret = sclp_pci_configure(slot->zdev->fid);
+
+ zpci_dbg(3, "conf fid:%x, rc:%d\n", slot->zdev->fid, ret);
+ if (!ret)
+ slot->zdev->state = ZPCI_FN_STATE_CONFIGURED;
+
+ return ret;
+}
+
+static inline int slot_deconfigure(struct slot *slot)
+{
+ int ret = sclp_pci_deconfigure(slot->zdev->fid);
+
+ zpci_dbg(3, "deconf fid:%x, rc:%d\n", slot->zdev->fid, ret);
+ if (!ret)
+ slot->zdev->state = ZPCI_FN_STATE_STANDBY;
+
+ return ret;
+}
+
+static int enable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+ int rc;
+
+ if (slot->zdev->state != ZPCI_FN_STATE_STANDBY)
+ return -EIO;
+
+ rc = slot_configure(slot);
+ if (rc)
+ return rc;
+
+ rc = zpci_enable_device(slot->zdev);
+ if (rc)
+ goto out_deconfigure;
+
+ pci_scan_slot(slot->zdev->bus, ZPCI_DEVFN);
+ pci_lock_rescan_remove();
+ pci_bus_add_devices(slot->zdev->bus);
+ pci_unlock_rescan_remove();
+
+ return rc;
+
+out_deconfigure:
+ slot_deconfigure(slot);
+ return rc;
+}
+
+static int disable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+ int rc;
+
+ if (!zpci_fn_configured(slot->zdev->state))
+ return -EIO;
+
+ if (slot->zdev->pdev)
+ pci_stop_and_remove_bus_device_locked(slot->zdev->pdev);
+
+ rc = zpci_disable_device(slot->zdev);
+ if (rc)
+ return rc;
+
+ return slot_deconfigure(slot);
+}
+
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ switch (slot->zdev->state) {
+ case ZPCI_FN_STATE_STANDBY:
+ *value = 0;
+ break;
+ default:
+ *value = 1;
+ break;
+ }
+ return 0;
+}
+
+static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ /* if the slot exists it always contains a function */
+ *value = 1;
+ return 0;
+}
+
+static void release_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
+}
+
+static struct hotplug_slot_ops s390_hotplug_slot_ops = {
+ .enable_slot = enable_slot,
+ .disable_slot = disable_slot,
+ .get_power_status = get_power_status,
+ .get_adapter_status = get_adapter_status,
+};
+
+int zpci_init_slot(struct zpci_dev *zdev)
+{
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *info;
+ char name[SLOT_NAME_SIZE];
+ struct slot *slot;
+ int rc;
+
+ if (!zdev)
+ return 0;
+
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ goto error;
+
+ hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
+ if (!hotplug_slot)
+ goto error_hp;
+ hotplug_slot->private = slot;
+
+ slot->hotplug_slot = hotplug_slot;
+ slot->zdev = zdev;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto error_info;
+ hotplug_slot->info = info;
+
+ hotplug_slot->ops = &s390_hotplug_slot_ops;
+ hotplug_slot->release = &release_slot;
+
+ get_power_status(hotplug_slot, &info->power_status);
+ get_adapter_status(hotplug_slot, &info->adapter_status);
+
+ snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
+ rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
+ ZPCI_DEVFN, name);
+ if (rc)
+ goto error_reg;
+
+ list_add(&slot->slot_list, &s390_hotplug_slot_list);
+ return 0;
+
+error_reg:
+ kfree(info);
+error_info:
+ kfree(hotplug_slot);
+error_hp:
+ kfree(slot);
+error:
+ return -ENOMEM;
+}
+
+void zpci_exit_slot(struct zpci_dev *zdev)
+{
+ struct list_head *tmp, *n;
+ struct slot *slot;
+
+ list_for_each_safe(tmp, n, &s390_hotplug_slot_list) {
+ slot = list_entry(tmp, struct slot, slot_list);
+ if (slot->zdev != zdev)
+ continue;
+ list_del(&slot->slot_list);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+}
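The new s390 driver follows the registration pattern shared by the other drivers in this directory: allocate a struct slot plus its struct hotplug_slot and hotplug_slot_info, point ->private back at the slot, fill in ->ops and ->release, then hand everything to pci_hp_register(). A minimal sketch of that flow against the era's <linux/pci_hotplug.h> API (the demo_* names are illustrative, not from the patch):

#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>

static int demo_enable(struct hotplug_slot *hs)  { return 0; }
static int demo_disable(struct hotplug_slot *hs) { return 0; }

static struct hotplug_slot_ops demo_ops = {
	.enable_slot  = demo_enable,
	.disable_slot = demo_disable,
};

static void demo_release(struct hotplug_slot *hs)
{
	kfree(hs->info);
	kfree(hs);
}

static int demo_register_slot(struct pci_bus *bus, int devnr)
{
	struct hotplug_slot *hs;
	int rc;

	hs = kzalloc(sizeof(*hs), GFP_KERNEL);
	if (!hs)
		return -ENOMEM;
	hs->info = kzalloc(sizeof(*hs->info), GFP_KERNEL);
	if (!hs->info) {
		kfree(hs);
		return -ENOMEM;
	}
	hs->ops = &demo_ops;
	hs->release = demo_release;	/* invoked by pci_hp_deregister() */

	rc = pci_hp_register(hs, bus, devnr, "demo");
	if (rc)
		demo_release(hs);
	return rc;
}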
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 72d507b6a2a..bada2099987 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -9,6 +9,7 @@
* Work to add BIOS PROM support was completed by Mike Habeck.
*/
+#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -29,7 +30,6 @@
#include <asm/sn/sn_feature_sets.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/types.h>
-#include <linux/acpi.h>
#include <asm/sn/acpi.h>
#include "../pci.h"
@@ -188,7 +188,7 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
return 0;
}
-static struct hotplug_slot * sn_hp_destroy(void)
+static struct hotplug_slot *sn_hp_destroy(void)
{
struct slot *slot;
struct pci_slot *pci_slot;
@@ -250,15 +250,13 @@ static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
}
if (rc == PCI_L1_ERR) {
- dev_dbg(&slot->pci_bus->self->dev,
- "L1 failure %d with message: %s",
+ dev_dbg(&slot->pci_bus->self->dev, "L1 failure %d with message: %s",
resp.resp_sub_errno, resp.resp_l1_msg);
return -EPERM;
}
if (rc) {
- dev_dbg(&slot->pci_bus->self->dev,
- "insert failed with error %d sub-error %d\n",
+ dev_dbg(&slot->pci_bus->self->dev, "insert failed with error %d sub-error %d\n",
rc, resp.resp_sub_errno);
return -EIO;
}
@@ -288,21 +286,18 @@ static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_EMPTY_33MHZ)) {
- dev_dbg(&slot->pci_bus->self->dev,
- "Cannot remove last 33MHz card\n");
+ dev_dbg(&slot->pci_bus->self->dev, "Cannot remove last 33MHz card\n");
return -EPERM;
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_L1_ERR)) {
- dev_dbg(&slot->pci_bus->self->dev,
- "L1 failure %d with message \n%s\n",
+ dev_dbg(&slot->pci_bus->self->dev, "L1 failure %d with message \n%s\n",
resp.resp_sub_errno, resp.resp_l1_msg);
return -EPERM;
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && rc) {
- dev_dbg(&slot->pci_bus->self->dev,
- "remove failed with error %d sub-error %d\n",
+ dev_dbg(&slot->pci_bus->self->dev, "remove failed with error %d sub-error %d\n",
rc, resp.resp_sub_errno);
return -EIO;
}
@@ -334,7 +329,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
struct slot *slot = bss_hotplug_slot->private;
struct pci_bus *new_bus = NULL;
struct pci_dev *dev;
- int func, num_funcs;
+ int num_funcs;
int new_ppb = 0;
int rc;
char *ssdt = NULL;
@@ -381,31 +376,26 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
* to the Linux PCI interface and tell the drivers
* about them.
*/
- for (func = 0; func < num_funcs; func++) {
- dev = pci_get_slot(slot->pci_bus,
- PCI_DEVFN(slot->device_num + 1,
- PCI_FUNC(func)));
- if (dev) {
- /* Need to do slot fixup on PPB before fixup of children
- * (PPB's pcidev_info needs to be in pcidev_info list
- * before child's SN_PCIDEV_INFO() call to setup
- * pdi_host_pcidev_info).
- */
- pcibios_fixup_device_resources(dev);
- if (SN_ACPI_BASE_SUPPORT())
- sn_acpi_slot_fixup(dev);
- else
- sn_io_slot_fixup(dev);
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- unsigned char sec_bus;
- pci_read_config_byte(dev, PCI_SECONDARY_BUS,
- &sec_bus);
- new_bus = pci_add_new_bus(dev->bus, dev,
- sec_bus);
- pci_scan_child_bus(new_bus);
+ list_for_each_entry(dev, &slot->pci_bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != slot->device_num + 1)
+ continue;
+
+ /* Need to do slot fixup on PPB before fixup of children
+ * (PPB's pcidev_info needs to be in pcidev_info list
+ * before child's SN_PCIDEV_INFO() call to setup
+ * pdi_host_pcidev_info).
+ */
+ pcibios_fixup_device_resources(dev);
+ if (SN_ACPI_BASE_SUPPORT())
+ sn_acpi_slot_fixup(dev);
+ else
+ sn_io_slot_fixup(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_hp_add_bridge(dev);
+ if (dev->subordinate) {
+ new_bus = dev->subordinate;
new_ppb = 1;
}
- pci_dev_put(dev);
}
}
@@ -414,20 +404,19 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
if (SN_ACPI_BASE_SUPPORT() && ssdt) {
unsigned long long adr;
struct acpi_device *pdevice;
- struct acpi_device *device;
acpi_handle phandle;
acpi_handle chandle = NULL;
acpi_handle rethandle;
acpi_status ret;
- phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
+ phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
if (acpi_bus_get_device(phandle, &pdevice)) {
- dev_dbg(&slot->pci_bus->self->dev,
- "no parent device, assuming NULL\n");
+ dev_dbg(&slot->pci_bus->self->dev, "no parent device, assuming NULL\n");
pdevice = NULL;
}
+ acpi_scan_lock_acquire();
/*
* Walk the rootbus node's immediate children looking for
* the slot's device node(s). There can be more than
@@ -450,36 +439,33 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
if (ACPI_SUCCESS(ret) &&
(adr>>16) == (slot->device_num + 1)) {
- ret = acpi_bus_add(&device, pdevice, chandle,
- ACPI_BUS_TYPE_DEVICE);
+ ret = acpi_bus_scan(chandle);
if (ACPI_FAILURE(ret)) {
- printk(KERN_ERR "%s: acpi_bus_add "
- "failed (0x%x) for slot %d "
- "func %d\n", __func__,
- ret, (int)(adr>>16),
+ printk(KERN_ERR "%s: acpi_bus_scan failed (0x%x) for slot %d func %d\n",
+ __func__, ret, (int)(adr>>16),
(int)(adr&0xffff));
/* try to continue on */
- } else {
- acpi_bus_start(device);
}
}
}
+ acpi_scan_lock_release();
}
+ pci_lock_rescan_remove();
+
/* Call the driver for the new device */
pci_bus_add_devices(slot->pci_bus);
/* Call the drivers for the new devices subordinate to PPB */
if (new_ppb)
pci_bus_add_devices(new_bus);
+ pci_unlock_rescan_remove();
mutex_unlock(&sn_hotplug_mutex);
if (rc == 0)
- dev_dbg(&slot->pci_bus->self->dev,
- "insert operation successful\n");
+ dev_dbg(&slot->pci_bus->self->dev, "insert operation successful\n");
else
- dev_dbg(&slot->pci_bus->self->dev,
- "insert operation failed rc = %d\n", rc);
+ dev_dbg(&slot->pci_bus->self->dev, "insert operation failed rc = %d\n", rc);
return rc;
}
@@ -487,8 +473,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
{
struct slot *slot = bss_hotplug_slot->private;
- struct pci_dev *dev;
- int func;
+ struct pci_dev *dev, *temp;
int rc;
acpi_owner_id ssdt_id = 0;
@@ -503,7 +488,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
/* free the ACPI resources for the slot */
if (SN_ACPI_BASE_SUPPORT() &&
- PCI_CONTROLLER(slot->pci_bus)->acpi_handle) {
+ PCI_CONTROLLER(slot->pci_bus)->companion) {
unsigned long long adr;
struct acpi_device *device;
acpi_handle phandle;
@@ -512,8 +497,9 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
acpi_status ret;
/* Get the rootbus node pointer */
- phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
+ phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
+ acpi_scan_lock_acquire();
/*
* Walk the rootbus node's immediate children looking for
* the slot's device node(s). There can be more than
@@ -541,31 +527,31 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
ret = acpi_bus_get_device(chandle,
&device);
if (ACPI_SUCCESS(ret))
- acpi_bus_trim(device, 1);
+ acpi_bus_trim(device);
}
}
-
+ acpi_scan_lock_release();
}
+ pci_lock_rescan_remove();
/* Free the SN resources assigned to the Linux device.*/
- for (func = 0; func < 8; func++) {
- dev = pci_get_slot(slot->pci_bus,
- PCI_DEVFN(slot->device_num + 1,
- PCI_FUNC(func)));
- if (dev) {
- sn_bus_free_data(dev);
- pci_remove_bus_device(dev);
- pci_dev_put(dev);
- }
+ list_for_each_entry_safe(dev, temp, &slot->pci_bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != slot->device_num + 1)
+ continue;
+
+ pci_dev_get(dev);
+ sn_bus_free_data(dev);
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
}
+ pci_unlock_rescan_remove();
/* Remove the SSDT for the slot from the ACPI namespace */
if (SN_ACPI_BASE_SUPPORT() && ssdt_id) {
acpi_status ret;
ret = acpi_unload_table_id(ssdt_id);
if (ACPI_FAILURE(ret)) {
- printk(KERN_ERR "%s: acpi_unload_table_id "
- "failed (0x%x) for id %d\n",
+ printk(KERN_ERR "%s: acpi_unload_table_id failed (0x%x) for id %d\n",
__func__, ret, ssdt_id);
/* try to continue on */
}
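Both enable_slot() and disable_slot() above replace the old "probe functions 0..7 with pci_get_slot()" idiom with a walk of the bus's device list filtered by PCI_SLOT(), bracketed by the new rescan/remove lock. The removal side of that pattern reduced to its core (a sketch; the demo_ name is illustrative):

static void demo_remove_slot(struct pci_bus *bus, int devnr)
{
	struct pci_dev *dev, *tmp;

	pci_lock_rescan_remove();
	list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
		if (PCI_SLOT(dev->devfn) != devnr)
			continue;
		pci_dev_get(dev);		/* keep dev alive across removal */
		pci_stop_and_remove_bus_device(dev);
		pci_dev_put(dev);
	}
	pci_unlock_rescan_remove();
}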
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index e0c90e643b5..5897d516427 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -43,11 +43,9 @@
#define MY_NAME THIS_MODULE->name
#endif
-extern int shpchp_poll_mode;
+extern bool shpchp_poll_mode;
extern int shpchp_poll_time;
-extern int shpchp_debug;
-extern struct workqueue_struct *shpchp_wq;
-extern struct workqueue_struct *shpchp_ordered_wq;
+extern bool shpchp_debug;
#define dbg(format, arg...) \
do { \
@@ -91,6 +89,7 @@ struct slot {
struct list_head slot_list;
struct delayed_work work; /* work for button event */
struct mutex lock;
+ struct workqueue_struct *wq;
u8 hp_slot;
};
@@ -169,19 +168,19 @@ struct controller {
#define WRONG_BUS_FREQUENCY 0x0000000D
#define POWER_FAILURE 0x0000000E
-extern int __must_check shpchp_create_ctrl_files(struct controller *ctrl);
-extern void shpchp_remove_ctrl_files(struct controller *ctrl);
-extern int shpchp_sysfs_enable_slot(struct slot *slot);
-extern int shpchp_sysfs_disable_slot(struct slot *slot);
-extern u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl);
-extern u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl);
-extern u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl);
-extern u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl);
-extern int shpchp_configure_device(struct slot *p_slot);
-extern int shpchp_unconfigure_device(struct slot *p_slot);
-extern void cleanup_slots(struct controller *ctrl);
-extern void shpchp_queue_pushbutton_work(struct work_struct *work);
-extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
+int __must_check shpchp_create_ctrl_files(struct controller *ctrl);
+void shpchp_remove_ctrl_files(struct controller *ctrl);
+int shpchp_sysfs_enable_slot(struct slot *slot);
+int shpchp_sysfs_disable_slot(struct slot *slot);
+u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl);
+u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl);
+u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl);
+u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl);
+int shpchp_configure_device(struct slot *p_slot);
+int shpchp_unconfigure_device(struct slot *p_slot);
+void cleanup_slots(struct controller *ctrl);
+void shpchp_queue_pushbutton_work(struct work_struct *work);
+int shpc_init(struct controller *ctrl, struct pci_dev *pdev);
static inline const char *slot_name(struct slot *slot)
{
@@ -192,7 +191,7 @@ static inline const char *slot_name(struct slot *slot)
#include <linux/pci-acpi.h>
static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
{
- u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;
+ u32 flags = OSC_PCI_SHPC_NATIVE_HP_CONTROL;
return acpi_get_hp_hw_control_from_firmware(dev, flags);
}
#else
@@ -217,13 +216,13 @@ struct ctrl_reg {
/* offsets to the controller registers based on the above structure layout */
enum ctrl_offsets {
- BASE_OFFSET = offsetof(struct ctrl_reg, base_offset),
- SLOT_AVAIL1 = offsetof(struct ctrl_reg, slot_avail1),
+ BASE_OFFSET = offsetof(struct ctrl_reg, base_offset),
+ SLOT_AVAIL1 = offsetof(struct ctrl_reg, slot_avail1),
SLOT_AVAIL2 = offsetof(struct ctrl_reg, slot_avail2),
- SLOT_CONFIG = offsetof(struct ctrl_reg, slot_config),
+ SLOT_CONFIG = offsetof(struct ctrl_reg, slot_config),
SEC_BUS_CONFIG = offsetof(struct ctrl_reg, sec_bus_config),
MSI_CTRL = offsetof(struct ctrl_reg, msi_ctrl),
- PROG_INTERFACE = offsetof(struct ctrl_reg, prog_interface),
+ PROG_INTERFACE = offsetof(struct ctrl_reg, prog_interface),
CMD = offsetof(struct ctrl_reg, cmd),
CMD_STATUS = offsetof(struct ctrl_reg, cmd_status),
INTR_LOC = offsetof(struct ctrl_reg, intr_loc),
@@ -296,7 +295,7 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot)
pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, rse_set);
}
/* restore MiscII register */
- pci_read_config_dword( p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, &pcix_misc2_temp );
+ pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, &pcix_misc2_temp);
if (p_slot->ctrl->pcix_misc2_reg & SERRFATALENABLE_MASK)
pcix_misc2_temp |= SERRFATALENABLE_MASK;
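The int-to-bool switch for shpchp_poll_mode and shpchp_debug matters because both are module parameters in shpchp_core.c and module_param(..., bool, ...) requires a genuine bool variable on modern kernels; the explicit extern on function declarations is dropped per kernel style since it is implicit. The parameter pattern as a sketch (demo_ names illustrative):

#include <linux/module.h>

static bool demo_poll_mode;	/* must be bool for the "bool" param type */
module_param(demo_poll_mode, bool, 0444);
MODULE_PARM_DESC(demo_poll_mode, "Poll events instead of using interrupts");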
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index aca972bbfb4..294ef4b10cf 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -36,11 +36,9 @@
#include "shpchp.h"
/* Global variables */
-int shpchp_debug;
-int shpchp_poll_mode;
+bool shpchp_debug;
+bool shpchp_poll_mode;
int shpchp_poll_time;
-struct workqueue_struct *shpchp_wq;
-struct workqueue_struct *shpchp_ordered_wq;
#define DRIVER_VERSION "0.4"
#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -99,22 +97,28 @@ static int init_slots(struct controller *ctrl)
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
- int retval = -ENOMEM;
+ int retval;
int i;
for (i = 0; i < ctrl->num_slots; i++) {
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
+ if (!slot) {
+ retval = -ENOMEM;
goto error;
+ }
hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot)
+ if (!hotplug_slot) {
+ retval = -ENOMEM;
goto error_slot;
+ }
slot->hotplug_slot = hotplug_slot;
info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
+ if (!info) {
+ retval = -ENOMEM;
goto error_hpslot;
+ }
hotplug_slot->info = info;
slot->hp_slot = i;
@@ -123,6 +127,13 @@ static int init_slots(struct controller *ctrl)
slot->device = ctrl->slot_device_offset + i;
slot->hpc_ops = ctrl->hpc_ops;
slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i);
+
+ slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number);
+ if (!slot->wq) {
+ retval = -ENOMEM;
+ goto error_info;
+ }
+
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work);
@@ -132,17 +143,16 @@ static int init_slots(struct controller *ctrl)
snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
hotplug_slot->ops = &shpchp_hotplug_slot_ops;
- ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
- "hp_slot=%x sun=%x slot_device_offset=%x\n",
- pci_domain_nr(ctrl->pci_dev->subordinate),
- slot->bus, slot->device, slot->hp_slot, slot->number,
- ctrl->slot_device_offset);
+ ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x hp_slot=%x sun=%x slot_device_offset=%x\n",
+ pci_domain_nr(ctrl->pci_dev->subordinate),
+ slot->bus, slot->device, slot->hp_slot, slot->number,
+ ctrl->slot_device_offset);
retval = pci_hp_register(slot->hotplug_slot,
ctrl->pci_dev->subordinate, slot->device, name);
if (retval) {
ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
retval);
- goto error_info;
+ goto error_slotwq;
}
get_power_status(hotplug_slot, &info->power_status);
@@ -154,6 +164,8 @@ static int init_slots(struct controller *ctrl)
}
return 0;
+error_slotwq:
+ destroy_workqueue(slot->wq);
error_info:
kfree(info);
error_hpslot:
@@ -174,8 +186,7 @@ void cleanup_slots(struct controller *ctrl)
slot = list_entry(tmp, struct slot, slot_list);
list_del(&slot->slot_list);
cancel_delayed_work(&slot->work);
- flush_workqueue(shpchp_wq);
- flush_workqueue(shpchp_ordered_wq);
+ destroy_workqueue(slot->wq);
pci_hp_deregister(slot->hotplug_slot);
}
}
@@ -278,8 +289,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
static int is_shpc_capable(struct pci_dev *dev)
{
- if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device ==
- PCI_DEVICE_ID_AMD_GOLAM_7450))
+ if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
return 1;
if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
return 0;
@@ -358,25 +369,12 @@ static struct pci_driver shpc_driver = {
static int __init shpcd_init(void)
{
- int retval = 0;
-
- shpchp_wq = alloc_ordered_workqueue("shpchp", 0);
- if (!shpchp_wq)
- return -ENOMEM;
-
- shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0);
- if (!shpchp_ordered_wq) {
- destroy_workqueue(shpchp_wq);
- return -ENOMEM;
- }
+ int retval;
retval = pci_register_driver(&shpc_driver);
dbg("%s: pci_register_driver = %d\n", __func__, retval);
info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
- if (retval) {
- destroy_workqueue(shpchp_ordered_wq);
- destroy_workqueue(shpchp_wq);
- }
+
return retval;
}
@@ -384,8 +382,6 @@ static void __exit shpcd_cleanup(void)
{
dbg("unload_shpchpd()\n");
pci_unregister_driver(&shpc_driver);
- destroy_workqueue(shpchp_ordered_wq);
- destroy_workqueue(shpchp_wq);
info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
}
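The two global workqueues are replaced by one queue per slot, created in init_slots() and torn down in cleanup_slots(); this keeps one slot's events serialized without a driver-wide bottleneck and removes the workqueue failure paths from module init. The lifecycle in isolation (a sketch; demo_slot stands in for struct slot):

struct demo_slot {
	struct workqueue_struct *wq;
	struct delayed_work work;
	u32 number;
};

static int demo_slot_setup(struct demo_slot *slot)
{
	slot->wq = alloc_workqueue("demo-%d", 0, 0, slot->number);
	return slot->wq ? 0 : -ENOMEM;
}

static void demo_slot_teardown(struct demo_slot *slot)
{
	cancel_delayed_work(&slot->work);	/* pending button work */
	destroy_workqueue(slot->wq);		/* flushes queued items */
}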
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index b00b09bdd38..a81fb67ea9a 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -51,7 +51,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
info->p_slot = p_slot;
INIT_WORK(&info->work, interrupt_event_handler);
- queue_work(shpchp_wq, &info->work);
+ queue_work(p_slot->wq, &info->work);
return 0;
}
@@ -162,7 +162,7 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) {
+ if (!(p_slot->hpc_ops->query_power_fault(p_slot))) {
/*
* Power fault Cleared
*/
@@ -196,8 +196,8 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
ctrl_dbg(ctrl, "Change speed to %d\n", speed);
if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed))) {
- ctrl_err(ctrl, "%s: Issue of set bus speed mode command "
- "failed\n", __func__);
+ ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
+ __func__);
return WRONG_BUS_FREQUENCY;
}
return rc;
@@ -215,8 +215,8 @@ static int fix_bus_speed(struct controller *ctrl, struct slot *pslot,
*/
if (flag) {
if (asp < bsp) {
- ctrl_err(ctrl, "Speed of bus %x and adapter %x "
- "mismatch\n", bsp, asp);
+ ctrl_err(ctrl, "Speed of bus %x and adapter %x mismatch\n",
+ bsp, asp);
rc = WRONG_BUS_FREQUENCY;
}
return rc;
@@ -250,8 +250,7 @@ static int board_added(struct slot *p_slot)
hp_slot = p_slot->device - ctrl->slot_device_offset;
- ctrl_dbg(ctrl,
- "%s: p_slot->device, slot_offset, hp_slot = %d, %d ,%d\n",
+ ctrl_dbg(ctrl, "%s: p_slot->device, slot_offset, hp_slot = %d, %d ,%d\n",
__func__, p_slot->device, ctrl->slot_device_offset, hp_slot);
/* Power on slot without connecting to bus */
@@ -262,12 +261,9 @@ static int board_added(struct slot *p_slot)
}
if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
- if (slots_not_empty)
- return WRONG_BUS_FREQUENCY;
-
if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) {
- ctrl_err(ctrl, "%s: Issue of set bus speed mode command"
- " failed\n", __func__);
+ ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
+ __func__);
return WRONG_BUS_FREQUENCY;
}
@@ -280,20 +276,19 @@ static int board_added(struct slot *p_slot)
rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp);
if (rc) {
- ctrl_err(ctrl, "Can't get adapter speed or "
- "bus mode mismatch\n");
+ ctrl_err(ctrl, "Can't get adapter speed or bus mode mismatch\n");
return WRONG_BUS_FREQUENCY;
}
- bsp = ctrl->pci_dev->bus->cur_bus_speed;
- msp = ctrl->pci_dev->bus->max_bus_speed;
+ bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
+ msp = ctrl->pci_dev->subordinate->max_bus_speed;
/* Check if there are other slots or devices on the same bus */
if (!list_empty(&ctrl->pci_dev->subordinate->devices))
slots_not_empty = 1;
- ctrl_dbg(ctrl, "%s: slots_not_empty %d, adapter_speed %d, bus_speed %d,"
- " max_bus_speed %d\n", __func__, slots_not_empty, asp,
+ ctrl_dbg(ctrl, "%s: slots_not_empty %d, adapter_speed %d, bus_speed %d, max_bus_speed %d\n",
+ __func__, slots_not_empty, asp,
bsp, msp);
rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, asp, bsp, msp);
@@ -456,7 +451,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
kfree(info);
goto out;
}
- queue_work(shpchp_ordered_wq, &info->work);
+ queue_work(p_slot->wq, &info->work);
out:
mutex_unlock(&p_slot->lock);
}
@@ -493,18 +488,18 @@ static void handle_button_press_event(struct slot *p_slot)
p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
if (getstatus) {
p_slot->state = BLINKINGOFF_STATE;
- ctrl_info(ctrl, "PCI slot #%s - powering off due to "
- "button press.\n", slot_name(p_slot));
+ ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n",
+ slot_name(p_slot));
} else {
p_slot->state = BLINKINGON_STATE;
- ctrl_info(ctrl, "PCI slot #%s - powering on due to "
- "button press.\n", slot_name(p_slot));
+ ctrl_info(ctrl, "PCI slot #%s - powering on due to button press\n",
+ slot_name(p_slot));
}
/* blink green LED and turn off amber */
p_slot->hpc_ops->green_led_blink(p_slot);
p_slot->hpc_ops->set_attention_status(p_slot, 0);
- queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ);
+ queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
break;
case BLINKINGOFF_STATE:
case BLINKINGON_STATE:
@@ -521,8 +516,8 @@ static void handle_button_press_event(struct slot *p_slot)
else
p_slot->hpc_ops->green_led_off(p_slot);
p_slot->hpc_ops->set_attention_status(p_slot, 0);
- ctrl_info(ctrl, "PCI slot #%s - action canceled due to "
- "button press\n", slot_name(p_slot));
+ ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n",
+ slot_name(p_slot));
p_slot->state = STATIC_STATE;
break;
case POWEROFF_STATE:
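With per-slot queues, every event path in shpchp_ctrl.c now queues onto p_slot->wq, so interrupt events, pushbutton work, and the five-second power-change grace period for a given slot stay ordered relative to each other. For instance (sketch, reusing the demo_slot above):

static void demo_button_press(struct demo_slot *slot)
{
	/* give the user five seconds to cancel before acting */
	queue_delayed_work(slot->wq, &slot->work, 5 * HZ);
}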
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 36547f0ce30..29e22352822 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -116,7 +116,7 @@
#define SLOT_REG_RSVDZ_MASK ((1 << 15) | (7 << 21))
/*
- * SHPC Command Code definitnions
+ * SHPC Command Code definitions
*
* Slot Operation 00h - 3Fh
* Set Bus Segment Speed/Mode A 40h - 47h
@@ -341,8 +341,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
cmd_status = hpc_check_cmd_status(slot->ctrl);
if (cmd_status) {
- ctrl_err(ctrl,
- "Failed to issued command 0x%x (error code = %d)\n",
+ ctrl_err(ctrl, "Failed to issued command 0x%x (error code = %d)\n",
cmd, cmd_status);
retval = -EIO;
}
@@ -404,7 +403,7 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_power_status(struct slot * slot, u8 *status)
+static int hpc_get_power_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -528,7 +527,7 @@ static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode)
return retval;
}
-static int hpc_query_power_fault(struct slot * slot)
+static int hpc_query_power_fault(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -614,7 +613,7 @@ static void hpc_release_ctlr(struct controller *ctrl)
release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
}
-static int hpc_power_on_slot(struct slot * slot)
+static int hpc_power_on_slot(struct slot *slot)
{
int retval;
@@ -625,7 +624,7 @@ static int hpc_power_on_slot(struct slot * slot)
return retval;
}
-static int hpc_slot_enable(struct slot * slot)
+static int hpc_slot_enable(struct slot *slot)
{
int retval;
@@ -638,7 +637,7 @@ static int hpc_slot_enable(struct slot * slot)
return retval;
}
-static int hpc_slot_disable(struct slot * slot)
+static int hpc_slot_disable(struct slot *slot)
{
int retval;
@@ -720,7 +719,7 @@ static int shpc_get_cur_bus_speed(struct controller *ctrl)
}
-static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
+static int hpc_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
{
int retval;
struct controller *ctrl = slot->ctrl;
@@ -944,8 +943,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
ctrl_dbg(ctrl, "Hotplug Controller:\n");
- if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device ==
- PCI_DEVICE_ID_AMD_GOLAM_7450)) {
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) {
/* amd shpc driver doesn't use Base Offset; assume 0 */
ctrl->mmio_base = pci_resource_start(pdev, 0);
ctrl->mmio_size = pci_resource_len(pdev, 0);
@@ -974,8 +973,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
for (i = 0; i < 9 + num_slots; i++) {
rc = shpc_indirect_read(ctrl, i, &tempdword);
if (rc) {
- ctrl_err(ctrl,
- "Cannot read creg (index = %d)\n", i);
+ ctrl_err(ctrl, "Cannot read creg (index = %d)\n",
+ i);
goto abort;
}
ctrl_dbg(ctrl, " offset %d: value %x\n", i, tempdword);
@@ -1060,10 +1059,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
/* Installs the interrupt handler */
rc = pci_enable_msi(pdev);
if (rc) {
- ctrl_info(ctrl,
- "Can't get msi for the hotplug controller\n");
- ctrl_info(ctrl,
- "Use INTx for the hotplug controller\n");
+ ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
+ ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
}
rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
@@ -1071,8 +1068,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
ctrl_dbg(ctrl, "request_irq %d (returns %d)\n",
ctrl->pci_dev->irq, rc);
if (rc) {
- ctrl_err(ctrl, "Can't get irq %d for the hotplug "
- "controller\n", ctrl->pci_dev->irq);
+ ctrl_err(ctrl, "Can't get irq %d for the hotplug controller\n",
+ ctrl->pci_dev->irq);
goto abort_iounmap;
}
}
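Besides the message reflows, the hunk above preserves shpc_init()'s MSI-with-INTx-fallback setup: MSI is attempted first, INTx is used when that fails, and the handler is registered IRQF_SHARED either way. That pattern in isolation (a sketch; the demo_ name is illustrative):

#include <linux/interrupt.h>
#include <linux/pci.h>

static int demo_request_hpc_irq(struct pci_dev *pdev, irq_handler_t isr,
				void *ctx)
{
	if (pci_enable_msi(pdev))
		dev_info(&pdev->dev, "no MSI, falling back to INTx\n");
	/* pdev->irq reflects the MSI vector when enabling succeeded */
	return request_irq(pdev->irq, isr, IRQF_SHARED, "demo-hpc", ctx);
}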
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index a2ccfcd3c29..469454e0cc4 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -34,99 +34,89 @@
#include "../pci.h"
#include "shpchp.h"
-int __ref shpchp_configure_device(struct slot *p_slot)
+int shpchp_configure_device(struct slot *p_slot)
{
struct pci_dev *dev;
- struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
- int num, fn;
struct controller *ctrl = p_slot->ctrl;
+ struct pci_dev *bridge = ctrl->pci_dev;
+ struct pci_bus *parent = bridge->subordinate;
+ int num, ret = 0;
+
+ pci_lock_rescan_remove();
dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
if (dev) {
- ctrl_err(ctrl, "Device %s already exists "
- "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev),
- pci_domain_nr(parent), p_slot->bus, p_slot->device);
+ ctrl_err(ctrl, "Device %s already exists at %04x:%02x:%02x, cannot hot-add\n",
+ pci_name(dev), pci_domain_nr(parent),
+ p_slot->bus, p_slot->device);
pci_dev_put(dev);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0));
if (num == 0) {
ctrl_err(ctrl, "No new device found\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
- for (fn = 0; fn < 8; fn++) {
- dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn));
- if (!dev)
+ list_for_each_entry(dev, &parent->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != p_slot->device)
+ continue;
+ if (pci_is_bridge(dev))
+ pci_hp_add_bridge(dev);
+ }
+
+ pci_assign_unassigned_bridge_resources(bridge);
+
+ list_for_each_entry(dev, &parent->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != p_slot->device)
continue;
- if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
- (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
- /* Find an unused bus number for the new bridge */
- struct pci_bus *child;
- unsigned char busnr, start = parent->secondary;
- unsigned char end = parent->subordinate;
- for (busnr = start; busnr <= end; busnr++) {
- if (!pci_find_bus(pci_domain_nr(parent),
- busnr))
- break;
- }
- if (busnr > end) {
- ctrl_err(ctrl,
- "No free bus for hot-added bridge\n");
- pci_dev_put(dev);
- continue;
- }
- child = pci_add_new_bus(parent, dev, busnr);
- if (!child) {
- ctrl_err(ctrl, "Cannot add new bus for %s\n",
- pci_name(dev));
- pci_dev_put(dev);
- continue;
- }
- child->subordinate = pci_do_scan_bus(child);
- pci_bus_size_bridges(child);
- }
pci_configure_slot(dev);
- pci_dev_put(dev);
}
- pci_bus_assign_resources(parent);
pci_bus_add_devices(parent);
- pci_enable_bridges(parent);
- return 0;
+
+ out:
+ pci_unlock_rescan_remove();
+ return ret;
}
int shpchp_unconfigure_device(struct slot *p_slot)
{
int rc = 0;
- int j;
u8 bctl = 0;
struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
+ struct pci_dev *dev, *temp;
struct controller *ctrl = p_slot->ctrl;
ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n",
__func__, pci_domain_nr(parent), p_slot->bus, p_slot->device);
- for (j = 0; j < 8 ; j++) {
- struct pci_dev *temp = pci_get_slot(parent,
- (p_slot->device << 3) | j);
- if (!temp)
+ pci_lock_rescan_remove();
+
+ list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != p_slot->device)
continue;
- if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
+
+ pci_dev_get(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
if (bctl & PCI_BRIDGE_CTL_VGA) {
ctrl_err(ctrl,
"Cannot remove display device %s\n",
- pci_name(temp));
- pci_dev_put(temp);
+ pci_name(dev));
+ pci_dev_put(dev);
rc = -EINVAL;
break;
}
}
- pci_remove_bus_device(temp);
- pci_dev_put(temp);
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
}
+
+ pci_unlock_rescan_remove();
return rc;
}
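The rewritten shpchp_configure_device() is the canonical hot-add sequence of this era: scan the slot, let pci_hp_add_bridge() reserve bus numbers for any bridges found, size and assign resources once at the bridge, then bind drivers, all under the rescan/remove lock. Condensed (a sketch; the demo_ name is illustrative):

static int demo_hot_add(struct pci_dev *bridge, int devnr)
{
	struct pci_bus *parent = bridge->subordinate;
	struct pci_dev *dev;
	int ret = 0;

	pci_lock_rescan_remove();
	if (!pci_scan_slot(parent, PCI_DEVFN(devnr, 0))) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(dev, &parent->devices, bus_list)
		if (PCI_SLOT(dev->devfn) == devnr && pci_is_bridge(dev))
			pci_hp_add_bridge(dev);	/* claim child bus numbers */
	pci_assign_unassigned_bridge_resources(bridge);
	pci_bus_add_devices(parent);		/* bind drivers last */
out:
	pci_unlock_rescan_remove();
	return ret;
}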
diff --git a/drivers/pci/hotplug/shpchp_sysfs.c b/drivers/pci/hotplug/shpchp_sysfs.c
index 071b7dc0094..52875b36046 100644
--- a/drivers/pci/hotplug/shpchp_sysfs.c
+++ b/drivers/pci/hotplug/shpchp_sysfs.c
@@ -38,7 +38,7 @@
static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
- char * out = buf;
+ char *out = buf;
int index, busnr;
struct resource *res;
struct pci_bus *bus;
@@ -50,45 +50,42 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_MEM) &&
!(res->flags & IORESOURCE_PREFETCH)) {
- out += sprintf(out, "start = %8.8llx, "
- "length = %8.8llx\n",
- (unsigned long long)res->start,
- (unsigned long long)(res->end - res->start));
+ out += sprintf(out, "start = %8.8llx, length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)resource_size(res));
}
}
out += sprintf(out, "Free resources: prefetchable memory\n");
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_MEM) &&
(res->flags & IORESOURCE_PREFETCH)) {
- out += sprintf(out, "start = %8.8llx, "
- "length = %8.8llx\n",
- (unsigned long long)res->start,
- (unsigned long long)(res->end - res->start));
+ out += sprintf(out, "start = %8.8llx, length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)resource_size(res));
}
}
out += sprintf(out, "Free resources: IO\n");
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_IO)) {
- out += sprintf(out, "start = %8.8llx, "
- "length = %8.8llx\n",
- (unsigned long long)res->start,
- (unsigned long long)(res->end - res->start));
+ out += sprintf(out, "start = %8.8llx, length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)resource_size(res));
}
}
out += sprintf(out, "Free resources: bus numbers\n");
- for (busnr = bus->secondary; busnr <= bus->subordinate; busnr++) {
+ for (busnr = bus->busn_res.start; busnr <= bus->busn_res.end; busnr++) {
if (!pci_find_bus(pci_domain_nr(bus), busnr))
break;
}
- if (busnr < bus->subordinate)
+ if (busnr < bus->busn_res.end)
out += sprintf(out, "start = %8.8x, length = %8.8x\n",
- busnr, (bus->subordinate - busnr));
+ busnr, (int)(bus->busn_res.end - busnr));
return out - buf;
}
static DEVICE_ATTR (ctrl, S_IRUGO, show_ctrl, NULL);
-int __must_check shpchp_create_ctrl_files (struct controller *ctrl)
+int shpchp_create_ctrl_files(struct controller *ctrl)
{
return device_create_file (&ctrl->pci_dev->dev, &dev_attr_ctrl);
}
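Note the semantic fix hiding in the reflow: resource_size() is res->end - res->start + 1, so the old "end - start" arithmetic under-reported every window by one byte. Sketch:

#include <linux/ioport.h>

/* A [0x1000, 0x1fff] window is 0x1000 bytes long, not 0xfff. */
static resource_size_t demo_window_len(const struct resource *res)
{
	return resource_size(res);	/* == res->end - res->start + 1 */
}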
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index 834842aa5bb..a94dd2c4183 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -9,6 +9,7 @@
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/htirq.h>
@@ -34,7 +35,7 @@ struct ht_irq_cfg {
void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
{
- struct ht_irq_cfg *cfg = get_irq_data(irq);
+ struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
unsigned long flags;
spin_lock_irqsave(&ht_irq_lock, flags);
if (cfg->msg.address_lo != msg->address_lo) {
@@ -53,13 +54,13 @@ void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
{
- struct ht_irq_cfg *cfg = get_irq_data(irq);
+ struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
*msg = cfg->msg;
}
void mask_ht_irq(struct irq_data *data)
{
- struct ht_irq_cfg *cfg = irq_data_get_irq_data(data);
+ struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
struct ht_irq_msg msg = cfg->msg;
msg.address_lo |= 1;
@@ -68,7 +69,7 @@ void mask_ht_irq(struct irq_data *data)
void unmask_ht_irq(struct irq_data *data)
{
- struct ht_irq_cfg *cfg = irq_data_get_irq_data(data);
+ struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
struct ht_irq_msg msg = cfg->msg;
msg.address_lo &= ~1;
@@ -86,12 +87,9 @@ void unmask_ht_irq(struct irq_data *data)
int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
{
struct ht_irq_cfg *cfg;
+ int max_irq, pos, irq;
unsigned long flags;
u32 data;
- int max_irq;
- int pos;
- int irq;
- int node;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
if (!pos)
@@ -104,7 +102,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
spin_unlock_irqrestore(&ht_irq_lock, flags);
max_irq = (data >> 16) & 0xff;
- if ( idx > max_irq)
+ if (idx > max_irq)
return -EINVAL;
cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
@@ -119,14 +117,12 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
cfg->msg.address_lo = 0xffffffff;
cfg->msg.address_hi = 0xffffffff;
- node = dev_to_node(&dev->dev);
- irq = create_irq_nr(0, node);
-
- if (irq <= 0) {
+ irq = irq_alloc_hwirq(dev_to_node(&dev->dev));
+ if (!irq) {
kfree(cfg);
return -EBUSY;
}
- set_irq_data(irq, cfg);
+ irq_set_handler_data(irq, cfg);
if (arch_setup_ht_irq(irq, dev) < 0) {
ht_destroy_irq(irq);
@@ -135,6 +131,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
return irq;
}
+EXPORT_SYMBOL(__ht_create_irq);
/**
* ht_create_irq - create an irq and attach it to a device.
@@ -150,6 +147,7 @@ int ht_create_irq(struct pci_dev *dev, int idx)
{
return __ht_create_irq(dev, idx, NULL);
}
+EXPORT_SYMBOL(ht_create_irq);
/**
* ht_destroy_irq - destroy an irq created with ht_create_irq
@@ -162,14 +160,11 @@ void ht_destroy_irq(unsigned int irq)
{
struct ht_irq_cfg *cfg;
- cfg = get_irq_data(irq);
- set_irq_chip(irq, NULL);
- set_irq_data(irq, NULL);
- destroy_irq(irq);
+ cfg = irq_get_handler_data(irq);
+ irq_set_chip(irq, NULL);
+ irq_set_handler_data(irq, NULL);
+ irq_free_hwirq(irq);
kfree(cfg);
}
-
-EXPORT_SYMBOL(__ht_create_irq);
-EXPORT_SYMBOL(ht_create_irq);
EXPORT_SYMBOL(ht_destroy_irq);
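The htirq changes are a mechanical move to the current genirq accessors: get_irq_data()/set_irq_data() become irq_get_handler_data()/irq_set_handler_data(), irq_data_get_irq_data() becomes irq_data_get_irq_handler_data(), and the create_irq_nr()/destroy_irq() pair gives way to irq_alloc_hwirq()/irq_free_hwirq(), which returns 0 rather than a negative value on failure, hence the changed error test. The attach/detach halves in isolation (a sketch; demo_ names illustrative):

#include <linux/irq.h>

static void demo_attach(unsigned int irq, void *cfg)
{
	irq_set_handler_data(irq, cfg);
}

static void *demo_detach(unsigned int irq)
{
	void *cfg = irq_get_handler_data(irq);

	irq_set_handler_data(irq, NULL);
	return cfg;
}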
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
deleted file mode 100644
index 4789f8e8bf7..00000000000
--- a/drivers/pci/intel-iommu.c
+++ /dev/null
@@ -1,3843 +0,0 @@
-/*
- * Copyright (c) 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) 2006-2008 Intel Corporation
- * Author: Ashok Raj <ashok.raj@intel.com>
- * Author: Shaohua Li <shaohua.li@intel.com>
- * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- * Author: Fenghua Yu <fenghua.yu@intel.com>
- */
-
-#include <linux/init.h>
-#include <linux/bitmap.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/dmar.h>
-#include <linux/dma-mapping.h>
-#include <linux/mempool.h>
-#include <linux/timer.h>
-#include <linux/iova.h>
-#include <linux/iommu.h>
-#include <linux/intel-iommu.h>
-#include <linux/sysdev.h>
-#include <linux/tboot.h>
-#include <linux/dmi.h>
-#include <asm/cacheflush.h>
-#include <asm/iommu.h>
-#include "pci.h"
-
-#define ROOT_SIZE VTD_PAGE_SIZE
-#define CONTEXT_SIZE VTD_PAGE_SIZE
-
-#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
-#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
-#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
-
-#define IOAPIC_RANGE_START (0xfee00000)
-#define IOAPIC_RANGE_END (0xfeefffff)
-#define IOVA_START_ADDR (0x1000)
-
-#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
-
-#define MAX_AGAW_WIDTH 64
-
-#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
-#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
-
-/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
- to match. That way, we can use 'unsigned long' for PFNs with impunity. */
-#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
- __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
-#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
-
-#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
-#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
-
-/* page table handling */
-#define LEVEL_STRIDE (9)
-#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
- return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
- return 30 + agaw * LEVEL_STRIDE;
-}
-
-static inline int width_to_agaw(int width)
-{
- return (width - 30) / LEVEL_STRIDE;
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
- return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(unsigned long pfn, int level)
-{
- return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline unsigned long level_mask(int level)
-{
- return -1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long level_size(int level)
-{
- return 1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long align_to_level(unsigned long pfn, int level)
-{
- return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
-/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
- are never going to work. */
-static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
-{
- return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-
-static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
-{
- return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-static inline unsigned long page_to_dma_pfn(struct page *pg)
-{
- return mm_to_dma_pfn(page_to_pfn(pg));
-}
-static inline unsigned long virt_to_dma_pfn(void *p)
-{
- return page_to_dma_pfn(virt_to_page(p));
-}
-
-/* global iommu list, set NULL for ignored DMAR units */
-static struct intel_iommu **g_iommus;
-
-static void __init check_tylersburg_isoch(void);
-static int rwbf_quirk;
-
-/*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
-struct root_entry {
- u64 val;
- u64 rsvd1;
-};
-#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
-static inline bool root_present(struct root_entry *root)
-{
- return (root->val & 1);
-}
-static inline void set_root_present(struct root_entry *root)
-{
- root->val |= 1;
-}
-static inline void set_root_value(struct root_entry *root, unsigned long value)
-{
- root->val |= value & VTD_PAGE_MASK;
-}
-
-static inline struct context_entry *
-get_context_addr_from_root(struct root_entry *root)
-{
- return (struct context_entry *)
- (root_present(root)?phys_to_virt(
- root->val & VTD_PAGE_MASK) :
- NULL);
-}
-
-/*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: aval
- * 8-23: domain id
- */
-struct context_entry {
- u64 lo;
- u64 hi;
-};
-
-static inline bool context_present(struct context_entry *context)
-{
- return (context->lo & 1);
-}
-static inline void context_set_present(struct context_entry *context)
-{
- context->lo |= 1;
-}
-
-static inline void context_set_fault_enable(struct context_entry *context)
-{
- context->lo &= (((u64)-1) << 2) | 1;
-}
-
-static inline void context_set_translation_type(struct context_entry *context,
- unsigned long value)
-{
- context->lo &= (((u64)-1) << 4) | 3;
- context->lo |= (value & 3) << 2;
-}
-
-static inline void context_set_address_root(struct context_entry *context,
- unsigned long value)
-{
- context->lo |= value & VTD_PAGE_MASK;
-}
-
-static inline void context_set_address_width(struct context_entry *context,
- unsigned long value)
-{
- context->hi |= value & 7;
-}
-
-static inline void context_set_domain_id(struct context_entry *context,
- unsigned long value)
-{
- context->hi |= (value & ((1 << 16) - 1)) << 8;
-}
-
-static inline void context_clear_entry(struct context_entry *context)
-{
- context->lo = 0;
- context->hi = 0;
-}
-
-/*
- * 0: readable
- * 1: writable
- * 2-6: reserved
- * 7: super page
- * 8-10: available
- * 11: snoop behavior
- * 12-63: Host physcial address
- */
-struct dma_pte {
- u64 val;
-};
-
-static inline void dma_clear_pte(struct dma_pte *pte)
-{
- pte->val = 0;
-}
-
-static inline void dma_set_pte_readable(struct dma_pte *pte)
-{
- pte->val |= DMA_PTE_READ;
-}
-
-static inline void dma_set_pte_writable(struct dma_pte *pte)
-{
- pte->val |= DMA_PTE_WRITE;
-}
-
-static inline void dma_set_pte_snp(struct dma_pte *pte)
-{
- pte->val |= DMA_PTE_SNP;
-}
-
-static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
-{
- pte->val = (pte->val & ~3) | (prot & 3);
-}
-
-static inline u64 dma_pte_addr(struct dma_pte *pte)
-{
-#ifdef CONFIG_64BIT
- return pte->val & VTD_PAGE_MASK;
-#else
- /* Must have a full atomic 64-bit read */
- return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
-#endif
-}
-
-static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
-{
- pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
-}
-
-static inline bool dma_pte_present(struct dma_pte *pte)
-{
- return (pte->val & 3) != 0;
-}
-
-static inline int first_pte_in_page(struct dma_pte *pte)
-{
- return !((unsigned long)pte & ~VTD_PAGE_MASK);
-}
-
-/*
- * This domain is a statically identity mapping domain.
- * 1. This domain creats a static 1:1 mapping to all usable memory.
- * 2. It maps to each iommu if successful.
- * 3. Each iommu mapps to this domain if successful.
- */
-static struct dmar_domain *si_domain;
-static int hw_pass_through = 1;
-
-/* devices under the same p2p bridge are owned in one domain */
-#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
-
-/* domain represents a virtual machine, more than one devices
- * across iommus may be owned in one domain, e.g. kvm guest.
- */
-#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
-
-/* si_domain contains mulitple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
-
-struct dmar_domain {
- int id; /* domain id */
- int nid; /* node id */
- unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
-
- struct list_head devices; /* all devices' list */
- struct iova_domain iovad; /* iova's that belong to this domain */
-
- struct dma_pte *pgd; /* virtual address */
- int gaw; /* max guest address width */
-
- /* adjusted guest address width, 0 is level 2 30-bit */
- int agaw;
-
- int flags; /* flags to find out type of domain */
-
- int iommu_coherency;/* indicate coherency of iommu access */
- int iommu_snooping; /* indicate snooping control feature*/
- int iommu_count; /* reference count of iommu */
- spinlock_t iommu_lock; /* protect iommu set in domain */
- u64 max_addr; /* maximum mapped address */
-};
-
-/* PCI domain-device relationship */
-struct device_domain_info {
- struct list_head link; /* link to domain siblings */
- struct list_head global; /* link to global list */
- int segment; /* PCI domain */
- u8 bus; /* PCI bus number */
- u8 devfn; /* PCI devfn number */
- struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
- struct intel_iommu *iommu; /* IOMMU used by this device */
- struct dmar_domain *domain; /* pointer to domain */
-};
-
-static void flush_unmaps_timeout(unsigned long data);
-
-DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
-
-#define HIGH_WATER_MARK 250
-struct deferred_flush_tables {
- int next;
- struct iova *iova[HIGH_WATER_MARK];
- struct dmar_domain *domain[HIGH_WATER_MARK];
-};
-
-static struct deferred_flush_tables *deferred_flush;
-
-/* bitmap for indexing intel_iommus */
-static int g_num_of_iommus;
-
-static DEFINE_SPINLOCK(async_umap_flush_lock);
-static LIST_HEAD(unmaps_to_do);
-
-static int timer_on;
-static long list_size;
-
-static void domain_remove_dev_info(struct dmar_domain *domain);
-
-#ifdef CONFIG_DMAR_DEFAULT_ON
-int dmar_disabled = 0;
-#else
-int dmar_disabled = 1;
-#endif /*CONFIG_DMAR_DEFAULT_ON*/
-
-static int dmar_map_gfx = 1;
-static int dmar_forcedac;
-static int intel_iommu_strict;
-
-#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
-static DEFINE_SPINLOCK(device_domain_lock);
-static LIST_HEAD(device_domain_list);
-
-static struct iommu_ops intel_iommu_ops;
-
-static int __init intel_iommu_setup(char *str)
-{
- if (!str)
- return -EINVAL;
- while (*str) {
- if (!strncmp(str, "on", 2)) {
- dmar_disabled = 0;
- printk(KERN_INFO "Intel-IOMMU: enabled\n");
- } else if (!strncmp(str, "off", 3)) {
- dmar_disabled = 1;
- printk(KERN_INFO "Intel-IOMMU: disabled\n");
- } else if (!strncmp(str, "igfx_off", 8)) {
- dmar_map_gfx = 0;
- printk(KERN_INFO
- "Intel-IOMMU: disable GFX device mapping\n");
- } else if (!strncmp(str, "forcedac", 8)) {
- printk(KERN_INFO
- "Intel-IOMMU: Forcing DAC for PCI devices\n");
- dmar_forcedac = 1;
- } else if (!strncmp(str, "strict", 6)) {
- printk(KERN_INFO
- "Intel-IOMMU: disable batched IOTLB flush\n");
- intel_iommu_strict = 1;
- }
-
- str += strcspn(str, ",");
- while (*str == ',')
- str++;
- }
- return 0;
-}
-__setup("intel_iommu=", intel_iommu_setup);
-
-static struct kmem_cache *iommu_domain_cache;
-static struct kmem_cache *iommu_devinfo_cache;
-static struct kmem_cache *iommu_iova_cache;
-
-static inline void *alloc_pgtable_page(int node)
-{
- struct page *page;
- void *vaddr = NULL;
-
- page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
- if (page)
- vaddr = page_address(page);
- return vaddr;
-}
-
-static inline void free_pgtable_page(void *vaddr)
-{
- free_page((unsigned long)vaddr);
-}
-
-static inline void *alloc_domain_mem(void)
-{
- return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
-}
-
-static void free_domain_mem(void *vaddr)
-{
- kmem_cache_free(iommu_domain_cache, vaddr);
-}
-
-static inline void * alloc_devinfo_mem(void)
-{
- return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
-}
-
-static inline void free_devinfo_mem(void *vaddr)
-{
- kmem_cache_free(iommu_devinfo_cache, vaddr);
-}
-
-struct iova *alloc_iova_mem(void)
-{
- return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
-}
-
-void free_iova_mem(struct iova *iova)
-{
- kmem_cache_free(iommu_iova_cache, iova);
-}
-
-
-static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
-{
- unsigned long sagaw;
- int agaw = -1;
-
- sagaw = cap_sagaw(iommu->cap);
- for (agaw = width_to_agaw(max_gaw);
- agaw >= 0; agaw--) {
- if (test_bit(agaw, &sagaw))
- break;
- }
-
- return agaw;
-}
-
-/*
- * Calculate max SAGAW for each iommu.
- */
-int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
-{
- return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
-}
-
-/*
- * calculate agaw for each iommu.
- * "SAGAW" may be different across iommus, use a default agaw, and
- * get a supported less agaw for iommus that don't support the default agaw.
- */
-int iommu_calculate_agaw(struct intel_iommu *iommu)
-{
- return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-}
-
-/* This functionin only returns single iommu in a domain */
-static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
-{
- int iommu_id;
-
- /* si_domain and vm domain should not get here. */
- BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
- BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
-
- iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
- if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
- return NULL;
-
- return g_iommus[iommu_id];
-}
-
-static void domain_update_iommu_coherency(struct dmar_domain *domain)
-{
- int i;
-
- domain->iommu_coherency = 1;
-
- for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
- if (!ecap_coherent(g_iommus[i]->ecap)) {
- domain->iommu_coherency = 0;
- break;
- }
- }
-}
-
-static void domain_update_iommu_snooping(struct dmar_domain *domain)
-{
- int i;
-
- domain->iommu_snooping = 1;
-
- for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
- if (!ecap_sc_support(g_iommus[i]->ecap)) {
- domain->iommu_snooping = 0;
- break;
- }
- }
-}
-
-/* Some capabilities may be different across iommus */
-static void domain_update_iommu_cap(struct dmar_domain *domain)
-{
- domain_update_iommu_coherency(domain);
- domain_update_iommu_snooping(domain);
-}
-
-static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
-{
- struct dmar_drhd_unit *drhd = NULL;
- int i;
-
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
- if (segment != drhd->segment)
- continue;
-
- for (i = 0; i < drhd->devices_cnt; i++) {
- if (drhd->devices[i] &&
- drhd->devices[i]->bus->number == bus &&
- drhd->devices[i]->devfn == devfn)
- return drhd->iommu;
- if (drhd->devices[i] &&
- drhd->devices[i]->subordinate &&
- drhd->devices[i]->subordinate->number <= bus &&
- drhd->devices[i]->subordinate->subordinate >= bus)
- return drhd->iommu;
- }
-
- if (drhd->include_all)
- return drhd->iommu;
- }
-
- return NULL;
-}
-
-static void domain_flush_cache(struct dmar_domain *domain,
- void *addr, int size)
-{
- if (!domain->iommu_coherency)
- clflush_cache_range(addr, size);
-}
-
-/* Gets context entry for a given bus and devfn */
-static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
- u8 bus, u8 devfn)
-{
- struct root_entry *root;
- struct context_entry *context;
- unsigned long phy_addr;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->lock, flags);
- root = &iommu->root_entry[bus];
- context = get_context_addr_from_root(root);
- if (!context) {
- context = (struct context_entry *)
- alloc_pgtable_page(iommu->node);
- if (!context) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return NULL;
- }
- __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
- phy_addr = virt_to_phys((void *)context);
- set_root_value(root, phy_addr);
- set_root_present(root);
- __iommu_flush_cache(iommu, root, sizeof(*root));
- }
- spin_unlock_irqrestore(&iommu->lock, flags);
- return &context[devfn];
-}
-
-static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
- struct root_entry *root;
- struct context_entry *context;
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->lock, flags);
- root = &iommu->root_entry[bus];
- context = get_context_addr_from_root(root);
- if (!context) {
- ret = 0;
- goto out;
- }
- ret = context_present(&context[devfn]);
-out:
- spin_unlock_irqrestore(&iommu->lock, flags);
- return ret;
-}
-
-static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
- struct root_entry *root;
- struct context_entry *context;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->lock, flags);
- root = &iommu->root_entry[bus];
- context = get_context_addr_from_root(root);
- if (context) {
- context_clear_entry(&context[devfn]);
- __iommu_flush_cache(iommu, &context[devfn], \
- sizeof(*context));
- }
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void free_context_table(struct intel_iommu *iommu)
-{
- struct root_entry *root;
- int i;
- unsigned long flags;
- struct context_entry *context;
-
- spin_lock_irqsave(&iommu->lock, flags);
- if (!iommu->root_entry) {
- goto out;
- }
- for (i = 0; i < ROOT_ENTRY_NR; i++) {
- root = &iommu->root_entry[i];
- context = get_context_addr_from_root(root);
- if (context)
- free_pgtable_page(context);
- }
- free_pgtable_page(iommu->root_entry);
- iommu->root_entry = NULL;
-out:
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
- unsigned long pfn)
-{
- int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
- struct dma_pte *parent, *pte = NULL;
- int level = agaw_to_level(domain->agaw);
- int offset;
-
- BUG_ON(!domain->pgd);
- BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
- parent = domain->pgd;
-
- while (level > 0) {
- void *tmp_page;
-
- offset = pfn_level_offset(pfn, level);
- pte = &parent[offset];
- if (level == 1)
- break;
-
- if (!dma_pte_present(pte)) {
- uint64_t pteval;
-
- tmp_page = alloc_pgtable_page(domain->nid);
-
- if (!tmp_page)
- return NULL;
-
- domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
- pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
- if (cmpxchg64(&pte->val, 0ULL, pteval)) {
- /* Someone else set it while we were thinking; use theirs. */
- free_pgtable_page(tmp_page);
- } else {
- dma_pte_addr(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
- }
- }
- parent = phys_to_virt(dma_pte_addr(pte));
- level--;
- }
-
- return pte;
-}
-
-/* return address's pte at specific level */
-static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
- unsigned long pfn,
- int level)
-{
- struct dma_pte *parent, *pte = NULL;
- int total = agaw_to_level(domain->agaw);
- int offset;
-
- parent = domain->pgd;
- while (level <= total) {
- offset = pfn_level_offset(pfn, total);
- pte = &parent[offset];
- if (level == total)
- return pte;
-
- if (!dma_pte_present(pte))
- break;
- parent = phys_to_virt(dma_pte_addr(pte));
- total--;
- }
- return NULL;
-}
-
-/* clear last-level PTEs; a TLB flush should follow */
-static void dma_pte_clear_range(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn)
-{
- int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
- struct dma_pte *first_pte, *pte;
-
- BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
- BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
- BUG_ON(start_pfn > last_pfn);
-
-	/* we don't need a lock here; nobody else touches the iova range */
- do {
- first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
- if (!pte) {
- start_pfn = align_to_level(start_pfn + 1, 2);
- continue;
- }
- do {
- dma_clear_pte(pte);
- start_pfn++;
- pte++;
- } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
-
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
-
- } while (start_pfn && start_pfn <= last_pfn);
-}
-
-/* free page-table pages; last-level PTEs should already be cleared */
-static void dma_pte_free_pagetable(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn)
-{
- int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
- struct dma_pte *first_pte, *pte;
- int total = agaw_to_level(domain->agaw);
- int level;
- unsigned long tmp;
-
- BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
- BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
- BUG_ON(start_pfn > last_pfn);
-
-	/* We don't need a lock here; nobody else touches the iova range */
- level = 2;
- while (level <= total) {
- tmp = align_to_level(start_pfn, level);
-
- /* If we can't even clear one PTE at this level, we're done */
- if (tmp + level_size(level) - 1 > last_pfn)
- return;
-
- do {
- first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
- if (!pte) {
- tmp = align_to_level(tmp + 1, level + 1);
- continue;
- }
- do {
- if (dma_pte_present(pte)) {
- free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
- dma_clear_pte(pte);
- }
- pte++;
- tmp += level_size(level);
- } while (!first_pte_in_page(pte) &&
- tmp + level_size(level) - 1 <= last_pfn);
-
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
-
- } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
- level++;
- }
- /* free pgd */
- if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- free_pgtable_page(domain->pgd);
- domain->pgd = NULL;
- }
-}
-
-/* iommu handling */
-static int iommu_alloc_root_entry(struct intel_iommu *iommu)
-{
- struct root_entry *root;
- unsigned long flags;
-
- root = (struct root_entry *)alloc_pgtable_page(iommu->node);
- if (!root)
- return -ENOMEM;
-
- __iommu_flush_cache(iommu, root, ROOT_SIZE);
-
- spin_lock_irqsave(&iommu->lock, flags);
- iommu->root_entry = root;
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- return 0;
-}
-
-static void iommu_set_root_entry(struct intel_iommu *iommu)
-{
- void *addr;
- u32 sts;
- unsigned long flag;
-
- addr = iommu->root_entry;
-
- spin_lock_irqsave(&iommu->register_lock, flag);
- dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
-
- writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
-
-	/* Make sure the hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_RTPS), sts);
-
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
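-
-/*
- * Editor's note: IOMMU_WAIT_OP() above spins on a status register until the
- * hardware acknowledges the command. Ignoring its timeout handling, the
- * pattern for the SRTP case is roughly:
- *
- *	do {
- *		sts = readl(iommu->reg + DMAR_GSTS_REG);
- *		cpu_relax();
- *	} while (!(sts & DMA_GSTS_RTPS));
- */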
-
-static void iommu_flush_write_buffer(struct intel_iommu *iommu)
-{
- u32 val;
- unsigned long flag;
-
- if (!rwbf_quirk && !cap_rwbf(iommu->cap))
- return;
-
- spin_lock_irqsave(&iommu->register_lock, flag);
- writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
-
-	/* Make sure the hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(val & DMA_GSTS_WBFS)), val);
-
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-/* return value determines if we need a write buffer flush */
-static void __iommu_flush_context(struct intel_iommu *iommu,
- u16 did, u16 source_id, u8 function_mask,
- u64 type)
-{
- u64 val = 0;
- unsigned long flag;
-
- switch (type) {
- case DMA_CCMD_GLOBAL_INVL:
- val = DMA_CCMD_GLOBAL_INVL;
- break;
- case DMA_CCMD_DOMAIN_INVL:
- val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
- break;
- case DMA_CCMD_DEVICE_INVL:
- val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
- | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
- break;
- default:
- BUG();
- }
- val |= DMA_CCMD_ICC;
-
- spin_lock_irqsave(&iommu->register_lock, flag);
- dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
-
-	/* Make sure the hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
- dmar_readq, (!(val & DMA_CCMD_ICC)), val);
-
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-/* return value determines if we need a write buffer flush */
-static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int size_order, u64 type)
-{
- int tlb_offset = ecap_iotlb_offset(iommu->ecap);
- u64 val = 0, val_iva = 0;
- unsigned long flag;
-
- switch (type) {
- case DMA_TLB_GLOBAL_FLUSH:
-		/* global flush doesn't need to set IVA_REG */
- val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
- break;
- case DMA_TLB_DSI_FLUSH:
- val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
- break;
- case DMA_TLB_PSI_FLUSH:
- val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
- /* Note: always flush non-leaf currently */
- val_iva = size_order | addr;
- break;
- default:
- BUG();
- }
- /* Note: set drain read/write */
-#if 0
-	/*
-	 * This is probably meant to be extra safe, but it looks like we
-	 * can ignore it without any impact.
-	 */
- if (cap_read_drain(iommu->cap))
- val |= DMA_TLB_READ_DRAIN;
-#endif
- if (cap_write_drain(iommu->cap))
- val |= DMA_TLB_WRITE_DRAIN;
-
- spin_lock_irqsave(&iommu->register_lock, flag);
- /* Note: Only uses first TLB reg currently */
- if (val_iva)
- dmar_writeq(iommu->reg + tlb_offset, val_iva);
- dmar_writeq(iommu->reg + tlb_offset + 8, val);
-
-	/* Make sure the hardware completes it */
- IOMMU_WAIT_OP(iommu, tlb_offset + 8,
- dmar_readq, (!(val & DMA_TLB_IVT)), val);
-
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-
- /* check IOTLB invalidation granularity */
- if (DMA_TLB_IAIG(val) == 0)
-		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
- if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
- pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
- (unsigned long long)DMA_TLB_IIRG(type),
- (unsigned long long)DMA_TLB_IAIG(val));
-}
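-
-/*
- * Editor's note on __iommu_flush_iotlb(): ecap_iotlb_offset() locates the
- * invalidation register pair relative to the MMIO base -- the IVA register
- * at tlb_offset and the IOTLB command register at tlb_offset + 8. The
- * trailing checks compare the granularity the hardware actually used
- * (DMA_TLB_IAIG) against the one requested (DMA_TLB_IIRG): a coarser result
- * is legal and merely logged, while zero means the flush failed outright.
- */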
-
-static struct device_domain_info *iommu_support_dev_iotlb(
- struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
-{
- int found = 0;
- unsigned long flags;
- struct device_domain_info *info;
- struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
-
-	if (!iommu || !ecap_dev_iotlb_support(iommu->ecap))
-		return NULL;
-
- if (!iommu->qi)
- return NULL;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(info, &domain->devices, link)
- if (info->bus == bus && info->devfn == devfn) {
- found = 1;
- break;
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- if (!found || !info->dev)
- return NULL;
-
- if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
- return NULL;
-
- if (!dmar_find_matched_atsr_unit(info->dev))
- return NULL;
-
- info->iommu = iommu;
-
- return info;
-}
-
-static void iommu_enable_dev_iotlb(struct device_domain_info *info)
-{
- if (!info)
- return;
-
- pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
-}
-
-static void iommu_disable_dev_iotlb(struct device_domain_info *info)
-{
- if (!info->dev || !pci_ats_enabled(info->dev))
- return;
-
- pci_disable_ats(info->dev);
-}
-
-static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
- u64 addr, unsigned mask)
-{
- u16 sid, qdep;
- unsigned long flags;
- struct device_domain_info *info;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(info, &domain->devices, link) {
- if (!info->dev || !pci_ats_enabled(info->dev))
- continue;
-
- sid = info->bus << 8 | info->devfn;
- qdep = pci_ats_queue_depth(info->dev);
- qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
-}
-
-static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- unsigned long pfn, unsigned int pages, int map)
-{
- unsigned int mask = ilog2(__roundup_pow_of_two(pages));
- uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
-
- BUG_ON(pages == 0);
-
-	/*
-	 * Fall back to a domain-selective flush if there is no PSI support
-	 * or the size is too big.
-	 * PSI requires the page count to be a power of two, with the base
-	 * address naturally aligned to that size.
-	 */
- if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
- iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH);
- else
- iommu->flush.flush_iotlb(iommu, did, addr, mask,
- DMA_TLB_PSI_FLUSH);
-
-	/*
-	 * In caching mode, changes of pages from non-present to present
-	 * require a flush. However, the device IOTLB doesn't need to be
-	 * flushed in this case.
-	 */
- if (!cap_caching_mode(iommu->cap) || !map)
- iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
-}
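-
-/*
- * Editor's worked example for the mask computation above (illustrative):
- * pages = 5 gives __roundup_pow_of_two(5) = 8 and mask = ilog2(8) = 3,
- * i.e. a PSI request covering 2^3 = 8 pages with addr aligned to that
- * size; if 3 exceeded cap_max_amask_val(), the code would fall back to
- * the domain-selective flush instead.
- */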
-
-static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
-{
- u32 pmen;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->register_lock, flags);
- pmen = readl(iommu->reg + DMAR_PMEN_REG);
- pmen &= ~DMA_PMEN_EPM;
- writel(pmen, iommu->reg + DMAR_PMEN_REG);
-
- /* wait for the protected region status bit to clear */
- IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
- readl, !(pmen & DMA_PMEN_PRS), pmen);
-
- spin_unlock_irqrestore(&iommu->register_lock, flags);
-}
-
-static int iommu_enable_translation(struct intel_iommu *iommu)
-{
- u32 sts;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->register_lock, flags);
- iommu->gcmd |= DMA_GCMD_TE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
-	/* Make sure the hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_TES), sts);
-
- spin_unlock_irqrestore(&iommu->register_lock, flags);
- return 0;
-}
-
-static int iommu_disable_translation(struct intel_iommu *iommu)
-{
- u32 sts;
- unsigned long flag;
-
- spin_lock_irqsave(&iommu->register_lock, flag);
- iommu->gcmd &= ~DMA_GCMD_TE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
-	/* Make sure the hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(sts & DMA_GSTS_TES)), sts);
-
- spin_unlock_irqrestore(&iommu->register_lock, flag);
- return 0;
-}
-
-static int iommu_init_domains(struct intel_iommu *iommu)
-{
- unsigned long ndomains;
- unsigned long nlongs;
-
- ndomains = cap_ndoms(iommu->cap);
-	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
- ndomains);
- nlongs = BITS_TO_LONGS(ndomains);
-
- spin_lock_init(&iommu->lock);
-
-	/* TBD: there might be 64K domains;
-	 * consider a different allocation scheme for future chips.
-	 */
- iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
- if (!iommu->domain_ids) {
- printk(KERN_ERR "Allocating domain id array failed\n");
- return -ENOMEM;
- }
- iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
- GFP_KERNEL);
- if (!iommu->domains) {
- printk(KERN_ERR "Allocating domain array failed\n");
- return -ENOMEM;
- }
-
-	/*
-	 * If caching mode is set, then invalid translations are tagged
-	 * with domain id 0. Hence we need to pre-allocate it.
-	 */
- if (cap_caching_mode(iommu->cap))
- set_bit(0, iommu->domain_ids);
- return 0;
-}
-
-static void domain_exit(struct dmar_domain *domain);
-static void vm_domain_exit(struct dmar_domain *domain);
-
-void free_dmar_iommu(struct intel_iommu *iommu)
-{
- struct dmar_domain *domain;
- int i;
- unsigned long flags;
-
- if ((iommu->domains) && (iommu->domain_ids)) {
- for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
- domain = iommu->domains[i];
- clear_bit(i, iommu->domain_ids);
-
- spin_lock_irqsave(&domain->iommu_lock, flags);
- if (--domain->iommu_count == 0) {
- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
- vm_domain_exit(domain);
- else
- domain_exit(domain);
- }
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
- }
- }
-
- if (iommu->gcmd & DMA_GCMD_TE)
- iommu_disable_translation(iommu);
-
- if (iommu->irq) {
- set_irq_data(iommu->irq, NULL);
- /* This will mask the irq */
- free_irq(iommu->irq, iommu);
- destroy_irq(iommu->irq);
- }
-
- kfree(iommu->domains);
- kfree(iommu->domain_ids);
-
- g_iommus[iommu->seq_id] = NULL;
-
- /* if all iommus are freed, free g_iommus */
- for (i = 0; i < g_num_of_iommus; i++) {
- if (g_iommus[i])
- break;
- }
-
- if (i == g_num_of_iommus)
- kfree(g_iommus);
-
- /* free context mapping */
- free_context_table(iommu);
-}
-
-static struct dmar_domain *alloc_domain(void)
-{
- struct dmar_domain *domain;
-
- domain = alloc_domain_mem();
- if (!domain)
- return NULL;
-
- domain->nid = -1;
- memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
- domain->flags = 0;
-
- return domain;
-}
-
-static int iommu_attach_domain(struct dmar_domain *domain,
- struct intel_iommu *iommu)
-{
- int num;
- unsigned long ndomains;
- unsigned long flags;
-
- ndomains = cap_ndoms(iommu->cap);
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num >= ndomains) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- printk(KERN_ERR "IOMMU: no free domain ids\n");
- return -ENOMEM;
- }
-
- domain->id = num;
- set_bit(num, iommu->domain_ids);
- set_bit(iommu->seq_id, &domain->iommu_bmp);
- iommu->domains[num] = domain;
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- return 0;
-}
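-
-/*
- * Editor's sketch (pattern only): iommu_attach_domain() uses the standard
- * find-and-set bitmap allocator, serialized by iommu->lock:
- */
-#if 0	/* illustrative only, never compiled */
-	num = find_first_zero_bit(bitmap, nbits);	/* first free id */
-	if (num >= nbits)
-		return -ENOMEM;				/* all ids in use */
-	set_bit(num, bitmap);				/* claim id 'num' */
-#endif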
-
-static void iommu_detach_domain(struct dmar_domain *domain,
- struct intel_iommu *iommu)
-{
- unsigned long flags;
- int num, ndomains;
- int found = 0;
-
- spin_lock_irqsave(&iommu->lock, flags);
- ndomains = cap_ndoms(iommu->cap);
- for_each_set_bit(num, iommu->domain_ids, ndomains) {
- if (iommu->domains[num] == domain) {
- found = 1;
- break;
- }
- }
-
- if (found) {
- clear_bit(num, iommu->domain_ids);
- clear_bit(iommu->seq_id, &domain->iommu_bmp);
- iommu->domains[num] = NULL;
- }
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_rbtree_key;
-
-static void dmar_init_reserved_ranges(void)
-{
- struct pci_dev *pdev = NULL;
- struct iova *iova;
- int i;
-
- init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
-
- lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
- &reserved_rbtree_key);
-
- /* IOAPIC ranges shouldn't be accessed by DMA */
- iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
- IOVA_PFN(IOAPIC_RANGE_END));
- if (!iova)
- printk(KERN_ERR "Reserve IOAPIC range failed\n");
-
- /* Reserve all PCI MMIO to avoid peer-to-peer access */
- for_each_pci_dev(pdev) {
- struct resource *r;
-
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- r = &pdev->resource[i];
- if (!r->flags || !(r->flags & IORESOURCE_MEM))
- continue;
- iova = reserve_iova(&reserved_iova_list,
- IOVA_PFN(r->start),
- IOVA_PFN(r->end));
- if (!iova)
- printk(KERN_ERR "Reserve iova failed\n");
- }
- }
-}
-
-static void domain_reserve_special_ranges(struct dmar_domain *domain)
-{
- copy_reserved_iova(&reserved_iova_list, &domain->iovad);
-}
-
-static inline int guestwidth_to_adjustwidth(int gaw)
-{
- int agaw;
- int r = (gaw - 12) % 9;
-
- if (r == 0)
- agaw = gaw;
- else
- agaw = gaw + 9 - r;
- if (agaw > 64)
- agaw = 64;
- return agaw;
-}
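-
-/*
- * Editor's worked example (illustrative): the adjusted width must sit a
- * multiple of 9 bits (one page-table level) above the 12-bit page offset.
- *   gaw = 39: r = (39 - 12) % 9 = 0 -> agaw = 39 (3-level table)
- *   gaw = 40: r = (40 - 12) % 9 = 1 -> agaw = 40 + 9 - 1 = 48 (4-level)
- * Anything that would exceed 64 is clamped to 64.
- */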
-
-static int domain_init(struct dmar_domain *domain, int guest_width)
-{
- struct intel_iommu *iommu;
- int adjust_width, agaw;
- unsigned long sagaw;
-
- init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
- spin_lock_init(&domain->iommu_lock);
-
- domain_reserve_special_ranges(domain);
-
- /* calculate AGAW */
- iommu = domain_get_iommu(domain);
- if (guest_width > cap_mgaw(iommu->cap))
- guest_width = cap_mgaw(iommu->cap);
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- agaw = width_to_agaw(adjust_width);
- sagaw = cap_sagaw(iommu->cap);
- if (!test_bit(agaw, &sagaw)) {
- /* hardware doesn't support it, choose a bigger one */
- pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
- agaw = find_next_bit(&sagaw, 5, agaw);
- if (agaw >= 5)
- return -ENODEV;
- }
- domain->agaw = agaw;
- INIT_LIST_HEAD(&domain->devices);
-
- if (ecap_coherent(iommu->ecap))
- domain->iommu_coherency = 1;
- else
- domain->iommu_coherency = 0;
-
- if (ecap_sc_support(iommu->ecap))
- domain->iommu_snooping = 1;
- else
- domain->iommu_snooping = 0;
-
- domain->iommu_count = 1;
- domain->nid = iommu->node;
-
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
- return 0;
-}
-
-static void domain_exit(struct dmar_domain *domain)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
-
-	/* Domain 0 is reserved, so don't process it */
- if (!domain)
- return;
-
- domain_remove_dev_info(domain);
- /* destroy iovas */
- put_iova_domain(&domain->iovad);
-
- /* clear ptes */
- dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
- /* free page tables */
- dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
- for_each_active_iommu(iommu, drhd)
- if (test_bit(iommu->seq_id, &domain->iommu_bmp))
- iommu_detach_domain(domain, iommu);
-
- free_domain_mem(domain);
-}
-
-static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
- u8 bus, u8 devfn, int translation)
-{
- struct context_entry *context;
- unsigned long flags;
- struct intel_iommu *iommu;
- struct dma_pte *pgd;
- unsigned long num;
- unsigned long ndomains;
- int id;
- int agaw;
- struct device_domain_info *info = NULL;
-
- pr_debug("Set context mapping for %02x:%02x.%d\n",
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
-
- BUG_ON(!domain->pgd);
- BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
- translation != CONTEXT_TT_MULTI_LEVEL);
-
- iommu = device_to_iommu(segment, bus, devfn);
- if (!iommu)
- return -ENODEV;
-
- context = device_to_context_entry(iommu, bus, devfn);
- if (!context)
- return -ENOMEM;
- spin_lock_irqsave(&iommu->lock, flags);
- if (context_present(context)) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return 0;
- }
-
- id = domain->id;
- pgd = domain->pgd;
-
- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
- domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
- int found = 0;
-
- /* find an available domain id for this device in iommu */
- ndomains = cap_ndoms(iommu->cap);
- for_each_set_bit(num, iommu->domain_ids, ndomains) {
- if (iommu->domains[num] == domain) {
- id = num;
- found = 1;
- break;
- }
- }
-
- if (found == 0) {
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num >= ndomains) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- printk(KERN_ERR "IOMMU: no free domain ids\n");
- return -EFAULT;
- }
-
- set_bit(num, iommu->domain_ids);
- iommu->domains[num] = domain;
- id = num;
- }
-
-		/* Skip the top levels of the page tables for an
-		 * iommu which has a smaller agaw than the default.
-		 * Unnecessary for PT mode.
-		 */
- if (translation != CONTEXT_TT_PASS_THROUGH) {
- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd)) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return -ENOMEM;
- }
- }
- }
- }
-
- context_set_domain_id(context, id);
-
- if (translation != CONTEXT_TT_PASS_THROUGH) {
- info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
- translation = info ? CONTEXT_TT_DEV_IOTLB :
- CONTEXT_TT_MULTI_LEVEL;
- }
-	/*
-	 * In pass-through mode, AW must be programmed to indicate the largest
-	 * AGAW value supported by the hardware, and ASR is ignored.
-	 */
- if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
- context_set_address_width(context, iommu->msagaw);
- else {
- context_set_address_root(context, virt_to_phys(pgd));
- context_set_address_width(context, iommu->agaw);
- }
-
- context_set_translation_type(context, translation);
- context_set_fault_enable(context);
- context_set_present(context);
- domain_flush_cache(domain, context, sizeof(*context));
-
-	/*
-	 * It's a non-present to present mapping. If hardware doesn't cache
-	 * non-present entries we only need to flush the write-buffer. If it
-	 * _does_ cache non-present entries, then it does so in the special
-	 * domain #0, which we have to flush:
-	 */
- if (cap_caching_mode(iommu->cap)) {
- iommu->flush.flush_context(iommu, 0,
- (((u16)bus) << 8) | devfn,
- DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL);
- iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
- } else {
- iommu_flush_write_buffer(iommu);
- }
- iommu_enable_dev_iotlb(info);
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- spin_lock_irqsave(&domain->iommu_lock, flags);
- if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
- domain->iommu_count++;
- if (domain->iommu_count == 1)
- domain->nid = iommu->node;
- domain_update_iommu_cap(domain);
- }
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
- return 0;
-}
-
-static int
-domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
- int translation)
-{
- int ret;
- struct pci_dev *tmp, *parent;
-
- ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
- pdev->bus->number, pdev->devfn,
- translation);
- if (ret)
- return ret;
-
- /* dependent device mapping */
- tmp = pci_find_upstream_pcie_bridge(pdev);
- if (!tmp)
- return 0;
- /* Secondary interface's bus number and devfn 0 */
- parent = pdev->bus->self;
- while (parent != tmp) {
- ret = domain_context_mapping_one(domain,
- pci_domain_nr(parent->bus),
- parent->bus->number,
- parent->devfn, translation);
- if (ret)
- return ret;
- parent = parent->bus->self;
- }
- if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
- return domain_context_mapping_one(domain,
- pci_domain_nr(tmp->subordinate),
- tmp->subordinate->number, 0,
- translation);
- else /* this is a legacy PCI bridge */
- return domain_context_mapping_one(domain,
- pci_domain_nr(tmp->bus),
- tmp->bus->number,
- tmp->devfn,
- translation);
-}
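-
-/*
- * Editor's note -- a hypothetical topology for the walk above: for device
- * 0000:07:03.0 behind a PCIe-to-PCI bridge whose secondary bus is 7,
- * context entries are programmed for 07:03.0 itself, for every conventional
- * PCI bridge between the device and the upstream PCIe bridge, and finally
- * for (bus 7, devfn 0), the PCIe bridge's secondary interface; a legacy
- * PCI bridge would instead be mapped with its own bus/devfn.
- */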
-
-static int domain_context_mapped(struct pci_dev *pdev)
-{
- int ret;
- struct pci_dev *tmp, *parent;
- struct intel_iommu *iommu;
-
- iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
- pdev->devfn);
- if (!iommu)
- return -ENODEV;
-
- ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
- if (!ret)
- return ret;
- /* dependent device mapping */
- tmp = pci_find_upstream_pcie_bridge(pdev);
- if (!tmp)
- return ret;
- /* Secondary interface's bus number and devfn 0 */
- parent = pdev->bus->self;
- while (parent != tmp) {
- ret = device_context_mapped(iommu, parent->bus->number,
- parent->devfn);
- if (!ret)
- return ret;
- parent = parent->bus->self;
- }
- if (pci_is_pcie(tmp))
- return device_context_mapped(iommu, tmp->subordinate->number,
- 0);
- else
- return device_context_mapped(iommu, tmp->bus->number,
- tmp->devfn);
-}
-
-/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
- size_t size)
-{
- host_addr &= ~PAGE_MASK;
- return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
-}
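-
-/*
- * Editor's worked example (illustrative, 4KiB pages): host_addr =
- * 0x12340800 with size = 0x1800 leaves an in-page offset of 0x800, so
- * PAGE_ALIGN(0x800 + 0x1800) = 0x2000 and the result is 0x2000 >> 12 = 2
- * VT-d pages -- one more than size alone would suggest.
- */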
-
-static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long phys_pfn,
- unsigned long nr_pages, int prot)
-{
- struct dma_pte *first_pte = NULL, *pte = NULL;
- phys_addr_t uninitialized_var(pteval);
- int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
- unsigned long sg_res;
-
- BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
-
- if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
- return -EINVAL;
-
- prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
-
- if (sg)
- sg_res = 0;
- else {
- sg_res = nr_pages + 1;
- pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
- }
-
- while (nr_pages--) {
- uint64_t tmp;
-
- if (!sg_res) {
- sg_res = aligned_nrpages(sg->offset, sg->length);
- sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
- sg->dma_length = sg->length;
- pteval = page_to_phys(sg_page(sg)) | prot;
- }
- if (!pte) {
- first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
- if (!pte)
- return -ENOMEM;
- }
-		/* We don't need a lock here; nobody else
-		 * touches the iova range
-		 */
- tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
- if (tmp) {
- static int dumps = 5;
- printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
- iov_pfn, tmp, (unsigned long long)pteval);
- if (dumps) {
- dumps--;
- debug_dma_dump_mappings(NULL);
- }
- WARN_ON(1);
- }
- pte++;
- if (!nr_pages || first_pte_in_page(pte)) {
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
- pte = NULL;
- }
- iov_pfn++;
- pteval += VTD_PAGE_SIZE;
- sg_res--;
- if (!sg_res)
- sg = sg_next(sg);
- }
- return 0;
-}
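-
-/*
- * Editor's note on __domain_mapping(): sg_res counts the VT-d pages left in
- * the current scatterlist element and pulls in the next element when it
- * reaches zero; for the non-sg (phys_pfn) case it is primed to nr_pages + 1
- * so it can never hit zero inside the loop, and pteval simply advances one
- * page at a time.
- */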
-
-static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long nr_pages,
- int prot)
-{
- return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
-}
-
-static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- unsigned long phys_pfn, unsigned long nr_pages,
- int prot)
-{
- return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
-}
-
-static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
- if (!iommu)
- return;
-
- clear_context_table(iommu, bus, devfn);
- iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-}
-
-static void domain_remove_dev_info(struct dmar_domain *domain)
-{
- struct device_domain_info *info;
- unsigned long flags;
- struct intel_iommu *iommu;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- while (!list_empty(&domain->devices)) {
- info = list_entry(domain->devices.next,
- struct device_domain_info, link);
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->dev.archdata.iommu = NULL;
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- iommu_disable_dev_iotlb(info);
- iommu = device_to_iommu(info->segment, info->bus, info->devfn);
- iommu_detach_dev(iommu, info->bus, info->devfn);
- free_devinfo_mem(info);
-
- spin_lock_irqsave(&device_domain_lock, flags);
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
-}
-
-/*
- * find_domain
- * Note: struct pci_dev->dev.archdata.iommu stores the info
- */
-static struct dmar_domain *
-find_domain(struct pci_dev *pdev)
-{
- struct device_domain_info *info;
-
- /* No lock here, assumes no domain exit in normal case */
- info = pdev->dev.archdata.iommu;
- if (info)
- return info->domain;
- return NULL;
-}
-
-/* domain is initialized */
-static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
-{
- struct dmar_domain *domain, *found = NULL;
- struct intel_iommu *iommu;
- struct dmar_drhd_unit *drhd;
- struct device_domain_info *info, *tmp;
- struct pci_dev *dev_tmp;
- unsigned long flags;
- int bus = 0, devfn = 0;
- int segment;
- int ret;
-
- domain = find_domain(pdev);
- if (domain)
- return domain;
-
- segment = pci_domain_nr(pdev->bus);
-
- dev_tmp = pci_find_upstream_pcie_bridge(pdev);
- if (dev_tmp) {
- if (pci_is_pcie(dev_tmp)) {
- bus = dev_tmp->subordinate->number;
- devfn = 0;
- } else {
- bus = dev_tmp->bus->number;
- devfn = dev_tmp->devfn;
- }
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(info, &device_domain_list, global) {
- if (info->segment == segment &&
- info->bus == bus && info->devfn == devfn) {
- found = info->domain;
- break;
- }
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
-		/* the pcie-pci bridge already has a domain; use it */
- if (found) {
- domain = found;
- goto found_domain;
- }
- }
-
- domain = alloc_domain();
- if (!domain)
- goto error;
-
- /* Allocate new domain for the device */
- drhd = dmar_find_matched_drhd_unit(pdev);
- if (!drhd) {
- printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
- pci_name(pdev));
- return NULL;
- }
- iommu = drhd->iommu;
-
- ret = iommu_attach_domain(domain, iommu);
- if (ret) {
- domain_exit(domain);
- goto error;
- }
-
- if (domain_init(domain, gaw)) {
- domain_exit(domain);
- goto error;
- }
-
- /* register pcie-to-pci device */
- if (dev_tmp) {
- info = alloc_devinfo_mem();
- if (!info) {
- domain_exit(domain);
- goto error;
- }
- info->segment = segment;
- info->bus = bus;
- info->devfn = devfn;
- info->dev = NULL;
- info->domain = domain;
- /* This domain is shared by devices under p2p bridge */
- domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
-
-		/* the pcie-to-pci bridge already has a domain; use it */
- found = NULL;
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(tmp, &device_domain_list, global) {
- if (tmp->segment == segment &&
- tmp->bus == bus && tmp->devfn == devfn) {
- found = tmp->domain;
- break;
- }
- }
- if (found) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
- free_devinfo_mem(info);
- domain_exit(domain);
- domain = found;
- } else {
- list_add(&info->link, &domain->devices);
- list_add(&info->global, &device_domain_list);
- spin_unlock_irqrestore(&device_domain_lock, flags);
- }
- }
-
-found_domain:
- info = alloc_devinfo_mem();
- if (!info)
- goto error;
- info->segment = segment;
- info->bus = pdev->bus->number;
- info->devfn = pdev->devfn;
- info->dev = pdev;
- info->domain = domain;
- spin_lock_irqsave(&device_domain_lock, flags);
-	/* somebody else was faster and attached a domain already */
- found = find_domain(pdev);
- if (found != NULL) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
- if (found != domain) {
- domain_exit(domain);
- domain = found;
- }
- free_devinfo_mem(info);
- return domain;
- }
- list_add(&info->link, &domain->devices);
- list_add(&info->global, &device_domain_list);
- pdev->dev.archdata.iommu = info;
- spin_unlock_irqrestore(&device_domain_lock, flags);
- return domain;
-error:
- /* recheck it here, maybe others set it */
- return find_domain(pdev);
-}
-
-static int iommu_identity_mapping;
-#define IDENTMAP_ALL 1
-#define IDENTMAP_GFX 2
-#define IDENTMAP_AZALIA 4
-
-static int iommu_domain_identity_map(struct dmar_domain *domain,
- unsigned long long start,
- unsigned long long end)
-{
- unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
- unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
-
- if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
- dma_to_mm_pfn(last_vpfn))) {
- printk(KERN_ERR "IOMMU: reserve iova failed\n");
- return -ENOMEM;
- }
-
- pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
- start, end, domain->id);
-	/*
-	 * The RMRR range might overlap the physical memory range;
-	 * clear it first.
-	 */
- dma_pte_clear_range(domain, first_vpfn, last_vpfn);
-
- return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
- last_vpfn - first_vpfn + 1,
- DMA_PTE_READ|DMA_PTE_WRITE);
-}
-
-static int iommu_prepare_identity_map(struct pci_dev *pdev,
- unsigned long long start,
- unsigned long long end)
-{
- struct dmar_domain *domain;
- int ret;
-
- domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain)
- return -ENOMEM;
-
-	/* For _hardware_ passthrough, don't bother. But for software
-	   passthrough, we do it anyway -- it may indicate a memory
-	   range which is reserved in E820, and so didn't get set
-	   up to start with in si_domain */
- if (domain == si_domain && hw_pass_through) {
-		printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
- pci_name(pdev), start, end);
- return 0;
- }
-
- printk(KERN_INFO
- "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
- pci_name(pdev), start, end);
-
- if (end < start) {
- WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- ret = -EIO;
- goto error;
- }
-
- if (end >> agaw_to_width(domain->agaw)) {
- WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- agaw_to_width(domain->agaw),
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- ret = -EIO;
- goto error;
- }
-
- ret = iommu_domain_identity_map(domain, start, end);
- if (ret)
- goto error;
-
- /* context entry init */
- ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
- if (ret)
- goto error;
-
- return 0;
-
- error:
- domain_exit(domain);
- return ret;
-}
-
-static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
- struct pci_dev *pdev)
-{
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
- return 0;
- return iommu_prepare_identity_map(pdev, rmrr->base_address,
- rmrr->end_address + 1);
-}
-
-#ifdef CONFIG_DMAR_FLOPPY_WA
-static inline void iommu_prepare_isa(void)
-{
- struct pci_dev *pdev;
- int ret;
-
- pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
- if (!pdev)
- return;
-
- printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
- ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
-
- if (ret)
- printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
- "floppy might not work\n");
-}
-#else
-static inline void iommu_prepare_isa(void)
-{
- return;
-}
-#endif /* !CONFIG_DMAR_FLOPPY_WA */
-
-static int md_domain_init(struct dmar_domain *domain, int guest_width);
-
-static int __init si_domain_work_fn(unsigned long start_pfn,
- unsigned long end_pfn, void *datax)
-{
- int *ret = datax;
-
- *ret = iommu_domain_identity_map(si_domain,
- (uint64_t)start_pfn << PAGE_SHIFT,
- (uint64_t)end_pfn << PAGE_SHIFT);
- return *ret;
-}
-
-static int __init si_domain_init(int hw)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- int nid, ret = 0;
-
- si_domain = alloc_domain();
- if (!si_domain)
- return -EFAULT;
-
- pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
-
- for_each_active_iommu(iommu, drhd) {
- ret = iommu_attach_domain(si_domain, iommu);
- if (ret) {
- domain_exit(si_domain);
- return -EFAULT;
- }
- }
-
- if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
- domain_exit(si_domain);
- return -EFAULT;
- }
-
- si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
-
- if (hw)
- return 0;
-
- for_each_online_node(nid) {
- work_with_active_regions(nid, si_domain_work_fn, &ret);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
- struct pci_dev *pdev);
-static int identity_mapping(struct pci_dev *pdev)
-{
- struct device_domain_info *info;
-
- if (likely(!iommu_identity_mapping))
- return 0;
-
- list_for_each_entry(info, &si_domain->devices, link)
- if (info->dev == pdev)
- return 1;
- return 0;
-}
-
-static int domain_add_dev_info(struct dmar_domain *domain,
- struct pci_dev *pdev,
- int translation)
-{
- struct device_domain_info *info;
- unsigned long flags;
- int ret;
-
- info = alloc_devinfo_mem();
- if (!info)
- return -ENOMEM;
-
- ret = domain_context_mapping(domain, pdev, translation);
- if (ret) {
- free_devinfo_mem(info);
- return ret;
- }
-
- info->segment = pci_domain_nr(pdev->bus);
- info->bus = pdev->bus->number;
- info->devfn = pdev->devfn;
- info->dev = pdev;
- info->domain = domain;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- list_add(&info->link, &domain->devices);
- list_add(&info->global, &device_domain_list);
- pdev->dev.archdata.iommu = info;
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- return 0;
-}
-
-static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
-{
- if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
- return 1;
-
- if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
- return 1;
-
- if (!(iommu_identity_mapping & IDENTMAP_ALL))
- return 0;
-
- /*
- * We want to start off with all devices in the 1:1 domain, and
- * take them out later if we find they can't access all of memory.
- *
- * However, we can't do this for PCI devices behind bridges,
- * because all PCI devices behind the same bridge will end up
- * with the same source-id on their transactions.
- *
- * Practically speaking, we can't change things around for these
- * devices at run-time, because we can't be sure there'll be no
- * DMA transactions in flight for any of their siblings.
- *
- * So PCI devices (unless they're on the root bus) as well as
- * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
- * the 1:1 domain, just in _case_ one of their siblings turns out
- * not to be able to map all of memory.
- */
- if (!pci_is_pcie(pdev)) {
- if (!pci_is_root_bus(pdev->bus))
- return 0;
- if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
- return 0;
- } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
-
- /*
- * At boot time, we don't yet know if devices will be 64-bit capable.
- * Assume that they will -- if they turn out not to be, then we can
- * take them out of the 1:1 domain later.
- */
- if (!startup)
- return pdev->dma_mask > DMA_BIT_MASK(32);
-
- return 1;
-}
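-
-/*
- * Editor's summary of the policy above (illustrative):
- *   Azalia quirk + IDENTMAP_AZALIA          -> identity map
- *   GFX device + IDENTMAP_GFX               -> identity map
- *   IDENTMAP_ALL not set                    -> no
- *   legacy PCI device off the root bus,
- *   PCI-PCI or PCIe-PCI bridge              -> no
- *   at run time (startup == 0)              -> only if dma_mask > 32 bits
- *   otherwise at boot                       -> identity map
- */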
-
-static int __init iommu_prepare_static_identity_mapping(int hw)
-{
- struct pci_dev *pdev = NULL;
- int ret;
-
- ret = si_domain_init(hw);
- if (ret)
- return -EFAULT;
-
- for_each_pci_dev(pdev) {
- if (iommu_should_identity_map(pdev, 1)) {
- printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
- hw ? "hardware" : "software", pci_name(pdev));
-
- ret = domain_add_dev_info(si_domain, pdev,
- hw ? CONTEXT_TT_PASS_THROUGH :
- CONTEXT_TT_MULTI_LEVEL);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-int __init init_dmars(void)
-{
- struct dmar_drhd_unit *drhd;
- struct dmar_rmrr_unit *rmrr;
- struct pci_dev *pdev;
- struct intel_iommu *iommu;
- int i, ret;
-
- /*
- * for each drhd
- * allocate root
- * initialize and program root entry to not present
- * endfor
- */
- for_each_drhd_unit(drhd) {
- g_num_of_iommus++;
-		/*
-		 * Lock not needed: this is only incremented in the
-		 * single-threaded kernel __init code path; all other
-		 * accesses are read-only.
-		 */
- }
-
- g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
- GFP_KERNEL);
- if (!g_iommus) {
- printk(KERN_ERR "Allocating global iommu array failed\n");
- ret = -ENOMEM;
- goto error;
- }
-
- deferred_flush = kzalloc(g_num_of_iommus *
- sizeof(struct deferred_flush_tables), GFP_KERNEL);
- if (!deferred_flush) {
- ret = -ENOMEM;
- goto error;
- }
-
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
- g_iommus[iommu->seq_id] = iommu;
-
- ret = iommu_init_domains(iommu);
- if (ret)
- goto error;
-
-		/*
-		 * TBD:
-		 * we could share the same root & context tables
-		 * among all IOMMUs. Need to split it later.
-		 */
- ret = iommu_alloc_root_entry(iommu);
- if (ret) {
- printk(KERN_ERR "IOMMU: allocate root entry failed\n");
- goto error;
- }
- if (!ecap_pass_through(iommu->ecap))
- hw_pass_through = 0;
- }
-
-	/*
-	 * Start from a sane IOMMU hardware state.
-	 */
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
-
-		/*
-		 * If queued invalidation was already initialized by us
-		 * (for example, while enabling interrupt remapping) then
-		 * things are already rolling from a sane state.
-		 */
- if (iommu->qi)
- continue;
-
- /*
- * Clear any previous faults.
- */
- dmar_fault(-1, iommu);
- /*
- * Disable queued invalidation if supported and already enabled
- * before OS handover.
- */
- dmar_disable_qi(iommu);
- }
-
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
-
- if (dmar_enable_qi(iommu)) {
- /*
- * Queued Invalidate not enabled, use Register Based
- * Invalidate
- */
- iommu->flush.flush_context = __iommu_flush_context;
- iommu->flush.flush_iotlb = __iommu_flush_iotlb;
- printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
- "invalidation\n",
- iommu->seq_id,
- (unsigned long long)drhd->reg_base_addr);
- } else {
- iommu->flush.flush_context = qi_flush_context;
- iommu->flush.flush_iotlb = qi_flush_iotlb;
- printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
- "invalidation\n",
- iommu->seq_id,
- (unsigned long long)drhd->reg_base_addr);
- }
- }
-
- if (iommu_pass_through)
- iommu_identity_mapping |= IDENTMAP_ALL;
-
-#ifdef CONFIG_DMAR_BROKEN_GFX_WA
- iommu_identity_mapping |= IDENTMAP_GFX;
-#endif
-
- check_tylersburg_isoch();
-
-	/*
-	 * If pass-through is not set or not enabled, set up context entries
-	 * for identity mappings for RMRR, GFX and ISA, and possibly fall back
-	 * to static identity mapping if iommu_identity_mapping is set.
-	 */
- if (iommu_identity_mapping) {
- ret = iommu_prepare_static_identity_mapping(hw_pass_through);
- if (ret) {
- printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
- goto error;
- }
- }
- /*
- * For each rmrr
- * for each dev attached to rmrr
- * do
- * locate drhd for dev, alloc domain for dev
- * allocate free domain
- * allocate page table entries for rmrr
- * if context not allocated for bus
- * allocate and init context
- * set present in root table for this bus
- * init context with domain, translation etc
- * endfor
- * endfor
- */
- printk(KERN_INFO "IOMMU: Setting RMRR:\n");
- for_each_rmrr_units(rmrr) {
- for (i = 0; i < rmrr->devices_cnt; i++) {
- pdev = rmrr->devices[i];
-			/*
-			 * some BIOSes list non-existent devices in the
-			 * DMAR table.
-			 */
- if (!pdev)
- continue;
- ret = iommu_prepare_rmrr_dev(rmrr, pdev);
- if (ret)
- printk(KERN_ERR
- "IOMMU: mapping reserved region failed\n");
- }
- }
-
- iommu_prepare_isa();
-
- /*
- * for each drhd
- * enable fault log
- * global invalidate context cache
- * global invalidate iotlb
- * enable translation
- */
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
- iommu = drhd->iommu;
-
- iommu_flush_write_buffer(iommu);
-
- ret = dmar_set_interrupt(iommu);
- if (ret)
- goto error;
-
- iommu_set_root_entry(iommu);
-
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-
- ret = iommu_enable_translation(iommu);
- if (ret)
- goto error;
-
- iommu_disable_protect_mem_regions(iommu);
- }
-
- return 0;
-error:
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
- iommu = drhd->iommu;
- free_iommu(iommu);
- }
- kfree(g_iommus);
- return ret;
-}
-
-/* This takes a number of _MM_ pages, not VTD pages */
-static struct iova *intel_alloc_iova(struct device *dev,
- struct dmar_domain *domain,
- unsigned long nrpages, uint64_t dma_mask)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct iova *iova = NULL;
-
- /* Restrict dma_mask to the width that the iommu can handle */
- dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
-
- if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
-		/*
-		 * First try to allocate an I/O virtual address in
-		 * DMA_BIT_MASK(32); if that fails, then try allocating
-		 * from the higher range.
-		 */
- iova = alloc_iova(&domain->iovad, nrpages,
- IOVA_PFN(DMA_BIT_MASK(32)), 1);
- if (iova)
- return iova;
- }
- iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
- if (unlikely(!iova)) {
-		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
- nrpages, pci_name(pdev));
- return NULL;
- }
-
- return iova;
-}
-
-static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
-{
- struct dmar_domain *domain;
- int ret;
-
- domain = get_domain_for_dev(pdev,
- DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain) {
- printk(KERN_ERR
-			"Allocating domain for %s failed\n", pci_name(pdev));
- return NULL;
- }
-
- /* make sure context mapping is ok */
- if (unlikely(!domain_context_mapped(pdev))) {
- ret = domain_context_mapping(domain, pdev,
- CONTEXT_TT_MULTI_LEVEL);
- if (ret) {
- printk(KERN_ERR
-				"Domain context map for %s failed\n",
- pci_name(pdev));
- return NULL;
- }
- }
-
- return domain;
-}
-
-static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
-{
- struct device_domain_info *info;
-
- /* No lock here, assumes no domain exit in normal case */
- info = dev->dev.archdata.iommu;
- if (likely(info))
- return info->domain;
-
- return __get_valid_domain_for_dev(dev);
-}
-
-static int iommu_dummy(struct pci_dev *pdev)
-{
- return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
-/* Check if the pdev needs to go through the non-identity map and unmap process. */
-static int iommu_no_mapping(struct device *dev)
-{
- struct pci_dev *pdev;
- int found;
-
- if (unlikely(dev->bus != &pci_bus_type))
- return 1;
-
- pdev = to_pci_dev(dev);
- if (iommu_dummy(pdev))
- return 1;
-
- if (!iommu_identity_mapping)
- return 0;
-
- found = identity_mapping(pdev);
- if (found) {
- if (iommu_should_identity_map(pdev, 0))
- return 1;
- else {
-			/*
-			 * The 32-bit DMA device is removed from si_domain and
-			 * falls back to non-identity mapping.
-			 */
- domain_remove_one_dev_info(si_domain, pdev);
- printk(KERN_INFO "32bit %s uses non-identity mapping\n",
- pci_name(pdev));
- return 0;
- }
- } else {
-		/*
-		 * When a 64-bit DMA device is detached from a VM, the device
-		 * is put into si_domain for identity mapping.
-		 */
- if (iommu_should_identity_map(pdev, 0)) {
- int ret;
- ret = domain_add_dev_info(si_domain, pdev,
- hw_pass_through ?
- CONTEXT_TT_PASS_THROUGH :
- CONTEXT_TT_MULTI_LEVEL);
- if (!ret) {
- printk(KERN_INFO "64bit %s uses identity mapping\n",
- pci_name(pdev));
- return 1;
- }
- }
- }
-
- return 0;
-}
-
-static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
- size_t size, int dir, u64 dma_mask)
-{
- struct pci_dev *pdev = to_pci_dev(hwdev);
- struct dmar_domain *domain;
- phys_addr_t start_paddr;
- struct iova *iova;
- int prot = 0;
- int ret;
- struct intel_iommu *iommu;
- unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
-
- BUG_ON(dir == DMA_NONE);
-
- if (iommu_no_mapping(hwdev))
- return paddr;
-
- domain = get_valid_domain_for_dev(pdev);
- if (!domain)
- return 0;
-
- iommu = domain_get_iommu(domain);
- size = aligned_nrpages(paddr, size);
-
- iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
- pdev->dma_mask);
- if (!iova)
- goto error;
-
-	/*
-	 * Check if the DMAR supports zero-length reads on write-only
-	 * mappings.
-	 */
-	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
-		!cap_zlr(iommu->cap))
- prot |= DMA_PTE_READ;
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
- prot |= DMA_PTE_WRITE;
-	/*
-	 * paddr .. paddr + size might span a partial page; we should map the
-	 * whole page. Note: if two parts of one page are mapped separately,
-	 * we might have two guest addresses mapping to the same host paddr,
-	 * but this is not a big problem.
-	 */
- ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
- mm_to_dma_pfn(paddr_pfn), size, prot);
- if (ret)
- goto error;
-
-	/* It's a non-present to present mapping. Only flush in caching mode. */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
- else
- iommu_flush_write_buffer(iommu);
-
- start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
- start_paddr += paddr & ~PAGE_MASK;
- return start_paddr;
-
-error:
- if (iova)
- __free_iova(&domain->iovad, iova);
-	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
- pci_name(pdev), size, (unsigned long long)paddr, dir);
- return 0;
-}
-
-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- return __intel_map_single(dev, page_to_phys(page) + offset, size,
- dir, to_pci_dev(dev)->dma_mask);
-}
-
-static void flush_unmaps(void)
-{
- int i, j;
-
- timer_on = 0;
-
- /* just flush them all */
- for (i = 0; i < g_num_of_iommus; i++) {
- struct intel_iommu *iommu = g_iommus[i];
- if (!iommu)
- continue;
-
- if (!deferred_flush[i].next)
- continue;
-
-		/* In caching mode, global flushes make emulation expensive */
- if (!cap_caching_mode(iommu->cap))
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH);
- for (j = 0; j < deferred_flush[i].next; j++) {
- unsigned long mask;
- struct iova *iova = deferred_flush[i].iova[j];
- struct dmar_domain *domain = deferred_flush[i].domain[j];
-
- /* On real hardware multiple invalidations are expensive */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain->id,
- iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
- else {
- mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
- iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
- (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
- }
- __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
- }
- deferred_flush[i].next = 0;
- }
-
- list_size = 0;
-}
-
-static void flush_unmaps_timeout(unsigned long data)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&async_umap_flush_lock, flags);
- flush_unmaps();
- spin_unlock_irqrestore(&async_umap_flush_lock, flags);
-}
-
-static void add_unmap(struct dmar_domain *dom, struct iova *iova)
-{
- unsigned long flags;
- int next, iommu_id;
- struct intel_iommu *iommu;
-
- spin_lock_irqsave(&async_umap_flush_lock, flags);
- if (list_size == HIGH_WATER_MARK)
- flush_unmaps();
-
- iommu = domain_get_iommu(dom);
- iommu_id = iommu->seq_id;
-
- next = deferred_flush[iommu_id].next;
- deferred_flush[iommu_id].domain[next] = dom;
- deferred_flush[iommu_id].iova[next] = iova;
- deferred_flush[iommu_id].next++;
-
- if (!timer_on) {
- mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
- timer_on = 1;
- }
- list_size++;
- spin_unlock_irqrestore(&async_umap_flush_lock, flags);
-}
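-
-/*
- * Editor's note on the deferred-unmap path: in non-strict mode,
- * intel_unmap_page() queues the iova here via add_unmap() instead of
- * flushing immediately; queued entries drain through flush_unmaps() when
- * the per-iommu list reaches HIGH_WATER_MARK or when the 10ms unmap_timer
- * fires, amortizing one IOTLB flush over many unmaps.
- */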
-
-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct dmar_domain *domain;
- unsigned long start_pfn, last_pfn;
- struct iova *iova;
- struct intel_iommu *iommu;
-
- if (iommu_no_mapping(dev))
- return;
-
- domain = find_domain(pdev);
- BUG_ON(!domain);
-
- iommu = domain_get_iommu(domain);
-
- iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
- if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
- (unsigned long long)dev_addr))
- return;
-
- start_pfn = mm_to_dma_pfn(iova->pfn_lo);
- last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
-
- pr_debug("Device %s unmapping: pfn %lx-%lx\n",
- pci_name(pdev), start_pfn, last_pfn);
-
- /* clear the whole page */
- dma_pte_clear_range(domain, start_pfn, last_pfn);
-
- /* free page tables */
- dma_pte_free_pagetable(domain, start_pfn, last_pfn);
-
- if (intel_iommu_strict) {
- iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- last_pfn - start_pfn + 1, 0);
- /* free iova */
- __free_iova(&domain->iovad, iova);
- } else {
- add_unmap(domain, iova);
-		/*
-		 * queue up the release of the mapping to save the roughly 1/6
-		 * of the CPU time otherwise used up by the iotlb flush
-		 * operation...
-		 */
- }
-}
-
-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags)
-{
- void *vaddr;
- int order;
-
- size = PAGE_ALIGN(size);
- order = get_order(size);
-
- if (!iommu_no_mapping(hwdev))
- flags &= ~(GFP_DMA | GFP_DMA32);
- else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
- if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
- flags |= GFP_DMA;
- else
- flags |= GFP_DMA32;
- }
-
- vaddr = (void *)__get_free_pages(flags, order);
- if (!vaddr)
- return NULL;
- memset(vaddr, 0, size);
-
- *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
- DMA_BIDIRECTIONAL,
- hwdev->coherent_dma_mask);
- if (*dma_handle)
- return vaddr;
- free_pages((unsigned long)vaddr, order);
- return NULL;
-}
-
-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
-{
- int order;
-
- size = PAGE_ALIGN(size);
- order = get_order(size);
-
- intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
- free_pages((unsigned long)vaddr, order);
-}
-
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- struct pci_dev *pdev = to_pci_dev(hwdev);
- struct dmar_domain *domain;
- unsigned long start_pfn, last_pfn;
- struct iova *iova;
- struct intel_iommu *iommu;
-
- if (iommu_no_mapping(hwdev))
- return;
-
- domain = find_domain(pdev);
- BUG_ON(!domain);
-
- iommu = domain_get_iommu(domain);
-
- iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
- if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
- (unsigned long long)sglist[0].dma_address))
- return;
-
- start_pfn = mm_to_dma_pfn(iova->pfn_lo);
- last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
-
- /* clear the whole page */
- dma_pte_clear_range(domain, start_pfn, last_pfn);
-
- /* free page tables */
- dma_pte_free_pagetable(domain, start_pfn, last_pfn);
-
- if (intel_iommu_strict) {
- iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- last_pfn - start_pfn + 1, 0);
- /* free iova */
- __free_iova(&domain->iovad, iova);
- } else {
- add_unmap(domain, iova);
-		/*
-		 * queue up the release of the mapping to save the roughly 1/6
-		 * of the CPU time otherwise used up by the iotlb flush
-		 * operation...
-		 */
- }
-}
-
-static int intel_nontranslate_map_sg(struct device *hwdev,
- struct scatterlist *sglist, int nelems, int dir)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i) {
- BUG_ON(!sg_page(sg));
- sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
- sg->dma_length = sg->length;
- }
- return nelems;
-}
-
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
- enum dma_data_direction dir, struct dma_attrs *attrs)
-{
- int i;
- struct pci_dev *pdev = to_pci_dev(hwdev);
- struct dmar_domain *domain;
- size_t size = 0;
- int prot = 0;
- struct iova *iova = NULL;
- int ret;
- struct scatterlist *sg;
- unsigned long start_vpfn;
- struct intel_iommu *iommu;
-
- BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(hwdev))
- return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
-
- domain = get_valid_domain_for_dev(pdev);
- if (!domain)
- return 0;
-
- iommu = domain_get_iommu(domain);
-
- for_each_sg(sglist, sg, nelems, i)
- size += aligned_nrpages(sg->offset, sg->length);
-
- iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
- pdev->dma_mask);
- if (!iova) {
- sglist->dma_length = 0;
- return 0;
- }
-
-	/*
-	 * Check if the DMAR supports zero-length reads on write-only
-	 * mappings.
-	 */
-	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
-		!cap_zlr(iommu->cap))
- prot |= DMA_PTE_READ;
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
- prot |= DMA_PTE_WRITE;
-
- start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
-
- ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
- if (unlikely(ret)) {
- /* clear the page */
- dma_pte_clear_range(domain, start_vpfn,
- start_vpfn + size - 1);
- /* free page tables */
- dma_pte_free_pagetable(domain, start_vpfn,
- start_vpfn + size - 1);
- /* free iova */
- __free_iova(&domain->iovad, iova);
- return 0;
- }
-
-	/* It's a non-present to present mapping. Only flush in caching mode. */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
- else
- iommu_flush_write_buffer(iommu);
-
- return nelems;
-}
-
-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return !dma_addr;
-}
-
-struct dma_map_ops intel_dma_ops = {
- .alloc_coherent = intel_alloc_coherent,
- .free_coherent = intel_free_coherent,
- .map_sg = intel_map_sg,
- .unmap_sg = intel_unmap_sg,
- .map_page = intel_map_page,
- .unmap_page = intel_unmap_page,
- .mapping_error = intel_mapping_error,
-};
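-
-/*
- * Editor's note (illustrative call path): once intel_dma_ops is installed
- * as the platform's dma_map_ops, a driver call such as
- *
- *	dma_addr_t h = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
- *
- * dispatches to intel_map_page() above, and the matching dma_unmap_page()
- * to intel_unmap_page().
- */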
-
-static inline int iommu_domain_cache_init(void)
-{
- int ret = 0;
-
-	iommu_domain_cache = kmem_cache_create("iommu_domain",
-					 sizeof(struct dmar_domain),
-					 0,
-					 SLAB_HWCACHE_ALIGN,
-					 NULL);
- if (!iommu_domain_cache) {
- printk(KERN_ERR "Couldn't create iommu_domain cache\n");
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
-static inline int iommu_devinfo_cache_init(void)
-{
- int ret = 0;
-
- iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
- sizeof(struct device_domain_info),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!iommu_devinfo_cache) {
- printk(KERN_ERR "Couldn't create devinfo cache\n");
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
-static inline int iommu_iova_cache_init(void)
-{
- int ret = 0;
-
- iommu_iova_cache = kmem_cache_create("iommu_iova",
- sizeof(struct iova),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!iommu_iova_cache) {
- printk(KERN_ERR "Couldn't create iova cache\n");
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
-static int __init iommu_init_mempool(void)
-{
- int ret;
- ret = iommu_iova_cache_init();
- if (ret)
- return ret;
-
- ret = iommu_domain_cache_init();
- if (ret)
- goto domain_error;
-
- ret = iommu_devinfo_cache_init();
- if (!ret)
- return ret;
-
- kmem_cache_destroy(iommu_domain_cache);
-domain_error:
- kmem_cache_destroy(iommu_iova_cache);
-
- return -ENOMEM;
-}
-
-static void __init iommu_exit_mempool(void)
-{
- kmem_cache_destroy(iommu_devinfo_cache);
- kmem_cache_destroy(iommu_domain_cache);
- kmem_cache_destroy(iommu_iova_cache);
-}
-
-static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
-{
- struct dmar_drhd_unit *drhd;
- u32 vtbar;
- int rc;
-
- /* We know that this device on this chipset has its own IOMMU.
- * If we find it under a different IOMMU, then the BIOS is lying
- * to us. Hope that the IOMMU for this device is actually
- * disabled, and it needs no translation...
- */
- rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
- if (rc) {
- /* "can't" happen */
- dev_info(&pdev->dev, "failed to run vt-d quirk\n");
- return;
- }
- vtbar &= 0xffff0000;
-
- /* We know that this IOMMU should be at offset 0xa000 from the VTBAR */
- drhd = dmar_find_matched_drhd_unit(pdev);
- if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
- TAINT_FIRMWARE_WORKAROUND,
- "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
- pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
-}
-DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
-
-static void __init init_no_remapping_devices(void)
-{
- struct dmar_drhd_unit *drhd;
-
- for_each_drhd_unit(drhd) {
- if (!drhd->include_all) {
- int i;
- for (i = 0; i < drhd->devices_cnt; i++)
- if (drhd->devices[i] != NULL)
- break;
- /* ignore this DMAR unit if no PCI devices exist under it */
- if (i == drhd->devices_cnt)
- drhd->ignored = 1;
- }
- }
-
- if (dmar_map_gfx)
- return;
-
- for_each_drhd_unit(drhd) {
- int i;
- if (drhd->ignored || drhd->include_all)
- continue;
-
- for (i = 0; i < drhd->devices_cnt; i++)
- if (drhd->devices[i] &&
- !IS_GFX_DEVICE(drhd->devices[i]))
- break;
-
- if (i < drhd->devices_cnt)
- continue;
-
- /* bypass IOMMU if it is just for gfx devices */
- drhd->ignored = 1;
- for (i = 0; i < drhd->devices_cnt; i++) {
- if (!drhd->devices[i])
- continue;
- drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
- }
- }
-}
-
-#ifdef CONFIG_SUSPEND
-static int init_iommu_hw(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu = NULL;
-
- for_each_active_iommu(iommu, drhd)
- if (iommu->qi)
- dmar_reenable_qi(iommu);
-
- for_each_active_iommu(iommu, drhd) {
- iommu_flush_write_buffer(iommu);
-
- iommu_set_root_entry(iommu);
-
- iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH);
- iommu_enable_translation(iommu);
- iommu_disable_protect_mem_regions(iommu);
- }
-
- return 0;
-}
-
-static void iommu_flush_all(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
-
- for_each_active_iommu(iommu, drhd) {
- iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH);
- }
-}
-
-static int iommu_suspend(struct sys_device *dev, pm_message_t state)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu = NULL;
- unsigned long flag;
-
- for_each_active_iommu(iommu, drhd) {
- iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
- GFP_ATOMIC);
- if (!iommu->iommu_state)
- goto nomem;
- }
-
- iommu_flush_all();
-
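- /* Disable translation and save the fault-event registers of each IOMMU */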
- for_each_active_iommu(iommu, drhd) {
- iommu_disable_translation(iommu);
-
- spin_lock_irqsave(&iommu->register_lock, flag);
-
- iommu->iommu_state[SR_DMAR_FECTL_REG] =
- readl(iommu->reg + DMAR_FECTL_REG);
- iommu->iommu_state[SR_DMAR_FEDATA_REG] =
- readl(iommu->reg + DMAR_FEDATA_REG);
- iommu->iommu_state[SR_DMAR_FEADDR_REG] =
- readl(iommu->reg + DMAR_FEADDR_REG);
- iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
- readl(iommu->reg + DMAR_FEUADDR_REG);
-
- spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
- return 0;
-
-nomem:
- for_each_active_iommu(iommu, drhd)
- kfree(iommu->iommu_state);
-
- return -ENOMEM;
-}
-
-static int iommu_resume(struct sys_device *dev)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu = NULL;
- unsigned long flag;
-
- if (init_iommu_hw()) {
- WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
- return -EIO;
- }
-
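- /* Restore the fault-event registers saved at suspend time */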
- for_each_active_iommu(iommu, drhd) {
- spin_lock_irqsave(&iommu->register_lock, flag);
-
- writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
- iommu->reg + DMAR_FECTL_REG);
- writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
- iommu->reg + DMAR_FEDATA_REG);
- writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
- iommu->reg + DMAR_FEADDR_REG);
- writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
- iommu->reg + DMAR_FEUADDR_REG);
-
- spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
-
- for_each_active_iommu(iommu, drhd)
- kfree(iommu->iommu_state);
-
- return 0;
-}
-
-static struct sysdev_class iommu_sysclass = {
- .name = "iommu",
- .resume = iommu_resume,
- .suspend = iommu_suspend,
-};
-
-static struct sys_device device_iommu = {
- .cls = &iommu_sysclass,
-};
-
-static int __init init_iommu_sysfs(void)
-{
- int error;
-
- error = sysdev_class_register(&iommu_sysclass);
- if (error)
- return error;
-
- error = sysdev_register(&device_iommu);
- if (error)
- sysdev_class_unregister(&iommu_sysclass);
-
- return error;
-}
-
-#else
-static int __init init_iommu_sysfs(void)
-{
- return 0;
-}
-#endif /* CONFIG_SUSPEND */
-
-/*
- * Here we only respond to a device being unbound from its driver.
- *
- * A newly added device is not attached to its DMAR domain here; that
- * happens when the device is first mapped to an iova.
- */
-static int device_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct dmar_domain *domain;
-
- if (iommu_no_mapping(dev))
- return 0;
-
- domain = find_domain(pdev);
- if (!domain)
- return 0;
-
- if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
- domain_remove_one_dev_info(domain, pdev);
-
- return 0;
-}
-
-static struct notifier_block device_nb = {
- .notifier_call = device_notifier,
-};
-
-int __init intel_iommu_init(void)
-{
- int ret = 0;
- int force_on = 0;
-
- /* VT-d is required for a TXT/tboot launch, so enforce that */
- force_on = tboot_force_iommu();
-
- if (dmar_table_init()) {
- if (force_on)
- panic("tboot: Failed to initialize DMAR table\n");
- return -ENODEV;
- }
-
- if (dmar_dev_scope_init()) {
- if (force_on)
- panic("tboot: Failed to initialize DMAR device scope\n");
- return -ENODEV;
- }
-
- /*
- * Check the need for DMA-remapping initialization now.
- * The initialization above is also used by Interrupt-remapping.
- */
- if (no_iommu || dmar_disabled)
- return -ENODEV;
-
- iommu_init_mempool();
- dmar_init_reserved_ranges();
-
- init_no_remapping_devices();
-
- ret = init_dmars();
- if (ret) {
- if (force_on)
- panic("tboot: Failed to initialize DMARs\n");
- printk(KERN_ERR "IOMMU: dmar init failed\n");
- put_iova_domain(&reserved_iova_list);
- iommu_exit_mempool();
- return ret;
- }
- printk(KERN_INFO
- "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
-
- init_timer(&unmap_timer);
-#ifdef CONFIG_SWIOTLB
- swiotlb = 0;
-#endif
- dma_ops = &intel_dma_ops;
-
- init_iommu_sysfs();
-
- register_iommu(&intel_iommu_ops);
-
- bus_register_notifier(&pci_bus_type, &device_nb);
-
- return 0;
-}
-
-static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
- struct pci_dev *pdev)
-{
- struct pci_dev *tmp, *parent;
-
- if (!iommu || !pdev)
- return;
-
- /* dependent device detach */
- tmp = pci_find_upstream_pcie_bridge(pdev);
- /* A PCIe-to-PCI bridge is addressed by its secondary bus number and devfn 0 */
- if (tmp) {
- parent = pdev->bus->self;
- while (parent != tmp) {
- iommu_detach_dev(iommu, parent->bus->number,
- parent->devfn);
- parent = parent->bus->self;
- }
- if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
- iommu_detach_dev(iommu,
- tmp->subordinate->number, 0);
- else /* this is a legacy PCI bridge */
- iommu_detach_dev(iommu, tmp->bus->number,
- tmp->devfn);
- }
-}
-
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
- struct pci_dev *pdev)
-{
- struct device_domain_info *info;
- struct intel_iommu *iommu;
- unsigned long flags;
- int found = 0;
- struct list_head *entry, *tmp;
-
- iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
- pdev->devfn);
- if (!iommu)
- return;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_safe(entry, tmp, &domain->devices) {
- info = list_entry(entry, struct device_domain_info, link);
- /* No need to compare PCI domain; it has to be the same */
- if (info->bus == pdev->bus->number &&
- info->devfn == pdev->devfn) {
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->dev.archdata.iommu = NULL;
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- iommu_disable_dev_iotlb(info);
- iommu_detach_dev(iommu, info->bus, info->devfn);
- iommu_detach_dependent_devices(iommu, pdev);
- free_devinfo_mem(info);
-
- spin_lock_irqsave(&device_domain_lock, flags);
-
- if (found)
- break;
- else
- continue;
- }
-
- /* If no other device under the same iommu is owned by
- * this domain, clear this iommu from iommu_bmp and
- * update the iommu count and coherency.
- */
- if (iommu == device_to_iommu(info->segment, info->bus,
- info->devfn))
- found = 1;
- }
-
- if (found == 0) {
- unsigned long tmp_flags;
- spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
- clear_bit(iommu->seq_id, &domain->iommu_bmp);
- domain->iommu_count--;
- domain_update_iommu_cap(domain);
- spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
- }
-
- spin_unlock_irqrestore(&device_domain_lock, flags);
-}
-
-static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
-{
- struct device_domain_info *info;
- struct intel_iommu *iommu;
- unsigned long flags1, flags2;
-
- spin_lock_irqsave(&device_domain_lock, flags1);
- while (!list_empty(&domain->devices)) {
- info = list_entry(domain->devices.next,
- struct device_domain_info, link);
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->dev.archdata.iommu = NULL;
-
- spin_unlock_irqrestore(&device_domain_lock, flags1);
-
- iommu_disable_dev_iotlb(info);
- iommu = device_to_iommu(info->segment, info->bus, info->devfn);
- iommu_detach_dev(iommu, info->bus, info->devfn);
- iommu_detach_dependent_devices(iommu, info->dev);
-
- /* clear this iommu in iommu_bmp, update iommu count
- * and capabilities
- */
- spin_lock_irqsave(&domain->iommu_lock, flags2);
- if (test_and_clear_bit(iommu->seq_id,
- &domain->iommu_bmp)) {
- domain->iommu_count--;
- domain_update_iommu_cap(domain);
- }
- spin_unlock_irqrestore(&domain->iommu_lock, flags2);
-
- free_devinfo_mem(info);
- spin_lock_irqsave(&device_domain_lock, flags1);
- }
- spin_unlock_irqrestore(&device_domain_lock, flags1);
-}
-
-/* domain id allocator for virtual machines; these ids are not set in context entries */
-static unsigned long vm_domid;
-
-static struct dmar_domain *iommu_alloc_vm_domain(void)
-{
- struct dmar_domain *domain;
-
- domain = alloc_domain_mem();
- if (!domain)
- return NULL;
-
- domain->id = vm_domid++;
- domain->nid = -1;
- memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
- domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
-
- return domain;
-}
-
-static int md_domain_init(struct dmar_domain *domain, int guest_width)
-{
- int adjust_width;
-
- init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
- spin_lock_init(&domain->iommu_lock);
-
- domain_reserve_special_ranges(domain);
-
- /* calculate AGAW */
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- domain->agaw = width_to_agaw(adjust_width);
-
- INIT_LIST_HEAD(&domain->devices);
-
- domain->iommu_count = 0;
- domain->iommu_coherency = 0;
- domain->iommu_snooping = 0;
- domain->max_addr = 0;
- domain->nid = -1;
-
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
- return 0;
-}
-
-static void iommu_free_vm_domain(struct dmar_domain *domain)
-{
- unsigned long flags;
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- unsigned long i;
- unsigned long ndomains;
-
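- /* Release the domain id this domain occupies on each hardware IOMMU */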
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
- iommu = drhd->iommu;
-
- ndomains = cap_ndoms(iommu->cap);
- for_each_set_bit(i, iommu->domain_ids, ndomains) {
- if (iommu->domains[i] == domain) {
- spin_lock_irqsave(&iommu->lock, flags);
- clear_bit(i, iommu->domain_ids);
- iommu->domains[i] = NULL;
- spin_unlock_irqrestore(&iommu->lock, flags);
- break;
- }
- }
- }
-}
-
-static void vm_domain_exit(struct dmar_domain *domain)
-{
- /* Domain 0 is reserved, so don't process it */
- if (!domain)
- return;
-
- vm_domain_remove_all_dev_info(domain);
- /* destroy iovas */
- put_iova_domain(&domain->iovad);
-
- /* clear ptes */
- dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
- /* free page tables */
- dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
- iommu_free_vm_domain(domain);
- free_domain_mem(domain);
-}
-
-static int intel_iommu_domain_init(struct iommu_domain *domain)
-{
- struct dmar_domain *dmar_domain;
-
- dmar_domain = iommu_alloc_vm_domain();
- if (!dmar_domain) {
- printk(KERN_ERR
- "intel_iommu_domain_init: dmar_domain == NULL\n");
- return -ENOMEM;
- }
- if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
- printk(KERN_ERR
- "intel_iommu_domain_init() failed\n");
- vm_domain_exit(dmar_domain);
- return -ENOMEM;
- }
- domain->priv = dmar_domain;
-
- return 0;
-}
-
-static void intel_iommu_domain_destroy(struct iommu_domain *domain)
-{
- struct dmar_domain *dmar_domain = domain->priv;
-
- domain->priv = NULL;
- vm_domain_exit(dmar_domain);
-}
-
-static int intel_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
-{
- struct dmar_domain *dmar_domain = domain->priv;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct intel_iommu *iommu;
- int addr_width;
-
- /* normally pdev is not mapped */
- if (unlikely(domain_context_mapped(pdev))) {
- struct dmar_domain *old_domain;
-
- old_domain = find_domain(pdev);
- if (old_domain) {
- if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
- dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
- domain_remove_one_dev_info(old_domain, pdev);
- else
- domain_remove_dev_info(old_domain);
- }
- }
-
- iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
- pdev->devfn);
- if (!iommu)
- return -ENODEV;
-
- /* check if this iommu agaw is sufficient for max mapped address */
- addr_width = agaw_to_width(iommu->agaw);
- if (addr_width > cap_mgaw(iommu->cap))
- addr_width = cap_mgaw(iommu->cap);
-
- if (dmar_domain->max_addr > (1LL << addr_width)) {
- printk(KERN_ERR "%s: iommu width (%d) is not "
- "sufficient for the mapped address (%llx)\n",
- __func__, addr_width, dmar_domain->max_addr);
- return -EFAULT;
- }
- dmar_domain->gaw = addr_width;
-
- /*
- * Knock out extra levels of page tables if necessary
- */
- while (iommu->agaw < dmar_domain->agaw) {
- struct dma_pte *pte;
-
- pte = dmar_domain->pgd;
- if (dma_pte_present(pte)) {
- free_pgtable_page(dmar_domain->pgd);
- dmar_domain->pgd = (struct dma_pte *)
- phys_to_virt(dma_pte_addr(pte));
- }
- dmar_domain->agaw--;
- }
-
- return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
-}
-
-static void intel_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
-{
- struct dmar_domain *dmar_domain = domain->priv;
- struct pci_dev *pdev = to_pci_dev(dev);
-
- domain_remove_one_dev_info(dmar_domain, pdev);
-}
-
-static int intel_iommu_map(struct iommu_domain *domain,
- unsigned long iova, phys_addr_t hpa,
- int gfp_order, int iommu_prot)
-{
- struct dmar_domain *dmar_domain = domain->priv;
- u64 max_addr;
- int prot = 0;
- size_t size;
- int ret;
-
- if (iommu_prot & IOMMU_READ)
- prot |= DMA_PTE_READ;
- if (iommu_prot & IOMMU_WRITE)
- prot |= DMA_PTE_WRITE;
- if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
- prot |= DMA_PTE_SNP;
-
- size = PAGE_SIZE << gfp_order;
- max_addr = iova + size;
- if (dmar_domain->max_addr < max_addr) {
- u64 end;
-
- /* check if minimum agaw is sufficient for mapped address */
- end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
- if (end < max_addr) {
- printk(KERN_ERR "%s: iommu width (%d) is not "
- "sufficient for the mapped address (%llx)\n",
- __func__, dmar_domain->gaw, max_addr);
- return -EFAULT;
- }
- dmar_domain->max_addr = max_addr;
- }
- /* Round up size to next multiple of PAGE_SIZE, if it and
- the low bits of hpa would take us onto the next page */
- size = aligned_nrpages(hpa, size);
- ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
- hpa >> VTD_PAGE_SHIFT, size, prot);
- return ret;
-}
-
-static int intel_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, int gfp_order)
-{
- struct dmar_domain *dmar_domain = domain->priv;
- size_t size = PAGE_SIZE << gfp_order;
-
- dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
- (iova + size - 1) >> VTD_PAGE_SHIFT);
-
- if (dmar_domain->max_addr == iova + size)
- dmar_domain->max_addr = iova;
-
- return gfp_order;
-}
-
-static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
-{
- struct dmar_domain *dmar_domain = domain->priv;
- struct dma_pte *pte;
- u64 phys = 0;
-
- pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
- if (pte)
- phys = dma_pte_addr(pte);
-
- return phys;
-}
-
-static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
- unsigned long cap)
-{
- struct dmar_domain *dmar_domain = domain->priv;
-
- if (cap == IOMMU_CAP_CACHE_COHERENCY)
- return dmar_domain->iommu_snooping;
- if (cap == IOMMU_CAP_INTR_REMAP)
- return intr_remapping_enabled;
-
- return 0;
-}
-
-static struct iommu_ops intel_iommu_ops = {
- .domain_init = intel_iommu_domain_init,
- .domain_destroy = intel_iommu_domain_destroy,
- .attach_dev = intel_iommu_attach_device,
- .detach_dev = intel_iommu_detach_device,
- .map = intel_iommu_map,
- .unmap = intel_iommu_unmap,
- .iova_to_phys = intel_iommu_iova_to_phys,
- .domain_has_cap = intel_iommu_domain_has_cap,
-};
-
-static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
-{
- /*
- * Mobile 4 Series Chipset neglects to set RWBF capability,
- * but needs it:
- */
- printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
- rwbf_quirk = 1;
-
- /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
- if (dev->revision == 0x07) {
- printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
- dmar_map_gfx = 0;
- }
-}
-
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
-
-#define GGC 0x52
-#define GGC_MEMORY_SIZE_MASK (0xf << 8)
-#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
-#define GGC_MEMORY_SIZE_1M (0x1 << 8)
-#define GGC_MEMORY_SIZE_2M (0x3 << 8)
-#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
-#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
-#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
-#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
-
-static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
-{
- unsigned short ggc;
-
- if (pci_read_config_word(dev, GGC, &ggc))
- return;
-
- if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
- printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
- dmar_map_gfx = 0;
- }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
-
-/* On Tylersburg chipsets, some BIOSes have been known to enable the
- ISOCH DMAR unit for the Azalia sound device, but not give it any
- TLB entries, which causes it to deadlock. Check for that. We do
- this in a function called from init_dmars(), instead of in a PCI
- quirk, because we don't want to print the obnoxious "BIOS broken"
- message if VT-d is actually disabled.
-*/
-static void __init check_tylersburg_isoch(void)
-{
- struct pci_dev *pdev;
- uint32_t vtisochctrl;
-
- /* If there's no Azalia in the system anyway, forget it. */
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
- if (!pdev)
- return;
- pci_dev_put(pdev);
-
- /* System Management Registers. Might be hidden, in which case
- we can't do the sanity check. But that's OK, because the
- known-broken BIOSes _don't_ actually hide it, so far. */
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
- if (!pdev)
- return;
-
- if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
- pci_dev_put(pdev);
- return;
- }
-
- pci_dev_put(pdev);
-
- /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
- if (vtisochctrl & 1)
- return;
-
- /* Drop all bits other than the number of TLB entries */
- vtisochctrl &= 0x1c;
-
- /* If we have the recommended number of TLB entries (16), fine. */
- if (vtisochctrl == 0x10)
- return;
-
- /* Zero TLB entries? You get to ride the short bus to school. */
- if (!vtisochctrl) {
- WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- iommu_identity_mapping |= IDENTMAP_AZALIA;
- return;
- }
-
- printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
- vtisochctrl);
-}
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
deleted file mode 100644
index ec87cd66f3e..00000000000
--- a/drivers/pci/intr_remapping.c
+++ /dev/null
@@ -1,798 +0,0 @@
-#include <linux/interrupt.h>
-#include <linux/dmar.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/hpet.h>
-#include <linux/pci.h>
-#include <linux/irq.h>
-#include <asm/io_apic.h>
-#include <asm/smp.h>
-#include <asm/cpu.h>
-#include <linux/intel-iommu.h>
-#include "intr_remapping.h"
-#include <acpi/acpi.h>
-#include <asm/pci-direct.h>
-#include "pci.h"
-
-static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
-static struct hpet_scope ir_hpet[MAX_HPET_TBS];
-static int ir_ioapic_num, ir_hpet_num;
-int intr_remapping_enabled;
-
-static int disable_intremap;
-static int disable_sourceid_checking;
-
-static __init int setup_nointremap(char *str)
-{
- disable_intremap = 1;
- return 0;
-}
-early_param("nointremap", setup_nointremap);
-
-static __init int setup_intremap(char *str)
-{
- if (!str)
- return -EINVAL;
-
- if (!strncmp(str, "on", 2))
- disable_intremap = 0;
- else if (!strncmp(str, "off", 3))
- disable_intremap = 1;
- else if (!strncmp(str, "nosid", 5))
- disable_sourceid_checking = 1;
-
- return 0;
-}
-early_param("intremap", setup_intremap);
-
-static DEFINE_SPINLOCK(irq_2_ir_lock);
-
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
-{
- struct irq_cfg *cfg = get_irq_chip_data(irq);
- return cfg ? &cfg->irq_2_iommu : NULL;
-}
-
-int get_irte(int irq, struct irte *entry)
-{
- struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
- unsigned long flags;
- int index;
-
- if (!entry || !irq_iommu)
- return -1;
-
- spin_lock_irqsave(&irq_2_ir_lock, flags);
-
- index = irq_iommu->irte_index + irq_iommu->sub_handle;
- *entry = *(irq_iommu->iommu->ir_table->base + index);
-
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
- return 0;
-}
-
-int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
-{
- struct ir_table *table = iommu->ir_table;
- struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
- u16 index, start_index;
- unsigned int mask = 0;
- unsigned long flags;
- int i;
-
- if (!count || !irq_iommu)
- return -1;
-
- /*
- * start the IRTE search from index 0.
- */
- index = start_index = 0;
-
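- /* Multi-IRTE allocations must be a power of two; mask holds log2(count) */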
- if (count > 1) {
- count = __roundup_pow_of_two(count);
- mask = ilog2(count);
- }
-
- if (mask > ecap_max_handle_mask(iommu->ecap)) {
- printk(KERN_ERR
- "Requested mask %x exceeds the max invalidation handle"
- " mask value %Lx\n", mask,
- ecap_max_handle_mask(iommu->ecap));
- return -1;
- }
-
- spin_lock_irqsave(&irq_2_ir_lock, flags);
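- /* Scan the table in count-sized steps until a fully free block is found */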
- do {
- for (i = index; i < index + count; i++)
- if (table->base[i].present)
- break;
- /* empty index found */
- if (i == index + count)
- break;
-
- index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
-
- if (index == start_index) {
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
- printk(KERN_ERR "can't allocate an IRTE\n");
- return -1;
- }
- } while (1);
-
- for (i = index; i < index + count; i++)
- table->base[i].present = 1;
-
- irq_iommu->iommu = iommu;
- irq_iommu->irte_index = index;
- irq_iommu->sub_handle = 0;
- irq_iommu->irte_mask = mask;
-
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
- return index;
-}
-
-static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
-{
- struct qi_desc desc;
-
- desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
- | QI_IEC_SELECTIVE;
- desc.high = 0;
-
- return qi_submit_sync(&desc, iommu);
-}
-
-int map_irq_to_irte_handle(int irq, u16 *sub_handle)
-{
- struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
- unsigned long flags;
- int index;
-
- if (!irq_iommu)
- return -1;
-
- spin_lock_irqsave(&irq_2_ir_lock, flags);
- *sub_handle = irq_iommu->sub_handle;
- index = irq_iommu->irte_index;
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
- return index;
-}
-
-int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
-{
- struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
- unsigned long flags;
-
- if (!irq_iommu)
- return -1;
-
- spin_lock_irqsave(&irq_2_ir_lock, flags);
-
- irq_iommu->iommu = iommu;
- irq_iommu->irte_index = index;
- irq_iommu->sub_handle = subhandle;
- irq_iommu->irte_mask = 0;
-
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
- return 0;
-}
-
-int modify_irte(int irq, struct irte *irte_modified)
-{
- struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
- struct intel_iommu *iommu;
- unsigned long flags;
- struct irte *irte;
- int rc, index;
-
- if (!irq_iommu)
- return -1;
-
- spin_lock_irqsave(&irq_2_ir_lock, flags);
-
- iommu = irq_iommu->iommu;
-
- index = irq_iommu->irte_index + irq_iommu->sub_handle;
- irte = &iommu->ir_table->base[index];
-
- set_64bit(&irte->low, irte_modified->low);
- set_64bit(&irte->high, irte_modified->high);
- __iommu_flush_cache(iommu, irte, sizeof(*irte));
-
- rc = qi_flush_iec(iommu, index, 0);
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
- return rc;
-}
-
-struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
-{
- int i;
-
- for (i = 0; i < MAX_HPET_TBS; i++)
- if (ir_hpet[i].id == hpet_id)
- return ir_hpet[i].iommu;
- return NULL;
-}
-
-struct intel_iommu *map_ioapic_to_ir(int apic)
-{
- int i;
-
- for (i = 0; i < MAX_IO_APICS; i++)
- if (ir_ioapic[i].id == apic)
- return ir_ioapic[i].iommu;
- return NULL;
-}
-
-struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
-{
- struct dmar_drhd_unit *drhd;
-
- drhd = dmar_find_matched_drhd_unit(dev);
- if (!drhd)
- return NULL;
-
- return drhd->iommu;
-}
-
-static int clear_entries(struct irq_2_iommu *irq_iommu)
-{
- struct irte *start, *entry, *end;
- struct intel_iommu *iommu;
- int index;
-
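- /* Only the owner of the IRTE block (sub_handle 0) clears and flushes it */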
- if (irq_iommu->sub_handle)
- return 0;
-
- iommu = irq_iommu->iommu;
- index = irq_iommu->irte_index + irq_iommu->sub_handle;
-
- start = iommu->ir_table->base + index;
- end = start + (1 << irq_iommu->irte_mask);
-
- for (entry = start; entry < end; entry++) {
- set_64bit(&entry->low, 0);
- set_64bit(&entry->high, 0);
- }
-
- return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
-}
-
-int free_irte(int irq)
-{
- struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
- unsigned long flags;
- int rc;
-
- if (!irq_iommu)
- return -1;
-
- spin_lock_irqsave(&irq_2_ir_lock, flags);
-
- rc = clear_entries(irq_iommu);
-
- irq_iommu->iommu = NULL;
- irq_iommu->irte_index = 0;
- irq_iommu->sub_handle = 0;
- irq_iommu->irte_mask = 0;
-
- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
- return rc;
-}
-
-/*
- * source validation type
- */
-#define SVT_NO_VERIFY 0x0 /* no verification is required */
-#define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */
-#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
-
-/*
- * source-id qualifier
- */
-#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
-#define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore
- * the third least significant bit
- */
-#define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore
- * the second and third least significant bits
- */
-#define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore
- * the three least significant bits
- */
-
-/*
- * set SVT, SQ and SID fields of irte to verify
- * source ids of interrupt requests
- */
-static void set_irte_sid(struct irte *irte, unsigned int svt,
- unsigned int sq, unsigned int sid)
-{
- if (disable_sourceid_checking)
- svt = SVT_NO_VERIFY;
- irte->svt = svt;
- irte->sq = sq;
- irte->sid = sid;
-}
-
-int set_ioapic_sid(struct irte *irte, int apic)
-{
- int i;
- u16 sid = 0;
-
- if (!irte)
- return -1;
-
- for (i = 0; i < MAX_IO_APICS; i++) {
- if (ir_ioapic[i].id == apic) {
- sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
- break;
- }
- }
-
- if (sid == 0) {
- pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
- return -1;
- }
-
- set_irte_sid(irte, 1, 0, sid);
-
- return 0;
-}
-
-int set_hpet_sid(struct irte *irte, u8 id)
-{
- int i;
- u16 sid = 0;
-
- if (!irte)
- return -1;
-
- for (i = 0; i < MAX_HPET_TBS; i++) {
- if (ir_hpet[i].id == id) {
- sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
- break;
- }
- }
-
- if (sid == 0) {
- pr_warning("Failed to set source-id of HPET block (%d)\n", id);
- return -1;
- }
-
- /*
- * Should really use SQ_ALL_16. Some platforms are broken.
- * While we figure out the right quirks for these broken platforms, use
- * SQ_13_IGNORE_3 for now.
- */
- set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);
-
- return 0;
-}
-
-int set_msi_sid(struct irte *irte, struct pci_dev *dev)
-{
- struct pci_dev *bridge;
-
- if (!irte || !dev)
- return -1;
-
- /* PCIe device or Root Complex integrated PCI device */
- if (pci_is_pcie(dev) || !dev->bus->parent) {
- set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
- (dev->bus->number << 8) | dev->devfn);
- return 0;
- }
-
- bridge = pci_find_upstream_pcie_bridge(dev);
- if (bridge) {
- if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
- set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
- (bridge->bus->number << 8) | dev->bus->number);
- else /* this is a legacy PCI bridge */
- set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
- (bridge->bus->number << 8) | bridge->devfn);
- }
-
- return 0;
-}
-
-static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
-{
- u64 addr;
- u32 sts;
- unsigned long flags;
-
- addr = virt_to_phys((void *)iommu->ir_table->base);
-
- spin_lock_irqsave(&iommu->register_lock, flags);
-
- dmar_writeq(iommu->reg + DMAR_IRTA_REG,
- (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
-
- /* Set interrupt-remapping table pointer */
- iommu->gcmd |= DMA_GCMD_SIRTP;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_IRTPS), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
-
- /*
- * global invalidation of interrupt entry cache before enabling
- * interrupt-remapping.
- */
- qi_global_iec(iommu);
-
- spin_lock_irqsave(&iommu->register_lock, flags);
-
- /* Enable interrupt-remapping */
- iommu->gcmd |= DMA_GCMD_IRE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_IRES), sts);
-
- spin_unlock_irqrestore(&iommu->register_lock, flags);
-}
-
-static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
-{
- struct ir_table *ir_table;
- struct page *pages;
-
- ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
- GFP_ATOMIC);
-
- if (!iommu->ir_table)
- return -ENOMEM;
-
- pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
- INTR_REMAP_PAGE_ORDER);
-
- if (!pages) {
- printk(KERN_ERR "failed to allocate pages of order %d\n",
- INTR_REMAP_PAGE_ORDER);
- kfree(iommu->ir_table);
- return -ENOMEM;
- }
-
- ir_table->base = page_address(pages);
-
- iommu_set_intr_remapping(iommu, mode);
- return 0;
-}
-
-/*
- * Disable Interrupt Remapping.
- */
-static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
-{
- unsigned long flags;
- u32 sts;
-
- if (!ecap_ir_support(iommu->ecap))
- return;
-
- /*
- * global invalidation of interrupt entry cache before disabling
- * interrupt-remapping.
- */
- qi_global_iec(iommu);
-
- spin_lock_irqsave(&iommu->register_lock, flags);
-
- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
- if (!(sts & DMA_GSTS_IRES))
- goto end;
-
- iommu->gcmd &= ~DMA_GCMD_IRE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, !(sts & DMA_GSTS_IRES), sts);
-
-end:
- spin_unlock_irqrestore(&iommu->register_lock, flags);
-}
-
-int __init intr_remapping_supported(void)
-{
- struct dmar_drhd_unit *drhd;
-
- if (disable_intremap)
- return 0;
-
- if (!dmar_ir_support())
- return 0;
-
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
- if (!ecap_ir_support(iommu->ecap))
- return 0;
- }
-
- return 1;
-}
-
-int __init enable_intr_remapping(int eim)
-{
- struct dmar_drhd_unit *drhd;
- int setup = 0;
-
- if (parse_ioapics_under_ir() != 1) {
- printk(KERN_INFO "Not enable interrupt remapping\n");
- return -1;
- }
-
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
- /*
- * If the queued invalidation is already initialized,
- * shouldn't disable it.
- */
- if (iommu->qi)
- continue;
-
- /*
- * Clear previous faults.
- */
- dmar_fault(-1, iommu);
-
- /*
- * Disable intr remapping and queued invalidation, if already
- * enabled prior to OS handover.
- */
- iommu_disable_intr_remapping(iommu);
-
- dmar_disable_qi(iommu);
- }
-
- /*
- * check for the Interrupt-remapping support
- */
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
- if (!ecap_ir_support(iommu->ecap))
- continue;
-
- if (eim && !ecap_eim_support(iommu->ecap)) {
- printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
- " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
- return -1;
- }
- }
-
- /*
- * Enable queued invalidation for all the DRHD's.
- */
- for_each_drhd_unit(drhd) {
- int ret;
- struct intel_iommu *iommu = drhd->iommu;
- ret = dmar_enable_qi(iommu);
-
- if (ret) {
- printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
- " invalidation, ecap %Lx, ret %d\n",
- drhd->reg_base_addr, iommu->ecap, ret);
- return -1;
- }
- }
-
- /*
- * Setup Interrupt-remapping for all the DRHD's now.
- */
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
- if (!ecap_ir_support(iommu->ecap))
- continue;
-
- if (setup_intr_remapping(iommu, eim))
- goto error;
-
- setup = 1;
- }
-
- if (!setup)
- goto error;
-
- intr_remapping_enabled = 1;
-
- return 0;
-
-error:
- /*
- * handle error condition gracefully here!
- */
- return -1;
-}
-
-static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
- struct intel_iommu *iommu)
-{
- struct acpi_dmar_pci_path *path;
- u8 bus;
- int count;
-
- bus = scope->bus;
- path = (struct acpi_dmar_pci_path *)(scope + 1);
- count = (scope->length - sizeof(struct acpi_dmar_device_scope))
- / sizeof(struct acpi_dmar_pci_path);
-
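- /* Walk the ACPI device-scope path, following each bridge's secondary bus */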
- while (--count > 0) {
- /*
- * Access PCI config space directly because the PCI
- * subsystem isn't initialized yet.
- */
- bus = read_pci_config_byte(bus, path->dev, path->fn,
- PCI_SECONDARY_BUS);
- path++;
- }
- ir_hpet[ir_hpet_num].bus = bus;
- ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
- ir_hpet[ir_hpet_num].iommu = iommu;
- ir_hpet[ir_hpet_num].id = scope->enumeration_id;
- ir_hpet_num++;
-}
-
-static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
- struct intel_iommu *iommu)
-{
- struct acpi_dmar_pci_path *path;
- u8 bus;
- int count;
-
- bus = scope->bus;
- path = (struct acpi_dmar_pci_path *)(scope + 1);
- count = (scope->length - sizeof(struct acpi_dmar_device_scope))
- / sizeof(struct acpi_dmar_pci_path);
-
- while (--count > 0) {
- /*
- * Access PCI config space directly because the PCI
- * subsystem isn't initialized yet.
- */
- bus = read_pci_config_byte(bus, path->dev, path->fn,
- PCI_SECONDARY_BUS);
- path++;
- }
-
- ir_ioapic[ir_ioapic_num].bus = bus;
- ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
- ir_ioapic[ir_ioapic_num].iommu = iommu;
- ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
- ir_ioapic_num++;
-}
-
-static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
- struct intel_iommu *iommu)
-{
- struct acpi_dmar_hardware_unit *drhd;
- struct acpi_dmar_device_scope *scope;
- void *start, *end;
-
- drhd = (struct acpi_dmar_hardware_unit *)header;
-
- start = (void *)(drhd + 1);
- end = ((void *)drhd) + header->length;
-
- while (start < end) {
- scope = start;
- if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
- if (ir_ioapic_num == MAX_IO_APICS) {
- printk(KERN_WARNING "Exceeded Max IO APICS\n");
- return -1;
- }
-
- printk(KERN_INFO "IOAPIC id %d under DRHD base "
- " 0x%Lx IOMMU %d\n", scope->enumeration_id,
- drhd->address, iommu->seq_id);
-
- ir_parse_one_ioapic_scope(scope, iommu);
- } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
- if (ir_hpet_num == MAX_HPET_TBS) {
- printk(KERN_WARNING "Exceeded Max HPET blocks\n");
- return -1;
- }
-
- printk(KERN_INFO "HPET id %d under DRHD base"
- " 0x%Lx\n", scope->enumeration_id,
- drhd->address);
-
- ir_parse_one_hpet_scope(scope, iommu);
- }
- start += scope->length;
- }
-
- return 0;
-}
-
-/*
- * Finds the association between IOAPICs and their
- * Interrupt-remapping hardware units.
- */
-int __init parse_ioapics_under_ir(void)
-{
- struct dmar_drhd_unit *drhd;
- int ir_supported = 0;
-
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
- if (ecap_ir_support(iommu->ecap)) {
- if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
- return -1;
-
- ir_supported = 1;
- }
- }
-
- if (ir_supported && ir_ioapic_num != nr_ioapics) {
- printk(KERN_WARNING
- "Not all IO-APIC's listed under remapping hardware\n");
- return -1;
- }
-
- return ir_supported;
-}
-
-void disable_intr_remapping(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu = NULL;
-
- /*
- * Disable Interrupt-remapping for all the DRHD's now.
- */
- for_each_iommu(iommu, drhd) {
- if (!ecap_ir_support(iommu->ecap))
- continue;
-
- iommu_disable_intr_remapping(iommu);
- }
-}
-
-int reenable_intr_remapping(int eim)
-{
- struct dmar_drhd_unit *drhd;
- int setup = 0;
- struct intel_iommu *iommu = NULL;
-
- for_each_iommu(iommu, drhd)
- if (iommu->qi)
- dmar_reenable_qi(iommu);
-
- /*
- * Setup Interrupt-remapping for all the DRHD's now.
- */
- for_each_iommu(iommu, drhd) {
- if (!ecap_ir_support(iommu->ecap))
- continue;
-
- /* Set up interrupt remapping for this iommu. */
- iommu_set_intr_remapping(iommu, eim);
- setup = 1;
- }
-
- if (!setup)
- goto error;
-
- return 0;
-
-error:
- /*
- * handle error condition gracefully here!
- */
- return -1;
-}
-
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h
deleted file mode 100644
index 5662fecfee6..00000000000
--- a/drivers/pci/intr_remapping.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#include <linux/intel-iommu.h>
-
-struct ioapic_scope {
- struct intel_iommu *iommu;
- unsigned int id;
- unsigned int bus; /* PCI bus number */
- unsigned int devfn; /* PCI devfn number */
-};
-
-struct hpet_scope {
- struct intel_iommu *iommu;
- u8 id;
- unsigned int bus;
- unsigned int devfn;
-};
-
-#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 203508b227b..6b2b7dddbbd 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -17,9 +17,9 @@
*/
#include <linux/pci.h>
+#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <acpi/acpi_bus.h>
struct ioapic {
acpi_handle handle;
@@ -36,7 +36,7 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
char *type;
struct resource *res;
- handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ handle = ACPI_HANDLE(&dev->dev);
if (!handle)
return -EINVAL;
@@ -98,30 +98,24 @@ static void ioapic_remove(struct pci_dev *dev)
}
-static struct pci_device_id ioapic_devices[] = {
- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_SYSTEM_PIC_IOAPIC << 8, 0xffff00, },
- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_SYSTEM_PIC_IOXAPIC << 8, 0xffff00, },
+static DEFINE_PCI_DEVICE_TABLE(ioapic_devices) = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) },
+ { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) },
{ }
};
+MODULE_DEVICE_TABLE(pci, ioapic_devices);
static struct pci_driver ioapic_driver = {
.name = "ioapic",
.id_table = ioapic_devices,
.probe = ioapic_probe,
- .remove = __devexit_p(ioapic_remove),
+ .remove = ioapic_remove,
};
static int __init ioapic_init(void)
{
return pci_register_driver(&ioapic_driver);
}
-
-static void __exit ioapic_exit(void)
-{
- pci_unregister_driver(&ioapic_driver);
-}
-
module_init(ioapic_init);
-module_exit(ioapic_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 553d8ee55c1..cb6f24740ee 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -11,8 +11,10 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mutex.h>
+#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
+#include <linux/pci-ats.h>
#include "pci.h"
#define VIRTFN_ID_LEN 16
@@ -31,7 +33,6 @@ static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
- int rc;
struct pci_bus *child;
if (bus->number == busnr)
@@ -45,57 +46,45 @@ static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
if (!child)
return NULL;
- child->subordinate = busnr;
- child->dev.parent = bus->bridge;
- rc = pci_bus_add_child(child);
- if (rc) {
- pci_remove_bus(child);
- return NULL;
- }
+ pci_bus_insert_busn_res(child, busnr, busnr);
return child;
}
-static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
+static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
{
- struct pci_bus *child;
-
- if (bus->number == busnr)
- return;
-
- child = pci_find_bus(pci_domain_nr(bus), busnr);
- BUG_ON(!child);
-
- if (list_empty(&child->devices))
- pci_remove_bus(child);
+ if (physbus != virtbus && list_empty(&virtbus->devices))
+ pci_remove_bus(virtbus);
}
static int virtfn_add(struct pci_dev *dev, int id, int reset)
{
int i;
- int rc;
+ int rc = -ENOMEM;
u64 size;
char buf[VIRTFN_ID_LEN];
struct pci_dev *virtfn;
struct resource *res;
struct pci_sriov *iov = dev->sriov;
+ struct pci_bus *bus;
+
+ mutex_lock(&iov->dev->sriov->lock);
+ bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
+ if (!bus)
+ goto failed;
- virtfn = alloc_pci_dev();
+ virtfn = pci_alloc_dev(bus);
if (!virtfn)
- return -ENOMEM;
+ goto failed0;
- mutex_lock(&iov->dev->sriov->lock);
- virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
- if (!virtfn->bus) {
- kfree(virtfn);
- mutex_unlock(&iov->dev->sriov->lock);
- return -ENOMEM;
- }
virtfn->devfn = virtfn_devfn(dev, id);
virtfn->vendor = dev->vendor;
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
pci_setup_device(virtfn);
virtfn->dev.parent = dev->dev.parent;
+ virtfn->physfn = pci_dev_get(dev);
+ virtfn->is_virtfn = 1;
+ virtfn->multifunction = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = dev->resource + PCI_IOV_RESOURCES + i;
@@ -104,7 +93,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
virtfn->resource[i].name = pci_name(virtfn);
virtfn->resource[i].flags = res->flags;
size = resource_size(res);
- do_div(size, iov->total);
+ do_div(size, iov->total_VFs);
virtfn->resource[i].start = res->start + size * id;
virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
rc = request_resource(res, &virtfn->resource[i]);
@@ -117,12 +106,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
pci_device_add(virtfn, virtfn->bus);
mutex_unlock(&iov->dev->sriov->lock);
- virtfn->physfn = pci_dev_get(dev);
- virtfn->is_virtfn = 1;
-
- rc = pci_bus_add_device(virtfn);
- if (rc)
- goto failed1;
+ pci_bus_add_device(virtfn);
sprintf(buf, "virtfn%u", id);
rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
if (rc)
@@ -140,8 +124,10 @@ failed2:
failed1:
pci_dev_put(dev);
mutex_lock(&iov->dev->sriov->lock);
- pci_remove_bus_device(virtfn);
- virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
+ pci_stop_and_remove_bus_device(virtfn);
+failed0:
+ virtfn_remove_bus(dev->bus, bus);
+failed:
mutex_unlock(&iov->dev->sriov->lock);
return rc;
@@ -150,20 +136,15 @@ failed1:
static void virtfn_remove(struct pci_dev *dev, int id, int reset)
{
char buf[VIRTFN_ID_LEN];
- struct pci_bus *bus;
struct pci_dev *virtfn;
struct pci_sriov *iov = dev->sriov;
- bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
- if (!bus)
- return;
-
- virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
+ virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
+ virtfn_bus(dev, id),
+ virtfn_devfn(dev, id));
if (!virtfn)
return;
- pci_dev_put(virtfn);
-
if (reset) {
device_release_driver(&virtfn->dev);
__pci_reset_function(virtfn);
@@ -171,107 +152,24 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
sprintf(buf, "virtfn%u", id);
sysfs_remove_link(&dev->dev.kobj, buf);
- sysfs_remove_link(&virtfn->dev.kobj, "physfn");
+ /*
+ * pci_stop_dev() could have been called for this virtfn already,
+ * so the directory for the virtfn may have been removed before.
+ * Double check to avoid spurious sysfs warnings.
+ */
+ if (virtfn->dev.kobj.sd)
+ sysfs_remove_link(&virtfn->dev.kobj, "physfn");
mutex_lock(&iov->dev->sriov->lock);
- pci_remove_bus_device(virtfn);
- virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
+ pci_stop_and_remove_bus_device(virtfn);
+ virtfn_remove_bus(dev->bus, virtfn->bus);
mutex_unlock(&iov->dev->sriov->lock);
+ /* balance pci_get_domain_bus_and_slot() */
+ pci_dev_put(virtfn);
pci_dev_put(dev);
}
-static int sriov_migration(struct pci_dev *dev)
-{
- u16 status;
- struct pci_sriov *iov = dev->sriov;
-
- if (!iov->nr_virtfn)
- return 0;
-
- if (!(iov->cap & PCI_SRIOV_CAP_VFM))
- return 0;
-
- pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
- if (!(status & PCI_SRIOV_STATUS_VFM))
- return 0;
-
- schedule_work(&iov->mtask);
-
- return 1;
-}
-
-static void sriov_migration_task(struct work_struct *work)
-{
- int i;
- u8 state;
- u16 status;
- struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);
-
- for (i = iov->initial; i < iov->nr_virtfn; i++) {
- state = readb(iov->mstate + i);
- if (state == PCI_SRIOV_VFM_MI) {
- writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
- state = readb(iov->mstate + i);
- if (state == PCI_SRIOV_VFM_AV)
- virtfn_add(iov->self, i, 1);
- } else if (state == PCI_SRIOV_VFM_MO) {
- virtfn_remove(iov->self, i, 1);
- writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
- state = readb(iov->mstate + i);
- if (state == PCI_SRIOV_VFM_AV)
- virtfn_add(iov->self, i, 0);
- }
- }
-
- pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
- status &= ~PCI_SRIOV_STATUS_VFM;
- pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
-}
-
-static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
-{
- int bir;
- u32 table;
- resource_size_t pa;
- struct pci_sriov *iov = dev->sriov;
-
- if (nr_virtfn <= iov->initial)
- return 0;
-
- pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
- bir = PCI_SRIOV_VFM_BIR(table);
- if (bir > PCI_STD_RESOURCE_END)
- return -EIO;
-
- table = PCI_SRIOV_VFM_OFFSET(table);
- if (table + nr_virtfn > pci_resource_len(dev, bir))
- return -EIO;
-
- pa = pci_resource_start(dev, bir) + table;
- iov->mstate = ioremap(pa, nr_virtfn);
- if (!iov->mstate)
- return -ENOMEM;
-
- INIT_WORK(&iov->mtask, sriov_migration_task);
-
- iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
-
- return 0;
-}
-
-static void sriov_disable_migration(struct pci_dev *dev)
-{
- struct pci_sriov *iov = dev->sriov;
-
- iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
-
- cancel_work_sync(&iov->mtask);
- iounmap(iov->mstate);
-}
-
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
int rc;
@@ -281,23 +179,23 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
struct resource *res;
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
+ int bars = 0;
if (!nr_virtfn)
return 0;
- if (iov->nr_virtfn)
+ if (iov->num_VFs)
return -EINVAL;
pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
- if (initial > iov->total ||
- (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
+ if (initial > iov->total_VFs ||
+ (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
return -EIO;
- if (nr_virtfn < 0 || nr_virtfn > iov->total ||
+ if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
(!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
return -EINVAL;
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
if (!offset || (nr_virtfn > 1 && !stride))
@@ -305,6 +203,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ bars |= (1 << (i + PCI_IOV_RESOURCES));
res = dev->resource + PCI_IOV_RESOURCES + i;
if (res->parent)
nres++;
@@ -317,34 +216,41 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
iov->offset = offset;
iov->stride = stride;
- if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->subordinate) {
+ if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->busn_res.end) {
dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
return -ENOMEM;
}
+ if (pci_enable_resources(dev, bars)) {
+ dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+ return -ENOMEM;
+ }
+
if (iov->link != dev->devfn) {
pdev = pci_get_slot(dev->bus, iov->link);
if (!pdev)
return -ENODEV;
- pci_dev_put(pdev);
-
- if (!pdev->is_physfn)
- return -ENODEV;
+ if (!pdev->is_physfn) {
+ pci_dev_put(pdev);
+ return -ENOSYS;
+ }
rc = sysfs_create_link(&dev->dev.kobj,
&pdev->dev.kobj, "dep_link");
+ pci_dev_put(pdev);
if (rc)
return rc;
}
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
msleep(100);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
- iov->initial = initial;
+ iov->initial_VFs = initial;
if (nr_virtfn < initial)
initial = nr_virtfn;
@@ -354,14 +260,8 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
goto failed;
}
- if (iov->cap & PCI_SRIOV_CAP_VFM) {
- rc = sriov_enable_migration(dev, nr_virtfn);
- if (rc)
- goto failed;
- }
-
kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
- iov->nr_virtfn = nr_virtfn;
+ iov->num_VFs = nr_virtfn;
return 0;
@@ -370,10 +270,11 @@ failed:
virtfn_remove(dev, j, 0);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
ssleep(1);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
@@ -386,25 +287,23 @@ static void sriov_disable(struct pci_dev *dev)
int i;
struct pci_sriov *iov = dev->sriov;
- if (!iov->nr_virtfn)
+ if (!iov->num_VFs)
return;
- if (iov->cap & PCI_SRIOV_CAP_VFM)
- sriov_disable_migration(dev);
-
- for (i = 0; i < iov->nr_virtfn; i++)
+ for (i = 0; i < iov->num_VFs; i++)
virtfn_remove(dev, i, 0);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
ssleep(1);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
- iov->nr_virtfn = 0;
+ iov->num_VFs = 0;
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
}
static int sriov_init(struct pci_dev *dev, int pos)
@@ -418,8 +317,8 @@ static int sriov_init(struct pci_dev *dev, int pos)
struct resource *res;
struct pci_dev *pdev;
- if (dev->pcie_type != PCI_EXP_TYPE_RC_END &&
- dev->pcie_type != PCI_EXP_TYPE_ENDPOINT)
+ if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END &&
+ pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT)
return -ENODEV;
pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
@@ -443,7 +342,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
found:
pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
- pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
+ pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, 0);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
if (!offset || (total > 1 && !stride))
@@ -482,14 +381,14 @@ found:
iov->pos = pos;
iov->nres = nres;
iov->ctrl = ctrl;
- iov->total = total;
+ iov->total_VFs = total;
iov->offset = offset;
iov->stride = stride;
iov->pgsz = pgsz;
iov->self = dev;
pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
- if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
if (pdev)
@@ -515,7 +414,7 @@ failed:
static void sriov_release(struct pci_dev *dev)
{
- BUG_ON(dev->sriov->nr_virtfn);
+ BUG_ON(dev->sriov->num_VFs);
if (dev != dev->sriov->dev)
pci_dev_put(dev->sriov->dev);
@@ -540,7 +439,7 @@ static void sriov_restore_state(struct pci_dev *dev)
pci_update_resource(dev, i);
pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->num_VFs);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
msleep(100);
@@ -613,7 +512,7 @@ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
struct resource tmp;
enum pci_bar_type type;
int reg = pci_iov_resource_bar(dev, resno, &type);
-
+
if (!reg)
return 0;
@@ -647,7 +546,7 @@ int pci_iov_bus_range(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
if (!dev->is_physfn)
continue;
- busnr = virtfn_bus(dev, dev->sriov->total - 1);
+ busnr = virtfn_bus(dev, dev->sriov->total_VFs - 1);
if (busnr > max)
max = busnr;
}
@@ -667,7 +566,7 @@ int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
might_sleep();
if (!dev->is_physfn)
- return -ENODEV;
+ return -ENOSYS;
return sriov_enable(dev, nr_virtfn);
}
@@ -689,25 +588,6 @@ void pci_disable_sriov(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_disable_sriov);
/**
- * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
- * @dev: the PCI device
- *
- * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
- *
- * Physical Function driver is responsible to register IRQ handler using
- * VF Migration Interrupt Message Number, and call this function when the
- * interrupt is generated by the hardware.
- */
-irqreturn_t pci_sriov_migration(struct pci_dev *dev)
-{
- if (!dev->is_physfn)
- return IRQ_NONE;
-
- return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
-}
-EXPORT_SYMBOL_GPL(pci_sriov_migration);
-
-/**
* pci_num_vf - return number of VFs associated with a PF device_release_driver
* @dev: the PCI device
*
@@ -715,151 +595,100 @@ EXPORT_SYMBOL_GPL(pci_sriov_migration);
*/
int pci_num_vf(struct pci_dev *dev)
{
- if (!dev || !dev->is_physfn)
+ if (!dev->is_physfn)
return 0;
- else
- return dev->sriov->nr_virtfn;
-}
-EXPORT_SYMBOL_GPL(pci_num_vf);
-
-static int ats_alloc_one(struct pci_dev *dev, int ps)
-{
- int pos;
- u16 cap;
- struct pci_ats *ats;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
- if (!pos)
- return -ENODEV;
-
- ats = kzalloc(sizeof(*ats), GFP_KERNEL);
- if (!ats)
- return -ENOMEM;
-
- ats->pos = pos;
- ats->stu = ps;
- pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
- ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
- PCI_ATS_MAX_QDEP;
- dev->ats = ats;
-
- return 0;
-}
-
-static void ats_free_one(struct pci_dev *dev)
-{
- kfree(dev->ats);
- dev->ats = NULL;
+ return dev->sriov->num_VFs;
}
+EXPORT_SYMBOL_GPL(pci_num_vf);
/**
- * pci_enable_ats - enable the ATS capability
+ * pci_vfs_assigned - returns number of VFs that are assigned to a guest
* @dev: the PCI device
- * @ps: the IOMMU page shift
*
- * Returns 0 on success, or negative on failure.
+ * Returns number of VFs belonging to this device that are assigned to a guest.
+ * If the device is not a physical function, returns 0.
*/
-int pci_enable_ats(struct pci_dev *dev, int ps)
+int pci_vfs_assigned(struct pci_dev *dev)
{
- int rc;
- u16 ctrl;
-
- BUG_ON(dev->ats && dev->ats->is_enabled);
-
- if (ps < PCI_ATS_MIN_STU)
- return -EINVAL;
-
- if (dev->is_physfn || dev->is_virtfn) {
- struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
-
- mutex_lock(&pdev->sriov->lock);
- if (pdev->ats)
- rc = pdev->ats->stu == ps ? 0 : -EINVAL;
- else
- rc = ats_alloc_one(pdev, ps);
+ struct pci_dev *vfdev;
+ unsigned int vfs_assigned = 0;
+ unsigned short dev_id;
- if (!rc)
- pdev->ats->ref_cnt++;
- mutex_unlock(&pdev->sriov->lock);
- if (rc)
- return rc;
- }
+ /* only search if we are a PF */
+ if (!dev->is_physfn)
+ return 0;
- if (!dev->is_physfn) {
- rc = ats_alloc_one(dev, ps);
- if (rc)
- return rc;
+ /*
+ * determine the device ID for the VFs, the vendor ID will be the
+ * same as the PF so there is no need to check for that one
+ */
+ pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id);
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(dev->vendor, dev_id, NULL);
+ while (vfdev) {
+ /*
+ * It is considered assigned if it is a virtual function with
+ * our dev as the physical function and the assigned bit is set
+ */
+ if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
+ (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
+ vfs_assigned++;
+
+ vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
}
- ctrl = PCI_ATS_CTRL_ENABLE;
- if (!dev->is_virtfn)
- ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
- pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
-
- dev->ats->is_enabled = 1;
-
- return 0;
+ return vfs_assigned;
}
+EXPORT_SYMBOL_GPL(pci_vfs_assigned);
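As a usage sketch: a PF driver can consult pci_vfs_assigned() before tearing down SR-IOV, so VFs are not yanked away from a running guest. The driver name and remove path below are hypothetical, not part of this patch.

#include <linux/pci.h>

/* Hypothetical PF driver remove path: pci_vfs_assigned() counts this
 * PF's VFs whose PCI_DEV_FLAGS_ASSIGNED flag is set, i.e. VFs handed
 * to a guest.
 */
static void example_pf_remove(struct pci_dev *pdev)
{
	if (pci_vfs_assigned(pdev))
		dev_warn(&pdev->dev,
			 "leaving SR-IOV enabled: VFs are assigned to guests\n");
	else
		pci_disable_sriov(pdev);
}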
/**
- * pci_disable_ats - disable the ATS capability
- * @dev: the PCI device
+ * pci_sriov_set_totalvfs -- reduce the TotalVFs available
+ * @dev: the PCI PF device
+ * @numvfs: number that should be used for TotalVFs supported
+ *
+ * Should be called from PF driver's probe routine with
+ * device's mutex held.
+ *
+ * Returns 0 if PF is an SRIOV-capable device and the value of
+ * numvfs is valid. If not a PF, return -ENOSYS;
+ * if numvfs is invalid, return -EINVAL;
+ * if VFs are already enabled, return -EBUSY.
*/
-void pci_disable_ats(struct pci_dev *dev)
+int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
- u16 ctrl;
-
- BUG_ON(!dev->ats || !dev->ats->is_enabled);
-
- pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
- ctrl &= ~PCI_ATS_CTRL_ENABLE;
- pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
-
- dev->ats->is_enabled = 0;
-
- if (dev->is_physfn || dev->is_virtfn) {
- struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+ if (!dev->is_physfn)
+ return -ENOSYS;
+ if (numvfs > dev->sriov->total_VFs)
+ return -EINVAL;
- mutex_lock(&pdev->sriov->lock);
- pdev->ats->ref_cnt--;
- if (!pdev->ats->ref_cnt)
- ats_free_one(pdev);
- mutex_unlock(&pdev->sriov->lock);
- }
+ /* Shouldn't change if VFs already enabled */
+ if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
+ return -EBUSY;
+ else
+ dev->sriov->driver_max_VFs = numvfs;
- if (!dev->is_physfn)
- ats_free_one(dev);
+ return 0;
}
+EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
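A minimal probe-time sketch, assuming a driver-specific resource limit (EXAMPLE_MAX_VFS is made up): the PF driver lowers the advertised TotalVFs so management tools never ask for more VFs than it can service.

#include <linux/pci.h>

#define EXAMPLE_MAX_VFS	8	/* assumed driver limit, not from this patch */

static int example_pf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	int err = pci_sriov_set_totalvfs(pdev, EXAMPLE_MAX_VFS);

	/* -ENOSYS just means the device is not an SR-IOV PF */
	if (err && err != -ENOSYS)
		return err;
	return 0;
}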
/**
- * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
- * @dev: the PCI device
+ * pci_sriov_get_totalvfs -- get total VFs supported on this device
+ * @dev: the PCI PF device
*
- * Returns the queue depth on success, or negative on failure.
- *
- * The ATS spec uses 0 in the Invalidate Queue Depth field to
- * indicate that the function can accept 32 Invalidate Request.
- * But here we use the `real' values (i.e. 1~32) for the Queue
- * Depth; and 0 indicates the function shares the Queue with
- * other functions (doesn't exclusively own a Queue).
+ * For a PCIe device with SRIOV support, return the PCIe
+ * SRIOV capability value of TotalVFs or the value of driver_max_VFs
+ * if the driver reduced it. Otherwise 0.
*/
-int pci_ats_queue_depth(struct pci_dev *dev)
+int pci_sriov_get_totalvfs(struct pci_dev *dev)
{
- int pos;
- u16 cap;
-
- if (dev->is_virtfn)
+ if (!dev->is_physfn)
return 0;
- if (dev->ats)
- return dev->ats->qdep;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
- if (!pos)
- return -ENODEV;
-
- pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+ if (dev->sriov->driver_max_VFs)
+ return dev->sriov->driver_max_VFs;
- return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
- PCI_ATS_MAX_QDEP;
+ return dev->sriov->total_VFs;
}
+EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
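Together with the previous helper, this supports a sysfs-driven VF enable flow. A rough sketch of a sriov_configure()-style callback (the function name is hypothetical):

#include <linux/pci.h>

static int example_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	/* Honour TotalVFs, or the lower driver_max_VFs ceiling if set */
	if (num_vfs > pci_sriov_get_totalvfs(pdev))
		return -ERANGE;

	if (num_vfs == 0) {
		pci_disable_sriov(pdev);
		return 0;
	}
	return pci_enable_sriov(pdev, num_vfs) ?: num_vfs;
}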
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
deleted file mode 100644
index 7914951ef29..00000000000
--- a/drivers/pci/iova.c
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * Copyright © 2006-2009, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- */
-
-#include <linux/iova.h>
-
-void
-init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
-{
- spin_lock_init(&iovad->iova_rbtree_lock);
- iovad->rbroot = RB_ROOT;
- iovad->cached32_node = NULL;
- iovad->dma_32bit_pfn = pfn_32bit;
-}
-
-static struct rb_node *
-__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
-{
- if ((*limit_pfn != iovad->dma_32bit_pfn) ||
- (iovad->cached32_node == NULL))
- return rb_last(&iovad->rbroot);
- else {
- struct rb_node *prev_node = rb_prev(iovad->cached32_node);
- struct iova *curr_iova =
- container_of(iovad->cached32_node, struct iova, node);
- *limit_pfn = curr_iova->pfn_lo - 1;
- return prev_node;
- }
-}
-
-static void
-__cached_rbnode_insert_update(struct iova_domain *iovad,
- unsigned long limit_pfn, struct iova *new)
-{
- if (limit_pfn != iovad->dma_32bit_pfn)
- return;
- iovad->cached32_node = &new->node;
-}
-
-static void
-__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
-{
- struct iova *cached_iova;
- struct rb_node *curr;
-
- if (!iovad->cached32_node)
- return;
- curr = iovad->cached32_node;
- cached_iova = container_of(curr, struct iova, node);
-
- if (free->pfn_lo >= cached_iova->pfn_lo)
- iovad->cached32_node = rb_next(&free->node);
-}
-
-/* Computes the padding size required, to make the
- * the start address naturally aligned on its size
- */
-static int
-iova_get_pad_size(int size, unsigned int limit_pfn)
-{
- unsigned int pad_size = 0;
- unsigned int order = ilog2(size);
-
- if (order)
- pad_size = (limit_pfn + 1) % (1 << order);
-
- return pad_size;
-}
-
-static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
- unsigned long size, unsigned long limit_pfn,
- struct iova *new, bool size_aligned)
-{
- struct rb_node *prev, *curr = NULL;
- unsigned long flags;
- unsigned long saved_pfn;
- unsigned int pad_size = 0;
-
- /* Walk the tree backwards */
- spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- saved_pfn = limit_pfn;
- curr = __get_cached_rbnode(iovad, &limit_pfn);
- prev = curr;
- while (curr) {
- struct iova *curr_iova = container_of(curr, struct iova, node);
-
- if (limit_pfn < curr_iova->pfn_lo)
- goto move_left;
- else if (limit_pfn < curr_iova->pfn_hi)
- goto adjust_limit_pfn;
- else {
- if (size_aligned)
- pad_size = iova_get_pad_size(size, limit_pfn);
- if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
- break; /* found a free slot */
- }
-adjust_limit_pfn:
- limit_pfn = curr_iova->pfn_lo - 1;
-move_left:
- prev = curr;
- curr = rb_prev(curr);
- }
-
- if (!curr) {
- if (size_aligned)
- pad_size = iova_get_pad_size(size, limit_pfn);
- if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- return -ENOMEM;
- }
- }
-
- /* pfn_lo will point to size aligned address if size_aligned is set */
- new->pfn_lo = limit_pfn - (size + pad_size) + 1;
- new->pfn_hi = new->pfn_lo + size - 1;
-
- /* Insert the new_iova into domain rbtree by holding writer lock */
- /* Add new node and rebalance tree. */
- {
- struct rb_node **entry, *parent = NULL;
-
- /* If we have 'prev', it's a valid place to start the
- insertion. Otherwise, start from the root. */
- if (prev)
- entry = &prev;
- else
- entry = &iovad->rbroot.rb_node;
-
- /* Figure out where to put new node */
- while (*entry) {
- struct iova *this = container_of(*entry,
- struct iova, node);
- parent = *entry;
-
- if (new->pfn_lo < this->pfn_lo)
- entry = &((*entry)->rb_left);
- else if (new->pfn_lo > this->pfn_lo)
- entry = &((*entry)->rb_right);
- else
- BUG(); /* this should not happen */
- }
-
- /* Add new node and rebalance tree. */
- rb_link_node(&new->node, parent, entry);
- rb_insert_color(&new->node, &iovad->rbroot);
- }
- __cached_rbnode_insert_update(iovad, saved_pfn, new);
-
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-
- return 0;
-}
-
-static void
-iova_insert_rbtree(struct rb_root *root, struct iova *iova)
-{
- struct rb_node **new = &(root->rb_node), *parent = NULL;
- /* Figure out where to put new node */
- while (*new) {
- struct iova *this = container_of(*new, struct iova, node);
- parent = *new;
-
- if (iova->pfn_lo < this->pfn_lo)
- new = &((*new)->rb_left);
- else if (iova->pfn_lo > this->pfn_lo)
- new = &((*new)->rb_right);
- else
- BUG(); /* this should not happen */
- }
- /* Add new node and rebalance tree. */
- rb_link_node(&iova->node, parent, new);
- rb_insert_color(&iova->node, root);
-}
-
-/**
- * alloc_iova - allocates an iova
- * @iovad - iova domain in question
- * @size - size of page frames to allocate
- * @limit_pfn - max limit address
- * @size_aligned - set if size_aligned address range is required
- * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
- * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
- * flag is set then the allocated address iova->pfn_lo will be naturally
- * aligned on roundup_power_of_two(size).
- */
-struct iova *
-alloc_iova(struct iova_domain *iovad, unsigned long size,
- unsigned long limit_pfn,
- bool size_aligned)
-{
- struct iova *new_iova;
- int ret;
-
- new_iova = alloc_iova_mem();
- if (!new_iova)
- return NULL;
-
- /* If size aligned is set then round the size to
- * to next power of two.
- */
- if (size_aligned)
- size = __roundup_pow_of_two(size);
-
- ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
- new_iova, size_aligned);
-
- if (ret) {
- free_iova_mem(new_iova);
- return NULL;
- }
-
- return new_iova;
-}
-
-/**
- * find_iova - find's an iova for a given pfn
- * @iovad - iova domain in question.
- * pfn - page frame number
- * This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
- */
-struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
-{
- unsigned long flags;
- struct rb_node *node;
-
- /* Take the lock so that no other thread is manipulating the rbtree */
- spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- node = iovad->rbroot.rb_node;
- while (node) {
- struct iova *iova = container_of(node, struct iova, node);
-
- /* If pfn falls within iova's range, return iova */
- if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- /* We are not holding the lock while this iova
- * is referenced by the caller as the same thread
- * which called this function also calls __free_iova()
- * and it is by desing that only one thread can possibly
- * reference a particular iova and hence no conflict.
- */
- return iova;
- }
-
- if (pfn < iova->pfn_lo)
- node = node->rb_left;
- else if (pfn > iova->pfn_lo)
- node = node->rb_right;
- }
-
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- return NULL;
-}
-
-/**
- * __free_iova - frees the given iova
- * @iovad: iova domain in question.
- * @iova: iova in question.
- * Frees the given iova belonging to the giving domain
- */
-void
-__free_iova(struct iova_domain *iovad, struct iova *iova)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- __cached_rbnode_delete_update(iovad, iova);
- rb_erase(&iova->node, &iovad->rbroot);
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- free_iova_mem(iova);
-}
-
-/**
- * free_iova - finds and frees the iova for a given pfn
- * @iovad: - iova domain in question.
- * @pfn: - pfn that is allocated previously
- * This functions finds an iova for a given pfn and then
- * frees the iova from that domain.
- */
-void
-free_iova(struct iova_domain *iovad, unsigned long pfn)
-{
- struct iova *iova = find_iova(iovad, pfn);
- if (iova)
- __free_iova(iovad, iova);
-
-}
-
-/**
- * put_iova_domain - destroys the iova doamin
- * @iovad: - iova domain in question.
- * All the iova's in that domain are destroyed.
- */
-void put_iova_domain(struct iova_domain *iovad)
-{
- struct rb_node *node;
- unsigned long flags;
-
- spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- node = rb_first(&iovad->rbroot);
- while (node) {
- struct iova *iova = container_of(node, struct iova, node);
- rb_erase(node, &iovad->rbroot);
- free_iova_mem(iova);
- node = rb_first(&iovad->rbroot);
- }
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-}
-
-static int
-__is_range_overlap(struct rb_node *node,
- unsigned long pfn_lo, unsigned long pfn_hi)
-{
- struct iova *iova = container_of(node, struct iova, node);
-
- if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
- return 1;
- return 0;
-}
-
-static struct iova *
-__insert_new_range(struct iova_domain *iovad,
- unsigned long pfn_lo, unsigned long pfn_hi)
-{
- struct iova *iova;
-
- iova = alloc_iova_mem();
- if (!iova)
- return iova;
-
- iova->pfn_hi = pfn_hi;
- iova->pfn_lo = pfn_lo;
- iova_insert_rbtree(&iovad->rbroot, iova);
- return iova;
-}
-
-static void
-__adjust_overlap_range(struct iova *iova,
- unsigned long *pfn_lo, unsigned long *pfn_hi)
-{
- if (*pfn_lo < iova->pfn_lo)
- iova->pfn_lo = *pfn_lo;
- if (*pfn_hi > iova->pfn_hi)
- *pfn_lo = iova->pfn_hi + 1;
-}
-
-/**
- * reserve_iova - reserves an iova in the given range
- * @iovad: - iova domain pointer
- * @pfn_lo: - lower page frame address
- * @pfn_hi:- higher pfn adderss
- * This function allocates reserves the address range from pfn_lo to pfn_hi so
- * that this address is not dished out as part of alloc_iova.
- */
-struct iova *
-reserve_iova(struct iova_domain *iovad,
- unsigned long pfn_lo, unsigned long pfn_hi)
-{
- struct rb_node *node;
- unsigned long flags;
- struct iova *iova;
- unsigned int overlap = 0;
-
- spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
- if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
- iova = container_of(node, struct iova, node);
- __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
- if ((pfn_lo >= iova->pfn_lo) &&
- (pfn_hi <= iova->pfn_hi))
- goto finish;
- overlap = 1;
-
- } else if (overlap)
- break;
- }
-
- /* We are here either becasue this is the first reserver node
- * or need to insert remaining non overlap addr range
- */
- iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
-finish:
-
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- return iova;
-}
-
-/**
- * copy_reserved_iova - copies the reserved between domains
- * @from: - source doamin from where to copy
- * @to: - destination domin where to copy
- * This function copies reserved iova's from one doamin to
- * other.
- */
-void
-copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
-{
- unsigned long flags;
- struct rb_node *node;
-
- spin_lock_irqsave(&from->iova_rbtree_lock, flags);
- for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
- struct iova *iova = container_of(node, struct iova, node);
- struct iova *new_iova;
- new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
- if (!new_iova)
- printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
- iova->pfn_lo, iova->pfn_lo);
- }
- spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
-}
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index de01174aff0..6684f153ab5 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -7,24 +7,25 @@
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/pci.h>
static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason)
{
struct pci_dev *parent = to_pci_dev(pdev->dev.parent);
- dev_printk(KERN_ERR, &pdev->dev,
- "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n",
- dev_name(&parent->dev), parent->vendor, parent->device);
- dev_printk(KERN_ERR, &pdev->dev, "%s\n", reason);
- dev_printk(KERN_ERR, &pdev->dev, "Please report to linux-kernel@vger.kernel.org\n");
+ dev_err(&pdev->dev,
+ "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n",
+ dev_name(&parent->dev), parent->vendor, parent->device);
+ dev_err(&pdev->dev, "%s\n", reason);
+ dev_err(&pdev->dev, "Please report to linux-kernel@vger.kernel.org\n");
WARN_ON(1);
}
/**
* pci_lost_interrupt - reports a lost PCI interrupt
* @pdev: device whose interrupt is lost
- *
+ *
* The primary function of this routine is to report a lost interrupt
* in a standard way which users can recognise (instead of blaming the
* driver).
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7c24dcef298..13f3d303727 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -10,7 +10,7 @@
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
+#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
@@ -21,26 +21,52 @@
#include <linux/slab.h>
#include "pci.h"
-#include "msi.h"
static int pci_msi_enable = 1;
+#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
+
+
/* Arch hooks */
-#ifndef arch_msi_check_device
-int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
+int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
+ struct msi_chip *chip = dev->bus->msi;
+ int err;
+
+ if (!chip || !chip->setup_irq)
+ return -EINVAL;
+
+ err = chip->setup_irq(chip, dev, desc);
+ if (err < 0)
+ return err;
+
+ irq_set_chip_data(desc->irq, chip);
+
return 0;
}
-#endif
-#ifndef arch_setup_msi_irqs
-# define arch_setup_msi_irqs default_setup_msi_irqs
-# define HAVE_DEFAULT_MSI_SETUP_IRQS
-#endif
+void __weak arch_teardown_msi_irq(unsigned int irq)
+{
+ struct msi_chip *chip = irq_get_chip_data(irq);
-#ifdef HAVE_DEFAULT_MSI_SETUP_IRQS
-int default_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ if (!chip || !chip->teardown_irq)
+ return;
+
+ chip->teardown_irq(chip, irq);
+}
+
+int __weak arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
+{
+ struct msi_chip *chip = dev->bus->msi;
+
+ if (!chip || !chip->check_device)
+ return 0;
+
+ return chip->check_device(chip, dev, nvec, type);
+}
+
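These weak hooks route MSI setup through the bus's struct msi_chip, so a host controller driver can provide MSI without an architecture-wide override. A skeletal sketch, with hypothetical names and the real work elided:

#include <linux/msi.h>
#include <linux/pci.h>

static int example_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
			     struct msi_desc *desc)
{
	/* Allocate an IRQ, program the MSI doorbell, then typically:
	 * irq_set_msi_desc(irq, desc); write_msi_msg(irq, &msg);
	 */
	return -ENOSYS;	/* placeholder in this sketch */
}

static void example_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
	/* Release whatever example_setup_irq() allocated */
}

static struct msi_chip example_msi_chip = {
	.setup_irq	= example_setup_irq,
	.teardown_irq	= example_teardown_irq,
};
/* A host bridge driver would then point bus->msi at example_msi_chip. */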
+int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
struct msi_desc *entry;
int ret;
@@ -62,14 +88,11 @@ int default_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return 0;
}
-#endif
-
-#ifndef arch_teardown_msi_irqs
-# define arch_teardown_msi_irqs default_teardown_msi_irqs
-# define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
-#endif
-#ifdef HAVE_DEFAULT_MSI_TEARDOWN_IRQS
+/*
+ * We have a default implementation available as a separate non-weak
+ * function, as it is used by the Xen x86 PCI code.
+ */
void default_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
@@ -78,39 +101,63 @@ void default_teardown_msi_irqs(struct pci_dev *dev)
int i, nvec;
if (entry->irq == 0)
continue;
- nvec = 1 << entry->msi_attrib.multiple;
+ if (entry->nvec_used)
+ nvec = entry->nvec_used;
+ else
+ nvec = 1 << entry->msi_attrib.multiple;
for (i = 0; i < nvec; i++)
arch_teardown_msi_irq(entry->irq + i);
}
}
-#endif
-static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
+void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
{
- u16 control;
+ return default_teardown_msi_irqs(dev);
+}
+
+static void default_restore_msi_irq(struct pci_dev *dev, int irq)
+{
+ struct msi_desc *entry;
+
+ entry = NULL;
+ if (dev->msix_enabled) {
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ if (irq == entry->irq)
+ break;
+ }
+ } else if (dev->msi_enabled) {
+ entry = irq_get_msi_desc(irq);
+ }
- BUG_ON(!pos);
+ if (entry)
+ write_msi_msg(irq, &entry->msg);
+}
+
+void __weak arch_restore_msi_irqs(struct pci_dev *dev)
+{
+ return default_restore_msi_irqs(dev);
+}
+
+static void msi_set_enable(struct pci_dev *dev, int enable)
+{
+ u16 control;
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
control &= ~PCI_MSI_FLAGS_ENABLE;
if (enable)
control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
static void msix_set_enable(struct pci_dev *dev, int enable)
{
- int pos;
u16 control;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
- control &= ~PCI_MSIX_FLAGS_ENABLE;
- if (enable)
- control |= PCI_MSIX_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
- }
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
}
static inline __attribute_const__ u32 msi_mask(unsigned x)
@@ -137,7 +184,7 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
*/
-static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
u32 mask_bits = desc->masked;
@@ -151,9 +198,14 @@ static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
return mask_bits;
}
+__weak u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+ return default_msi_mask_irq(desc, mask, flag);
+}
+
static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
- desc->masked = __msi_mask_irq(desc, mask, flag);
+ desc->masked = arch_msi_mask_irq(desc, mask, flag);
}
/*
@@ -163,21 +215,27 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
* file. This saves a few milliseconds when initialising devices with lots
* of MSI-X interrupts.
*/
-static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
+u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag)
{
u32 mask_bits = desc->masked;
unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL;
- mask_bits &= ~1;
- mask_bits |= flag;
+ mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ if (flag)
+ mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
writel(mask_bits, desc->mask_base + offset);
return mask_bits;
}
+__weak u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+ return default_msix_mask_irq(desc, flag);
+}
+
static void msix_mask_irq(struct msi_desc *desc, u32 flag)
{
- desc->masked = __msix_mask_irq(desc, flag);
+ desc->masked = arch_msix_mask_irq(desc, flag);
}
static void msi_set_mask_bit(struct irq_data *data, u32 flag)
@@ -203,6 +261,15 @@ void unmask_msi_irq(struct irq_data *data)
msi_set_mask_bit(data, 0);
}
+void default_restore_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *entry;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ default_restore_msi_irq(dev, entry->irq);
+ }
+}
+
void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
BUG_ON(entry->dev->current_state != PCI_D0);
@@ -216,18 +283,18 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
} else {
struct pci_dev *dev = entry->dev;
- int pos = entry->msi_attrib.pos;
+ int pos = dev->msi_cap;
u16 data;
- pci_read_config_dword(dev, msi_lower_address_reg(pos),
- &msg->address_lo);
+ pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
+ &msg->address_lo);
if (entry->msi_attrib.is_64) {
- pci_read_config_dword(dev, msi_upper_address_reg(pos),
- &msg->address_hi);
- pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
+ pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
+ &msg->address_hi);
+ pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
} else {
msg->address_hi = 0;
- pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
+ pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
}
msg->data = data;
}
@@ -235,7 +302,7 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
- struct msi_desc *entry = get_irq_msi(irq);
+ struct msi_desc *entry = irq_get_msi_desc(irq);
__read_msi_msg(entry, msg);
}
@@ -252,7 +319,7 @@ void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
- struct msi_desc *entry = get_irq_msi(irq);
+ struct msi_desc *entry = irq_get_msi_desc(irq);
__get_cached_msi_msg(entry, msg);
}
@@ -271,24 +338,24 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
} else {
struct pci_dev *dev = entry->dev;
- int pos = entry->msi_attrib.pos;
+ int pos = dev->msi_cap;
u16 msgctl;
- pci_read_config_word(dev, msi_control_reg(pos), &msgctl);
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
msgctl &= ~PCI_MSI_FLAGS_QSIZE;
msgctl |= entry->msi_attrib.multiple << 4;
- pci_write_config_word(dev, msi_control_reg(pos), msgctl);
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);
- pci_write_config_dword(dev, msi_lower_address_reg(pos),
- msg->address_lo);
+ pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
+ msg->address_lo);
if (entry->msi_attrib.is_64) {
- pci_write_config_dword(dev, msi_upper_address_reg(pos),
- msg->address_hi);
- pci_write_config_word(dev, msi_data_reg(pos, 1),
- msg->data);
+ pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
+ msg->address_hi);
+ pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
+ msg->data);
} else {
- pci_write_config_word(dev, msi_data_reg(pos, 0),
- msg->data);
+ pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
+ msg->data);
}
}
entry->msg = *msg;
@@ -296,7 +363,7 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
- struct msi_desc *entry = get_irq_msi(irq);
+ struct msi_desc *entry = irq_get_msi_desc(irq);
__write_msi_msg(entry, msg);
}
@@ -304,12 +371,18 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
static void free_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry, *tmp;
+ struct attribute **msi_attrs;
+ struct device_attribute *dev_attr;
+ int count = 0;
list_for_each_entry(entry, &dev->msi_list, list) {
int i, nvec;
if (!entry->irq)
continue;
- nvec = 1 << entry->msi_attrib.multiple;
+ if (entry->nvec_used)
+ nvec = entry->nvec_used;
+ else
+ nvec = 1 << entry->msi_attrib.multiple;
for (i = 0; i < nvec; i++)
BUG_ON(irq_has_action(entry->irq + i));
}
@@ -321,9 +394,37 @@ static void free_msi_irqs(struct pci_dev *dev)
if (list_is_last(&entry->list, &dev->msi_list))
iounmap(entry->mask_base);
}
+
+ /*
+ * It's possible that we get into this path when
+ * populate_msi_sysfs() fails, which means the entries
+ * were not registered with sysfs. In that case don't
+ * unregister them.
+ */
+ if (entry->kobj.parent) {
+ kobject_del(&entry->kobj);
+ kobject_put(&entry->kobj);
+ }
+
list_del(&entry->list);
kfree(entry);
}
+
+ if (dev->msi_irq_groups) {
+ sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
+ msi_attrs = dev->msi_irq_groups[0]->attrs;
+ while (msi_attrs[count]) {
+ dev_attr = container_of(msi_attrs[count],
+ struct device_attribute, attr);
+ kfree(dev_attr->attr.name);
+ kfree(dev_attr);
+ ++count;
+ }
+ kfree(msi_attrs);
+ kfree(dev->msi_irq_groups[0]);
+ kfree(dev->msi_irq_groups);
+ dev->msi_irq_groups = NULL;
+ }
}
static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
@@ -346,30 +447,27 @@ static void pci_intx_for_msi(struct pci_dev *dev, int enable)
static void __pci_restore_msi_state(struct pci_dev *dev)
{
- int pos;
u16 control;
struct msi_desc *entry;
if (!dev->msi_enabled)
return;
- entry = get_irq_msi(dev->irq);
- pos = entry->msi_attrib.pos;
+ entry = irq_get_msi_desc(dev->irq);
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, pos, 0);
- write_msi_msg(dev->irq, &entry->msg);
+ msi_set_enable(dev, 0);
+ arch_restore_msi_irqs(dev);
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
control &= ~PCI_MSI_FLAGS_QSIZE;
control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
static void __pci_restore_msix_state(struct pci_dev *dev)
{
- int pos;
struct msi_desc *entry;
u16 control;
@@ -377,21 +475,20 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
return;
BUG_ON(list_empty(&dev->msi_list));
entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
- pos = entry->msi_attrib.pos;
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* route the table */
pci_intx_for_msi(dev, 0);
control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+ arch_restore_msi_irqs(dev);
list_for_each_entry(entry, &dev->msi_list, list) {
- write_msi_msg(entry->irq, &entry->msg);
msix_mask_irq(entry, entry->masked);
}
control &= ~PCI_MSIX_FLAGS_MASKALL;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
}
void pci_restore_msi_state(struct pci_dev *dev)
@@ -401,6 +498,102 @@ void pci_restore_msi_state(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
+static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct msi_desc *entry;
+ unsigned long irq;
+ int retval;
+
+ retval = kstrtoul(attr->attr.name, 10, &irq);
+ if (retval)
+ return retval;
+
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ if (entry->irq == irq) {
+ return sprintf(buf, "%s\n",
+ entry->msi_attrib.is_msix ? "msix" : "msi");
+ }
+ }
+ return -ENODEV;
+}
+
+static int populate_msi_sysfs(struct pci_dev *pdev)
+{
+ struct attribute **msi_attrs;
+ struct attribute *msi_attr;
+ struct device_attribute *msi_dev_attr;
+ struct attribute_group *msi_irq_group;
+ const struct attribute_group **msi_irq_groups;
+ struct msi_desc *entry;
+ int ret = -ENOMEM;
+ int num_msi = 0;
+ int count = 0;
+
+ /* Determine how many msi entries we have */
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ ++num_msi;
+ }
+ if (!num_msi)
+ return 0;
+
+ /* Dynamically create the MSI attributes for the PCI device */
+ msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
+ if (!msi_attrs)
+ return -ENOMEM;
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
+ if (!msi_dev_attr)
+ goto error_attrs;
+ msi_attrs[count] = &msi_dev_attr->attr;
+
+ sysfs_attr_init(&msi_dev_attr->attr);
+ msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
+ entry->irq);
+ if (!msi_dev_attr->attr.name)
+ goto error_attrs;
+ msi_dev_attr->attr.mode = S_IRUGO;
+ msi_dev_attr->show = msi_mode_show;
+ ++count;
+ }
+
+ msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
+ if (!msi_irq_group)
+ goto error_attrs;
+ msi_irq_group->name = "msi_irqs";
+ msi_irq_group->attrs = msi_attrs;
+
+ msi_irq_groups = kzalloc(sizeof(void *) * 2, GFP_KERNEL);
+ if (!msi_irq_groups)
+ goto error_irq_group;
+ msi_irq_groups[0] = msi_irq_group;
+
+ ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups);
+ if (ret)
+ goto error_irq_groups;
+ pdev->msi_irq_groups = msi_irq_groups;
+
+ return 0;
+
+error_irq_groups:
+ kfree(msi_irq_groups);
+error_irq_group:
+ kfree(msi_irq_group);
+error_attrs:
+ count = 0;
+ msi_attr = msi_attrs[count];
+ while (msi_attr) {
+ msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
+ kfree(msi_attr->name);
+ kfree(msi_dev_attr);
+ ++count;
+ msi_attr = msi_attrs[count];
+ }
+ kfree(msi_attrs);
+ return ret;
+}
+
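Each attribute created here is a file named after the IRQ number under msi_irqs/, and msi_mode_show() answers "msi" or "msix" when it is read. A userspace sketch (device address and IRQ number are made up):

#include <stdio.h>

int main(void)
{
	char mode[8] = "";
	FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/msi_irqs/42", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%7s", mode) == 1)
		printf("IRQ 42 is %s\n", mode);	/* "msi" or "msix" */
	fclose(f);
	return 0;
}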
/**
* msi_capability_init - configure device's MSI capability structure
* @dev: pointer to the pci_dev data structure of MSI device function
@@ -415,27 +608,29 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state);
static int msi_capability_init(struct pci_dev *dev, int nvec)
{
struct msi_desc *entry;
- int pos, ret;
+ int ret;
u16 control;
unsigned mask;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
+ msi_set_enable(dev, 0); /* Disable MSI during set up */
- pci_read_config_word(dev, msi_control_reg(pos), &control);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
/* MSI Entry Initialization */
entry = alloc_msi_entry(dev);
if (!entry)
return -ENOMEM;
entry->msi_attrib.is_msix = 0;
- entry->msi_attrib.is_64 = is_64bit_address(control);
+ entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
entry->msi_attrib.entry_nr = 0;
- entry->msi_attrib.maskbit = is_mask_bit_support(control);
+ entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
- entry->msi_attrib.pos = pos;
+ entry->msi_attrib.pos = dev->msi_cap;
- entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
+ if (control & PCI_MSI_FLAGS_64BIT)
+ entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+ else
+ entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
/* All MSIs are unmasked by default, Mask them all */
if (entry->msi_attrib.maskbit)
pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
@@ -452,33 +647,39 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
return ret;
}
+ ret = populate_msi_sysfs(dev);
+ if (ret) {
+ msi_mask_irq(entry, mask, ~mask);
+ free_msi_irqs(dev);
+ return ret;
+ }
+
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, pos, 1);
+ msi_set_enable(dev, 1);
dev->msi_enabled = 1;
dev->irq = entry->irq;
return 0;
}
-static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos,
- unsigned nr_entries)
+static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
{
resource_size_t phys_addr;
u32 table_offset;
u8 bir;
- pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
- bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
- table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
+ pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
+ &table_offset);
+ bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
+ table_offset &= PCI_MSIX_TABLE_OFFSET;
phys_addr = pci_resource_start(dev, bir) + table_offset;
return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}
-static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
- void __iomem *base, struct msix_entry *entries,
- int nvec)
+static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
+ struct msix_entry *entries, int nvec)
{
struct msi_desc *entry;
int i;
@@ -498,7 +699,7 @@ static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
entry->msi_attrib.is_64 = 1;
entry->msi_attrib.entry_nr = entries[i].entry;
entry->msi_attrib.default_irq = dev->irq;
- entry->msi_attrib.pos = pos;
+ entry->msi_attrib.pos = dev->msix_cap;
entry->mask_base = base;
list_add_tail(&entry->list, &dev->msi_list);
@@ -508,7 +709,7 @@ static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
}
static void msix_program_entries(struct pci_dev *dev,
- struct msix_entry *entries)
+ struct msix_entry *entries)
{
struct msi_desc *entry;
int i = 0;
@@ -518,7 +719,7 @@ static void msix_program_entries(struct pci_dev *dev,
PCI_MSIX_ENTRY_VECTOR_CTRL;
entries[i].vector = entry->irq;
- set_irq_msi(entry->irq, entry);
+ irq_set_msi_desc(entry->irq, entry);
entry->masked = readl(entry->mask_base + offset);
msix_mask_irq(entry, 1);
i++;
@@ -538,29 +739,28 @@ static void msix_program_entries(struct pci_dev *dev,
static int msix_capability_init(struct pci_dev *dev,
struct msix_entry *entries, int nvec)
{
- int pos, ret;
+ int ret;
u16 control;
void __iomem *base;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* Ensure MSI-X is disabled while it is set up */
control &= ~PCI_MSIX_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
/* Request & Map MSI-X table region */
- base = msix_map_region(dev, pos, multi_msix_capable(control));
+ base = msix_map_region(dev, msix_table_size(control));
if (!base)
return -ENOMEM;
- ret = msix_setup_entries(dev, pos, base, entries, nvec);
+ ret = msix_setup_entries(dev, base, entries, nvec);
if (ret)
return ret;
ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
- goto error;
+ goto out_avail;
/*
* Some devices require MSI-X to be enabled before we can touch the
@@ -568,20 +768,24 @@ static int msix_capability_init(struct pci_dev *dev,
* interrupts coming in before they're fully set up.
*/
control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
msix_program_entries(dev, entries);
+ ret = populate_msi_sysfs(dev);
+ if (ret)
+ goto out_free;
+
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
dev->msix_enabled = 1;
control &= ~PCI_MSIX_FLAGS_MASKALL;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
return 0;
-error:
+out_avail:
if (ret < 0) {
/*
* If we had some success, report the number of irqs
@@ -598,6 +802,7 @@ error:
ret = avail;
}
+out_free:
free_msi_irqs(dev);
return ret;
@@ -609,7 +814,7 @@ error:
* @nvec: how many MSIs have been requested ?
* @type: are we checking for MSI or MSI-X ?
*
- * Look at global flags, the device itself, and its parent busses
+ * Look at global flags, the device itself, and its parent buses
* to determine if MSI/-X are supported for the device. If MSI/-X is
* supported return 0, else return an error code.
**/
@@ -645,79 +850,55 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
if (ret)
return ret;
- if (!pci_find_capability(dev, type))
- return -EINVAL;
-
return 0;
}
/**
- * pci_enable_msi_block - configure device's MSI capability structure
- * @dev: device to configure
- * @nvec: number of interrupts to configure
+ * pci_msi_vec_count - Return the number of MSI vectors a device can send
+ * @dev: device to report about
*
- * Allocate IRQs for a device with the MSI capability.
- * This function returns a negative errno if an error occurs. If it
- * is unable to allocate the number of interrupts requested, it returns
- * the number of interrupts it might be able to allocate. If it successfully
- * allocates at least the number of interrupts requested, it returns 0 and
- * updates the @dev's irq member to the lowest new interrupt number; the
- * other interrupt numbers allocated to this device are consecutive.
- */
-int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
+ * This function returns the number of MSI vectors a device requested via
+ * its Multiple Message Capable register. It returns a negative errno if the
+ * device is not capable of sending MSI interrupts. Otherwise, the call succeeds
+ * and returns a power of two, up to a maximum of 2^5 (32), according to the
+ * MSI specification.
+ **/
+int pci_msi_vec_count(struct pci_dev *dev)
{
- int status, pos, maxvec;
+ int ret;
u16 msgctl;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (!pos)
+ if (!dev->msi_cap)
return -EINVAL;
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
- maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
- if (nvec > maxvec)
- return maxvec;
-
- status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
- if (status)
- return status;
- WARN_ON(!!dev->msi_enabled);
-
- /* Check whether driver already requested MSI-X irqs */
- if (dev->msix_enabled) {
- dev_info(&dev->dev, "can't enable MSI "
- "(MSI-X already enabled)\n");
- return -EINVAL;
- }
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
+ ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
- status = msi_capability_init(dev, nvec);
- return status;
+ return ret;
}
-EXPORT_SYMBOL(pci_enable_msi_block);
+EXPORT_SYMBOL(pci_msi_vec_count);
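A short sketch of the intended call pattern (the helper name is invented): since the value decodes PCI_MSI_FLAGS_QMASK, a non-negative result is always a power of two between 1 and 32.

#include <linux/pci.h>

static int example_count_msi(struct pci_dev *pdev)
{
	int nvec = pci_msi_vec_count(pdev);

	if (nvec < 0)
		return nvec;	/* device has no MSI capability */
	dev_info(&pdev->dev, "device can request up to %d MSI vectors\n", nvec);
	return nvec;
}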
void pci_msi_shutdown(struct pci_dev *dev)
{
struct msi_desc *desc;
u32 mask;
u16 ctrl;
- unsigned pos;
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
BUG_ON(list_empty(&dev->msi_list));
desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
- pos = desc->msi_attrib.pos;
- msi_set_enable(dev, pos, 0);
+ msi_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
/* Return the device with MSI unmasked as initial states */
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
mask = msi_capable_mask(ctrl);
/* Keep cached state to be restored */
- __msi_mask_irq(desc, mask, ~mask);
+ arch_msi_mask_irq(desc, mask, ~mask);
/* Restore dev->irq to its default pin-assertion irq */
dev->irq = desc->msi_attrib.default_irq;
@@ -734,21 +915,24 @@ void pci_disable_msi(struct pci_dev *dev)
EXPORT_SYMBOL(pci_disable_msi);
/**
- * pci_msix_table_size - return the number of device's MSI-X table entries
+ * pci_msix_vec_count - return the number of device's MSI-X table entries
* @dev: pointer to the pci_dev data structure of MSI-X device function
- */
-int pci_msix_table_size(struct pci_dev *dev)
+ * This function returns the number of the device's MSI-X table entries and
+ * therefore the number of MSI-X vectors the device is capable of sending.
+ * It returns a negative errno if the device is not capable of sending MSI-X
+ * interrupts.
+ **/
+int pci_msix_vec_count(struct pci_dev *dev)
{
- int pos;
u16 control;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (!pos)
- return 0;
+ if (!dev->msix_cap)
+ return -EINVAL;
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- return multi_msix_capable(control);
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+ return msix_table_size(control);
}
+EXPORT_SYMBOL(pci_msix_vec_count);
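A sketch of sizing an msix_entry array from this count before enabling MSI-X (the helper name and slot policy are assumptions):

#include <linux/pci.h>
#include <linux/slab.h>

static struct msix_entry *example_alloc_msix_entries(struct pci_dev *pdev,
						     int *nvec)
{
	struct msix_entry *entries;
	int i, n = pci_msix_vec_count(pdev);

	if (n < 0)
		return NULL;	/* no MSI-X capability */
	entries = kcalloc(n, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return NULL;
	for (i = 0; i < n; i++)
		entries[i].entry = i;	/* use table slots 0..n-1 */
	*nvec = n;
	return entries;
}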
/**
* pci_enable_msix - configure device's MSI-X capability structure
@@ -770,14 +954,16 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
int status, nr_entries;
int i, j;
- if (!entries)
+ if (!entries || !dev->msix_cap || dev->current_state != PCI_D0)
return -EINVAL;
status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
if (status)
return status;
- nr_entries = pci_msix_table_size(dev);
+ nr_entries = pci_msix_vec_count(dev);
+ if (nr_entries < 0)
+ return nr_entries;
if (nvec > nr_entries)
return nr_entries;
@@ -794,8 +980,7 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
/* Check whether driver already requested for MSI irq */
if (dev->msi_enabled) {
- dev_info(&dev->dev, "can't enable MSI-X "
- "(MSI IRQ already assigned)\n");
+ dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
return -EINVAL;
}
status = msix_capability_init(dev, entries, nvec);
@@ -813,7 +998,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
/* Return the device with MSI-X masked as initial states */
list_for_each_entry(entry, &dev->msi_list, list) {
/* Keep cached states to be restored */
- __msix_mask_irq(entry, 1);
+ arch_msix_mask_irq(entry, 1);
}
msix_set_enable(dev, 0);
@@ -869,4 +1054,121 @@ EXPORT_SYMBOL(pci_msi_enabled);
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
INIT_LIST_HEAD(&dev->msi_list);
+
+ /* Disable the MSI hardware to avoid screaming interrupts
+ * during boot. This is the power-on reset default, so
+ * usually this should be a no-op.
+ */
+ dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (dev->msi_cap)
+ msi_set_enable(dev, 0);
+
+ dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (dev->msix_cap)
+ msix_set_enable(dev, 0);
+}
+
+/**
+ * pci_enable_msi_range - configure device's MSI capability structure
+ * @dev: device to configure
+ * @minvec: minimal number of interrupts to configure
+ * @maxvec: maximum number of interrupts to configure
+ *
+ * This function tries to allocate the maximum possible number of interrupts in a
+ * range between @minvec and @maxvec. It returns a negative errno if an error
+ * occurs. If it succeeds, it returns the actual number of interrupts allocated
+ * and updates the @dev's irq member to the lowest new interrupt number;
+ * the other interrupt numbers allocated to this device are consecutive.
+ **/
+int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+{
+ int nvec;
+ int rc;
+
+ if (dev->current_state != PCI_D0)
+ return -EINVAL;
+
+ WARN_ON(!!dev->msi_enabled);
+
+ /* Check whether driver already requested MSI-X irqs */
+ if (dev->msix_enabled) {
+ dev_info(&dev->dev,
+ "can't enable MSI (MSI-X already enabled)\n");
+ return -EINVAL;
+ }
+
+ if (maxvec < minvec)
+ return -ERANGE;
+
+ nvec = pci_msi_vec_count(dev);
+ if (nvec < 0)
+ return nvec;
+ else if (nvec < minvec)
+ return -EINVAL;
+ else if (nvec > maxvec)
+ nvec = maxvec;
+
+ do {
+ rc = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
+ if (rc < 0) {
+ return rc;
+ } else if (rc > 0) {
+ if (rc < minvec)
+ return -ENOSPC;
+ nvec = rc;
+ }
+ } while (rc);
+
+ do {
+ rc = msi_capability_init(dev, nvec);
+ if (rc < 0) {
+ return rc;
+ } else if (rc > 0) {
+ if (rc < minvec)
+ return -ENOSPC;
+ nvec = rc;
+ }
+ } while (rc);
+
+ return nvec;
+}
+EXPORT_SYMBOL(pci_enable_msi_range);
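A minimal caller sketch, assuming a driver that can operate with anywhere from 2 to 8 vectors:

#include <linux/pci.h>

static int example_request_msi(struct pci_dev *pdev)
{
	int nvec = pci_enable_msi_range(pdev, 2, 8);

	if (nvec < 0)
		return nvec;	/* could not get even the minimum */
	dev_info(&pdev->dev, "using %d MSI vectors starting at IRQ %d\n",
		 nvec, pdev->irq);
	return 0;
}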
+
+/**
+ * pci_enable_msix_range - configure device's MSI-X capability structure
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @entries: pointer to an array of MSI-X entries
+ * @minvec: minimum number of MSI-X irqs requested
+ * @maxvec: maximum number of MSI-X irqs requested
+ *
+ * Set up the MSI-X capability structure of the device function with the
+ * maximum possible number of interrupts in the range between @minvec and
+ * @maxvec when the driver requests MSI-X mode to be enabled on the
+ * hardware device function. It returns a negative errno if an error occurs.
+ * If it succeeds, it returns the actual number of interrupts allocated and
+ * indicates the successful configuration of MSI-X capability structure
+ * with new allocated MSI-X interrupts.
+ **/
+int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+ int minvec, int maxvec)
+{
+ int nvec = maxvec;
+ int rc;
+
+ if (maxvec < minvec)
+ return -ERANGE;
+
+ do {
+ rc = pci_enable_msix(dev, entries, nvec);
+ if (rc < 0) {
+ return rc;
+ } else if (rc > 0) {
+ if (rc < minvec)
+ return -ENOSPC;
+ nvec = rc;
+ }
+ } while (rc);
+
+ return nvec;
}
+EXPORT_SYMBOL(pci_enable_msix_range);
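The matching MSI-X sketch: the retry loop above absorbs the "fewer vectors available" case, so the caller only has to check the sign of the result. The vector counts here are illustrative.

#include <linux/pci.h>

static int example_request_msix(struct pci_dev *pdev)
{
	struct msix_entry entries[4];
	int i, nvec;

	for (i = 0; i < 4; i++)
		entries[i].entry = i;

	nvec = pci_enable_msix_range(pdev, entries, 2, 4);
	if (nvec < 0)
		return nvec;
	/* entries[0..nvec-1].vector now hold the Linux IRQ numbers */
	return nvec;
}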
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
deleted file mode 100644
index feff3bee6fe..00000000000
--- a/drivers/pci/msi.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2003-2004 Intel
- * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
- */
-
-#ifndef MSI_H
-#define MSI_H
-
-#define PCI_MSIX_ENTRY_SIZE 16
-#define PCI_MSIX_ENTRY_LOWER_ADDR 0
-#define PCI_MSIX_ENTRY_UPPER_ADDR 4
-#define PCI_MSIX_ENTRY_DATA 8
-#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
-
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
-#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI)
-#define msi_data_reg(base, is64bit) \
- (base + ((is64bit == 1) ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32))
-#define msi_mask_reg(base, is64bit) \
- (base + ((is64bit == 1) ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32))
-#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
-#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
-
-#define msix_table_offset_reg(base) (base + PCI_MSIX_TABLE)
-#define msix_pba_offset_reg(base) (base + PCI_MSIX_PBA)
-#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
-#define multi_msix_capable(control) msix_table_size((control))
-
-#endif /* MSI_H */
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
new file mode 100644
index 00000000000..f0929934bb7
--- /dev/null
+++ b/drivers/pci/of.c
@@ -0,0 +1,61 @@
+/*
+ * PCI <-> OF mapping helpers
+ *
+ * Copyright 2011 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include "pci.h"
+
+void pci_set_of_node(struct pci_dev *dev)
+{
+ if (!dev->bus->dev.of_node)
+ return;
+ dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node,
+ dev->devfn);
+}
+
+void pci_release_of_node(struct pci_dev *dev)
+{
+ of_node_put(dev->dev.of_node);
+ dev->dev.of_node = NULL;
+}
+
+void pci_set_bus_of_node(struct pci_bus *bus)
+{
+ if (bus->self == NULL)
+ bus->dev.of_node = pcibios_get_phb_of_node(bus);
+ else
+ bus->dev.of_node = of_node_get(bus->self->dev.of_node);
+}
+
+void pci_release_bus_of_node(struct pci_bus *bus)
+{
+ of_node_put(bus->dev.of_node);
+ bus->dev.of_node = NULL;
+}
+
+struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ /* This should only be called for PHBs */
+ if (WARN_ON(bus->self || bus->parent))
+ return NULL;
+
+ /* Look for a node pointer in either the intermediary device we
+ * create above the root bus or its own parent. Normally only
+ * the latter is populated.
+ */
+ if (bus->bridge->of_node)
+ return of_node_get(bus->bridge->of_node);
+ if (bus->bridge->parent && bus->bridge->parent->of_node)
+ return of_node_get(bus->bridge->parent->of_node);
+ return NULL;
+}
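Once pci_set_of_node() has attached a device-tree node during enumeration, a PCI driver can read properties from it. A sketch, where "example,num-lanes" is a made-up property name:

#include <linux/of.h>
#include <linux/pci.h>

static int example_read_dt_property(struct pci_dev *pdev)
{
	u32 lanes = 1;	/* default if the property is absent */

	if (pdev->dev.of_node)
		of_property_read_u32(pdev->dev.of_node,
				     "example,num-lanes", &lanes);
	dev_info(&pdev->dev, "configured for %u lane(s)\n", lanes);
	return 0;
}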
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 24e19c594e5..ca4927ba843 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -12,15 +12,11 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/pci-aspm.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
-
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
#include "pci.h"
-static DEFINE_MUTEX(pci_acpi_pm_notify_mtx);
-
/**
* pci_acpi_wake_bus - Wake-up notification handler for root buses.
* @handle: ACPI handle of a device the notification is for.
@@ -45,74 +41,27 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
{
struct pci_dev *pci_dev = context;
- if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
- pci_check_pme_status(pci_dev);
- pm_runtime_resume(&pci_dev->dev);
- pci_wakeup_event(pci_dev);
- if (pci_dev->subordinate)
- pci_pme_wakeup_bus(pci_dev->subordinate);
- }
-}
-
-/**
- * add_pm_notifier - Register PM notifier for given ACPI device.
- * @dev: ACPI device to add the notifier for.
- * @context: PCI device or bus to check for PME status if an event is signaled.
- *
- * NOTE: @dev need not be a run-wake or wake-up device to be a valid source of
- * PM wake-up events. For example, wake-up events may be generated for bridges
- * if one of the devices below the bridge is signaling PME, even if the bridge
- * itself doesn't have a wake-up GPE associated with it.
- */
-static acpi_status add_pm_notifier(struct acpi_device *dev,
- acpi_notify_handler handler,
- void *context)
-{
- acpi_status status = AE_ALREADY_EXISTS;
-
- mutex_lock(&pci_acpi_pm_notify_mtx);
-
- if (dev->wakeup.flags.notifier_present)
- goto out;
-
- status = acpi_install_notify_handler(dev->handle,
- ACPI_SYSTEM_NOTIFY,
- handler, context);
- if (ACPI_FAILURE(status))
- goto out;
+ if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
+ return;
- dev->wakeup.flags.notifier_present = true;
+ if (pci_dev->pme_poll)
+ pci_dev->pme_poll = false;
- out:
- mutex_unlock(&pci_acpi_pm_notify_mtx);
- return status;
-}
-
-/**
- * remove_pm_notifier - Unregister PM notifier from given ACPI device.
- * @dev: ACPI device to remove the notifier from.
- */
-static acpi_status remove_pm_notifier(struct acpi_device *dev,
- acpi_notify_handler handler)
-{
- acpi_status status = AE_BAD_PARAMETER;
-
- mutex_lock(&pci_acpi_pm_notify_mtx);
-
- if (!dev->wakeup.flags.notifier_present)
- goto out;
+ if (pci_dev->current_state == PCI_D3cold) {
+ pci_wakeup_event(pci_dev);
+ pm_runtime_resume(&pci_dev->dev);
+ return;
+ }
- status = acpi_remove_notify_handler(dev->handle,
- ACPI_SYSTEM_NOTIFY,
- handler);
- if (ACPI_FAILURE(status))
- goto out;
+ /* Clear PME Status if set. */
+ if (pci_dev->pme_support)
+ pci_check_pme_status(pci_dev);
- dev->wakeup.flags.notifier_present = false;
+ pci_wakeup_event(pci_dev);
+ pm_runtime_resume(&pci_dev->dev);
- out:
- mutex_unlock(&pci_acpi_pm_notify_mtx);
- return status;
+ if (pci_dev->subordinate)
+ pci_pme_wakeup_bus(pci_dev->subordinate);
}
/**
@@ -123,7 +72,7 @@ static acpi_status remove_pm_notifier(struct acpi_device *dev,
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
struct pci_bus *pci_bus)
{
- return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus);
+ return acpi_add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus);
}
/**
@@ -132,7 +81,7 @@ acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
*/
acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
{
- return remove_pm_notifier(dev, pci_acpi_wake_bus);
+ return acpi_remove_pm_notifier(dev, pci_acpi_wake_bus);
}
/**
@@ -143,7 +92,7 @@ acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
struct pci_dev *pci_dev)
{
- return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev);
+ return acpi_add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev);
}
/**
@@ -152,7 +101,21 @@ acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
*/
acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
{
- return remove_pm_notifier(dev, pci_acpi_wake_dev);
+ return acpi_remove_pm_notifier(dev, pci_acpi_wake_dev);
+}
+
+phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
+{
+ acpi_status status = AE_NOT_EXIST;
+ unsigned long long mcfg_addr;
+
+ if (handle)
+ status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
+ NULL, &mcfg_addr);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ return (phys_addr_t)mcfg_addr;
}
/*
@@ -175,14 +138,18 @@ acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
* if (_PRW at S-state x)
* choose from highest power _SxD to lowest power _SxW
* else // no _PRW at S-state x
- * choose highest power _SxD or any lower power
+ * choose highest power _SxD or any lower power
*/
static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
- int acpi_state;
+ int acpi_state, d_max;
- acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL);
+ if (pdev->no_d3cold)
+ d_max = ACPI_STATE_D3_HOT;
+ else
+ d_max = ACPI_STATE_D3_COLD;
+ acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
if (acpi_state < 0)
return PCI_POWER_ERROR;
@@ -193,55 +160,61 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
return PCI_D1;
case ACPI_STATE_D2:
return PCI_D2;
- case ACPI_STATE_D3:
+ case ACPI_STATE_D3_HOT:
return PCI_D3hot;
+ case ACPI_STATE_D3_COLD:
+ return PCI_D3cold;
}
return PCI_POWER_ERROR;
}
static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
return handle ? acpi_bus_power_manageable(handle) : false;
}
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
- acpi_handle tmp;
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
static const u8 state_conv[] = {
[PCI_D0] = ACPI_STATE_D0,
[PCI_D1] = ACPI_STATE_D1,
[PCI_D2] = ACPI_STATE_D2,
- [PCI_D3hot] = ACPI_STATE_D3,
- [PCI_D3cold] = ACPI_STATE_D3
+ [PCI_D3hot] = ACPI_STATE_D3_COLD,
+ [PCI_D3cold] = ACPI_STATE_D3_COLD,
};
int error = -EINVAL;
/* If the ACPI device has _EJ0, ignore the device */
- if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
+ if (!handle || acpi_has_method(handle, "_EJ0"))
return -ENODEV;
switch (state) {
+ case PCI_D3cold:
+ if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
+ PM_QOS_FLAGS_ALL) {
+ error = -EBUSY;
+ break;
+ }
case PCI_D0:
case PCI_D1:
case PCI_D2:
case PCI_D3hot:
- case PCI_D3cold:
error = acpi_bus_set_power(handle, state_conv[state]);
}
if (!error)
- dev_printk(KERN_INFO, &dev->dev,
- "power state changed by ACPI to D%d\n", state);
+ dev_dbg(&dev->dev, "power state changed by ACPI to %s\n",
+ acpi_power_state_string(state_conv[state]));
return error;
}
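
The new PCI_D3cold branch honors per-device PM QoS flags, so powering a device off completely can be vetoed. A minimal sketch of how a driver might opt a device out, assuming a bound struct pci_dev *pdev:

	/* expose the flag to userspace, initially requesting no power-off */
	dev_pm_qos_expose_flags(&pdev->dev, PM_QOS_FLAG_NO_POWER_OFF);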
static bool acpi_pci_can_wakeup(struct pci_dev *dev)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
return handle ? acpi_bus_can_wakeup(handle) : false;
}
@@ -268,49 +241,6 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
return 0;
}
-/**
- * acpi_dev_run_wake - Enable/disable wake-up for given device.
- * @phys_dev: Device to enable/disable the platform to wake-up the system for.
- * @enable: Whether enable or disable the wake-up functionality.
- *
- * Find the ACPI device object corresponding to @pci_dev and try to
- * enable/disable the GPE associated with it.
- */
-static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
-{
- struct acpi_device *dev;
- acpi_handle handle;
- int error = -ENODEV;
-
- if (!device_run_wake(phys_dev))
- return -EINVAL;
-
- handle = DEVICE_ACPI_HANDLE(phys_dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
- dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
- __func__);
- return -ENODEV;
- }
-
- if (enable) {
- if (!dev->wakeup.run_wake_count++) {
- acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
- acpi_enable_gpe(dev->wakeup.gpe_device,
- dev->wakeup.gpe_number);
- }
- } else if (dev->wakeup.run_wake_count > 0) {
- if (!--dev->wakeup.run_wake_count) {
- acpi_disable_gpe(dev->wakeup.gpe_device,
- dev->wakeup.gpe_number);
- acpi_disable_wakeup_device_power(dev);
- }
- } else {
- error = -EALREADY;
- }
-
- return error;
-}
-
static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
{
while (bus->parent) {
@@ -318,22 +248,28 @@ static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
if (bridge->pme_interrupt)
return;
- if (!acpi_dev_run_wake(&bridge->dev, enable))
+ if (!acpi_pm_device_run_wake(&bridge->dev, enable))
return;
bus = bus->parent;
}
/* We have reached the root bus. */
if (bus->bridge)
- acpi_dev_run_wake(bus->bridge, enable);
+ acpi_pm_device_run_wake(bus->bridge, enable);
}
static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
{
- if (dev->pme_interrupt)
+ /*
+ * Per PCI Express Base Specification Revision 2.0 section
+ * 5.3.3.2 Link Wakeup, platform support is needed for D3cold
+	 * wake-up to power on the main link, even if there is PME
+	 * support for D3cold.
+ */
+ if (dev->pme_interrupt && !dev->runtime_d3cold)
return 0;
- if (!acpi_dev_run_wake(&dev->dev, enable))
+ if (!acpi_pm_device_run_wake(&dev->dev, enable))
return 0;
acpi_pci_propagate_run_wake(dev->bus, enable);
@@ -344,48 +280,85 @@ static struct pci_platform_pm_ops acpi_pci_platform_pm = {
.is_manageable = acpi_pci_power_manageable,
.set_state = acpi_pci_set_power_state,
.choose_state = acpi_pci_choose_state,
- .can_wakeup = acpi_pci_can_wakeup,
.sleep_wake = acpi_pci_sleep_wake,
.run_wake = acpi_pci_run_wake,
};
+void acpi_pci_add_bus(struct pci_bus *bus)
+{
+ if (acpi_pci_disabled || !bus->bridge)
+ return;
+
+ acpi_pci_slot_enumerate(bus);
+ acpiphp_enumerate_slots(bus);
+}
+
+void acpi_pci_remove_bus(struct pci_bus *bus)
+{
+ if (acpi_pci_disabled || !bus->bridge)
+ return;
+
+ acpiphp_remove_slots(bus);
+ acpi_pci_slot_remove(bus);
+}
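
These hooks are intended to be called from the PCI core when a child bus is created or destroyed; a simplified sketch of the arch-side glue that wires them up (hook bodies illustrative):

	void pcibios_add_bus(struct pci_bus *bus)
	{
		acpi_pci_add_bus(bus);
	}

	void pcibios_remove_bus(struct pci_bus *bus)
	{
		acpi_pci_remove_bus(bus);
	}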
+
/* ACPI bus type */
-static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
+static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
- struct pci_dev * pci_dev;
- u64 addr;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ bool check_children;
+ u64 addr;
- pci_dev = to_pci_dev(dev);
+ check_children = pci_is_bridge(pci_dev);
	/* Please refer to the ACPI spec for the syntax of _ADR */
addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
- *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
- if (!*handle)
- return -ENODEV;
- return 0;
+ return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
+ check_children);
}
-static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle)
+static void pci_acpi_setup(struct device *dev)
{
- int num;
- unsigned int seg, bus;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
- /*
- * The string should be the same as root bridge's name
- * Please look at 'pci_scan_bus_parented'
- */
- num = sscanf(dev_name(dev), "pci%04x:%02x", &seg, &bus);
- if (num != 2)
- return -ENODEV;
- *handle = acpi_get_pci_rootbridge_handle(seg, bus);
- if (!*handle)
- return -ENODEV;
- return 0;
+ if (!adev)
+ return;
+
+ pci_acpi_add_pm_notifier(adev, pci_dev);
+ if (!adev->wakeup.flags.valid)
+ return;
+
+ device_set_wakeup_capable(dev, true);
+ acpi_pci_sleep_wake(pci_dev, false);
+ if (adev->wakeup.flags.run_wake)
+ device_set_run_wake(dev, true);
+}
+
+static void pci_acpi_cleanup(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (!adev)
+ return;
+
+ pci_acpi_remove_pm_notifier(adev);
+ if (adev->wakeup.flags.valid) {
+ device_set_wakeup_capable(dev, false);
+ device_set_run_wake(dev, false);
+ }
+}
+
+static bool pci_acpi_bus_match(struct device *dev)
+{
+ return dev_is_pci(dev);
}
static struct acpi_bus_type acpi_pci_bus = {
- .bus = &pci_bus_type,
- .find_device = acpi_pci_find_device,
- .find_bridge = acpi_pci_find_root_bridge,
+ .name = "PCI",
+ .match = pci_acpi_bus_match,
+ .find_companion = acpi_pci_find_companion,
+ .setup = pci_acpi_setup,
+ .cleanup = pci_acpi_cleanup,
};
static int __init acpi_pci_init(void)
@@ -393,19 +366,23 @@ static int __init acpi_pci_init(void)
int ret;
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
- printk(KERN_INFO"ACPI FADT declares the system doesn't support MSI, so disable it\n");
+ pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
pci_no_msi();
}
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
- printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
+ pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
pcie_no_aspm();
}
ret = register_acpi_bus_type(&acpi_pci_bus);
if (ret)
return 0;
+
pci_set_platform_pm(&acpi_pci_platform_pm);
+ acpi_pci_slot_init();
+ acpiphp_init();
+
return 0;
}
arch_initcall(acpi_pci_init);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 8a6f797de8e..3f8e3dbcaa7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -18,6 +18,8 @@
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+#include <linux/kexec.h>
#include "pci.h"
struct pci_dynid {
@@ -71,12 +73,11 @@ int pci_add_dynid(struct pci_driver *drv,
list_add_tail(&dynid->node, &drv->dynids.list);
spin_unlock(&drv->dynids.lock);
- get_driver(&drv->driver);
retval = driver_attach(&drv->driver);
- put_driver(&drv->driver);
return retval;
}
+EXPORT_SYMBOL_GPL(pci_add_dynid);
static void pci_free_dynids(struct pci_driver *drv)
{
@@ -90,10 +91,6 @@ static void pci_free_dynids(struct pci_driver *drv)
spin_unlock(&drv->dynids.lock);
}
-/*
- * Dynamic device ID manipulation via sysfs is disabled for !CONFIG_HOTPLUG
- */
-#ifdef CONFIG_HOTPLUG
/**
* store_new_id - sysfs frontend to pci_add_dynid()
* @driver: target device driver
@@ -102,16 +99,16 @@ static void pci_free_dynids(struct pci_driver *drv)
*
* Allow PCI IDs to be added to an existing driver via sysfs.
*/
-static ssize_t
-store_new_id(struct device_driver *driver, const char *buf, size_t count)
+static ssize_t store_new_id(struct device_driver *driver, const char *buf,
+ size_t count)
{
struct pci_driver *pdrv = to_pci_driver(driver);
const struct pci_device_id *ids = pdrv->id_table;
- __u32 vendor, device, subvendor=PCI_ANY_ID,
- subdevice=PCI_ANY_ID, class=0, class_mask=0;
- unsigned long driver_data=0;
- int fields=0;
- int retval;
+ __u32 vendor, device, subvendor = PCI_ANY_ID,
+ subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
+ unsigned long driver_data = 0;
+ int fields = 0;
+ int retval = 0;
fields = sscanf(buf, "%x %x %x %x %x %x %lx",
&vendor, &device, &subvendor, &subdevice,
@@ -119,6 +116,26 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
if (fields < 2)
return -EINVAL;
+ if (fields != 7) {
+ struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
+ if (!pdev)
+ return -ENOMEM;
+
+ pdev->vendor = vendor;
+ pdev->device = device;
+ pdev->subsystem_vendor = subvendor;
+ pdev->subsystem_device = subdevice;
+ pdev->class = class;
+
+ if (pci_match_id(pdrv->id_table, pdev))
+ retval = -EEXIST;
+
+ kfree(pdev);
+
+ if (retval)
+ return retval;
+ }
+
/* Only accept driver_data values that match an existing id_table
entry */
if (ids) {
@@ -150,8 +167,8 @@ static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
*
* Removes a dynamic pci device ID to this driver.
*/
-static ssize_t
-store_remove_id(struct device_driver *driver, const char *buf, size_t count)
+static ssize_t store_remove_id(struct device_driver *driver, const char *buf,
+ size_t count)
{
struct pci_dynid *dynid, *n;
struct pci_driver *pdrv = to_pci_driver(driver);
@@ -188,45 +205,12 @@ store_remove_id(struct device_driver *driver, const char *buf, size_t count)
}
static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
-static int
-pci_create_newid_file(struct pci_driver *drv)
-{
- int error = 0;
- if (drv->probe != NULL)
- error = driver_create_file(&drv->driver, &driver_attr_new_id);
- return error;
-}
-
-static void pci_remove_newid_file(struct pci_driver *drv)
-{
- driver_remove_file(&drv->driver, &driver_attr_new_id);
-}
-
-static int
-pci_create_removeid_file(struct pci_driver *drv)
-{
- int error = 0;
- if (drv->probe != NULL)
- error = driver_create_file(&drv->driver,&driver_attr_remove_id);
- return error;
-}
-
-static void pci_remove_removeid_file(struct pci_driver *drv)
-{
- driver_remove_file(&drv->driver, &driver_attr_remove_id);
-}
-#else /* !CONFIG_HOTPLUG */
-static inline int pci_create_newid_file(struct pci_driver *drv)
-{
- return 0;
-}
-static inline void pci_remove_newid_file(struct pci_driver *drv) {}
-static inline int pci_create_removeid_file(struct pci_driver *drv)
-{
- return 0;
-}
-static inline void pci_remove_removeid_file(struct pci_driver *drv) {}
-#endif
+static struct attribute *pci_drv_attrs[] = {
+ &driver_attr_new_id.attr,
+ &driver_attr_remove_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(pci_drv);
/**
* pci_match_id - See if a pci device matches a given pci_id table
@@ -252,6 +236,14 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
}
return NULL;
}
+EXPORT_SYMBOL(pci_match_id);
+
+static const struct pci_device_id pci_device_id_any = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+};
/**
* pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
@@ -266,18 +258,30 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
struct pci_dev *dev)
{
struct pci_dynid *dynid;
+ const struct pci_device_id *found_id = NULL;
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (dev->driver_override && strcmp(dev->driver_override, drv->name))
+ return NULL;
/* Look at the dynamic ids first, before the static ones */
spin_lock(&drv->dynids.lock);
list_for_each_entry(dynid, &drv->dynids.list, node) {
if (pci_match_one_device(&dynid->id, dev)) {
- spin_unlock(&drv->dynids.lock);
- return &dynid->id;
+ found_id = &dynid->id;
+ break;
}
}
spin_unlock(&drv->dynids.lock);
- return pci_match_id(drv->id_table, dev);
+ if (!found_id)
+ found_id = pci_match_id(drv->id_table, dev);
+
+ /* driver_override will always match, send a dummy id */
+ if (!found_id && dev->driver_override)
+ found_id = &pci_device_id_any;
+
+ return found_id;
}
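
driver_override makes binding deterministic from userspace: once set, no other driver can match the device, and the named driver matches even without an id_table entry thanks to the pci_device_id_any fallback above. Typical usage (device and driver names illustrative):

	# echo pci-stub > /sys/bus/pci/devices/0000:00:19.0/driver_override
	# echo 0000:00:19.0 > /sys/bus/pci/drivers_probe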
struct drv_dev_and_id {
@@ -289,26 +293,35 @@ struct drv_dev_and_id {
static long local_pci_probe(void *_ddi)
{
struct drv_dev_and_id *ddi = _ddi;
- struct device *dev = &ddi->dev->dev;
+ struct pci_dev *pci_dev = ddi->dev;
+ struct pci_driver *pci_drv = ddi->drv;
+ struct device *dev = &pci_dev->dev;
int rc;
- /* Unbound PCI devices are always set to disabled and suspended.
- * During probe, the device is set to enabled and active and the
- * usage count is incremented. If the driver supports runtime PM,
- * it should call pm_runtime_put_noidle() in its probe routine and
- * pm_runtime_get_noresume() in its remove routine.
+ /*
+ * Unbound PCI devices are always put in D0, regardless of
+ * runtime PM status. During probe, the device is set to
+ * active and the usage count is incremented. If the driver
+ * supports runtime PM, it should call pm_runtime_put_noidle()
+ * in its probe routine and pm_runtime_get_noresume() in its
+ * remove routine.
*/
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- rc = ddi->drv->probe(ddi->dev, ddi->id);
- if (rc) {
- pm_runtime_disable(dev);
- pm_runtime_set_suspended(dev);
- pm_runtime_put_noidle(dev);
+ pm_runtime_get_sync(dev);
+ pci_dev->driver = pci_drv;
+ rc = pci_drv->probe(pci_dev, ddi->id);
+ if (!rc)
+ return rc;
+ if (rc < 0) {
+ pci_dev->driver = NULL;
+ pm_runtime_put_sync(dev);
+ return rc;
}
- return rc;
+ /*
+	 * Probe function should return < 0 for failure, 0 for success.
+ * Treat values > 0 as success, but warn.
+ */
+ dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
+ return 0;
}
static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
@@ -317,12 +330,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
int error, node;
struct drv_dev_and_id ddi = { drv, dev, id };
- /* Execute driver initialization on node where the device's
- bus is attached to. This way the driver likely allocates
- its local memory on the right node without any need to
- change it. */
+ /*
+ * Execute driver initialization on node where the device is
+ * attached. This way the driver likely allocates its local memory
+ * on the right node.
+ */
node = dev_to_node(&dev->dev);
- if (node >= 0) {
+
+ /*
+ * On NUMA systems, we are likely to call a PF probe function using
+ * work_on_cpu(). If that probe calls pci_enable_sriov() (which
+ * adds the VF devices via pci_bus_add_device()), we may re-enter
+ * this function to call the VF probe function. Calling
+ * work_on_cpu() again will cause a lockdep warning. Since VFs are
+ * always on the same node as the PF, we can work around this by
+ * avoiding work_on_cpu() when we're already on the correct node.
+ *
+ * Preemption is enabled, so it's theoretically unsafe to use
+ * numa_node_id(), but even if we run the probe function on the
+ * wrong node, it should be functionally correct.
+ */
+ if (node >= 0 && node != numa_node_id()) {
int cpu;
get_online_cpus();
@@ -334,19 +362,19 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
put_online_cpus();
} else
error = local_pci_probe(&ddi);
+
return error;
}
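
The branch elided by this hunk dispatches the probe via work_on_cpu(); roughly, as a sketch of the surrounding in-tree code:

	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
	if (cpu < nr_cpu_ids)
		error = work_on_cpu(cpu, local_pci_probe, &ddi);
	else
		error = local_pci_probe(&ddi);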
/**
- * __pci_device_probe()
+ * __pci_device_probe - check if a driver wants to claim a specific PCI device
* @drv: driver to call to check if it wants the PCI device
* @pci_dev: PCI device being probed
- *
+ *
* returns 0 on success, else error.
* side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
*/
-static int
-__pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
+static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
{
const struct pci_device_id *id;
int error = 0;
@@ -357,15 +385,13 @@ __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
id = pci_match_device(drv, pci_dev);
if (id)
error = pci_call_probe(drv, pci_dev, id);
- if (error >= 0) {
- pci_dev->driver = drv;
+ if (error >= 0)
error = 0;
- }
}
return error;
}
-static int pci_device_probe(struct device * dev)
+static int pci_device_probe(struct device *dev)
{
int error = 0;
struct pci_driver *drv;
@@ -381,10 +407,10 @@ static int pci_device_probe(struct device * dev)
return error;
}
-static int pci_device_remove(struct device * dev)
+static int pci_device_remove(struct device *dev)
{
- struct pci_dev * pci_dev = to_pci_dev(dev);
- struct pci_driver * drv = pci_dev->driver;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
if (drv) {
if (drv->remove) {
@@ -396,9 +422,7 @@ static int pci_device_remove(struct device * dev)
}
/* Undo the runtime PM settings in local_pci_probe() */
- pm_runtime_disable(dev);
- pm_runtime_set_suspended(dev);
- pm_runtime_put_noidle(dev);
+ pm_runtime_put_sync(dev);
/*
* If the device is still on, set the power state as "unknown",
@@ -411,7 +435,7 @@ static int pci_device_remove(struct device * dev)
* We would love to complain here if pci_dev->is_enabled is set, that
* the driver should have called pci_disable_device(), but the
* unfortunate fact is there are too many odd BIOS and bridge setups
- * that don't like drivers doing that all of the time.
+ * that don't like drivers doing that all of the time.
* Oh well, we can dream of sane hardware when we sleep, no matter how
* horrible the crap we have to deal with is when we are awake...
*/
@@ -425,13 +449,27 @@ static void pci_device_shutdown(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = pci_dev->driver;
+ pm_runtime_resume(dev);
+
if (drv && drv->shutdown)
drv->shutdown(pci_dev);
pci_msi_shutdown(pci_dev);
pci_msix_shutdown(pci_dev);
+
+#ifdef CONFIG_KEXEC
+ /*
+	 * If this is a kexec reboot, turn off the Bus Master bit on the
+	 * device so that it stops doing DMA. Don't touch devices in
+	 * D3cold or unknown states.
+	 * If this is not a kexec reboot, the firmware will hit the PCI
+	 * devices with a big hammer and stop their DMA anyway.
+ */
+ if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
+ pci_clear_master(pci_dev);
+#endif
}
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
/* Auxiliary functions used for system resume and run-time resume. */
@@ -449,19 +487,21 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
return error;
}
- return pci_restore_state(pci_dev);
+ pci_restore_state(pci_dev);
+ return 0;
}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+
static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
- pci_restore_standard_config(pci_dev);
+ pci_power_up(pci_dev);
+ pci_restore_state(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-
/*
* Default "suspend" method for devices that have no driver provided suspend,
* or not even a driver at all (second part).
@@ -498,8 +538,8 @@ static int pci_pm_reenable_device(struct pci_dev *pci_dev)
static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
- struct pci_dev * pci_dev = to_pci_dev(dev);
- struct pci_driver * drv = pci_dev->driver;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
if (drv && drv->suspend) {
pci_power_t prev = pci_dev->current_state;
@@ -525,8 +565,8 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
- struct pci_dev * pci_dev = to_pci_dev(dev);
- struct pci_driver * drv = pci_dev->driver;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
if (drv && drv->suspend_late) {
pci_power_t prev = pci_dev->current_state;
@@ -556,8 +596,8 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
static int pci_legacy_resume_early(struct device *dev)
{
- struct pci_dev * pci_dev = to_pci_dev(dev);
- struct pci_driver * drv = pci_dev->driver;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
return drv && drv->resume_early ?
drv->resume_early(pci_dev) : 0;
@@ -565,8 +605,8 @@ static int pci_legacy_resume_early(struct device *dev)
static int pci_legacy_resume(struct device *dev)
{
- struct pci_dev * pci_dev = to_pci_dev(dev);
- struct pci_driver * drv = pci_dev->driver;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
pci_fixup_device(pci_fixup_resume, pci_dev);
@@ -580,14 +620,14 @@ static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
pci_fixup_device(pci_fixup_resume, pci_dev);
- if (!pci_is_bridge(pci_dev))
+ if (!pci_has_subordinate(pci_dev))
pci_enable_wake(pci_dev, PCI_D0, false);
}
static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
/* Disable non-bridge devices without PM support */
- if (!pci_is_bridge(pci_dev))
+ if (!pci_has_subordinate(pci_dev))
pci_disable_enabled_device(pci_dev);
}
@@ -602,7 +642,8 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
* supported as well. Drivers are supposed to support either the
* former, or the latter, but not both at the same time.
*/
- WARN_ON(ret && drv->driver.pm);
+ WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
+ drv->name, pci_dev->vendor, pci_dev->device);
return ret;
}
@@ -615,15 +656,11 @@ static int pci_pm_prepare(struct device *dev)
int error = 0;
/*
- * PCI devices suspended at run time need to be resumed at this
- * point, because in general it is necessary to reconfigure them for
- * system suspend. Namely, if the device is supposed to wake up the
- * system from the sleep state, we may need to reconfigure it for this
- * purpose. In turn, if the device is not supposed to wake up the
- * system from the sleep state, we'll have to prevent it from signaling
- * wake-up.
+	 * Devices with power.ignore_children set may still be needed for
+	 * suspending their children in the next phase of device suspend.
*/
- pm_runtime_resume(dev);
+ if (dev->power.ignore_children)
+ pm_runtime_resume(dev);
if (drv && drv->pm && drv->pm->prepare)
error = drv->pm->prepare(dev);
@@ -631,18 +668,10 @@ static int pci_pm_prepare(struct device *dev)
return error;
}
-static void pci_pm_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-}
#else /* !CONFIG_PM_SLEEP */
#define pci_pm_prepare NULL
-#define pci_pm_complete NULL
#endif /* !CONFIG_PM_SLEEP */
@@ -661,6 +690,17 @@ static int pci_pm_suspend(struct device *dev)
goto Fixup;
}
+ /*
+ * PCI devices suspended at run time need to be resumed at this point,
+ * because in general it is necessary to reconfigure them for system
+ * suspend. Namely, if the device is supposed to wake up the system
+ * from the sleep state, we may need to reconfigure it for this purpose.
+ * In turn, if the device is not supposed to wake up the system from the
+ * sleep state, we'll have to prevent it from signaling wake-up.
+ */
+ pm_runtime_resume(dev);
+
+ pci_dev->state_saved = false;
if (pm->suspend) {
pci_power_t prev = pci_dev->current_state;
int error;
@@ -717,12 +757,24 @@ static int pci_pm_suspend_noirq(struct device *dev)
if (!pci_dev->state_saved) {
pci_save_state(pci_dev);
- if (!pci_is_bridge(pci_dev))
+ if (!pci_has_subordinate(pci_dev))
pci_prepare_to_sleep(pci_dev);
}
pci_pm_set_unknown_state(pci_dev);
+ /*
+ * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
+ * PCI COMMAND register isn't 0, the BIOS assumes that the controller
+ * hasn't been quiesced and tries to turn it off. If the controller
+ * is already in D3, this can hang or cause memory corruption.
+ *
+ * Since the value of the COMMAND register doesn't matter once the
+ * device has been suspended, we can safely set it to 0 here.
+ */
+ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
return 0;
}
@@ -780,7 +832,14 @@ static int pci_pm_resume(struct device *dev)
#endif /* !CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+
+/*
+ * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
+ * a hibernate transition
+ */
+struct dev_pm_ops __weak pcibios_pm_ops;
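
Because pcibios_pm_ops is a weak symbol, an architecture can override it with a strong definition; a minimal sketch, with an illustrative hook body:

	static int arch_pci_thaw_noirq(struct device *dev)
	{
		/* restore arch-specific state (e.g. DMA routing) for dev */
		return 0;
	}

	struct dev_pm_ops pcibios_pm_ops = {
		.thaw_noirq = arch_pci_thaw_noirq,
	};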
static int pci_pm_freeze(struct device *dev)
{
@@ -795,6 +854,15 @@ static int pci_pm_freeze(struct device *dev)
return 0;
}
+ /*
+ * This used to be done in pci_pm_prepare() for all devices and some
+ * drivers may depend on it, so do it here. Ideally, runtime-suspended
+ * devices should not be touched during freeze/thaw transitions,
+ * however.
+ */
+ pm_runtime_resume(dev);
+
+ pci_dev->state_saved = false;
if (pm->freeze) {
int error;
@@ -804,6 +872,9 @@ static int pci_pm_freeze(struct device *dev)
return error;
}
+ if (pcibios_pm_ops.freeze)
+ return pcibios_pm_ops.freeze(dev);
+
return 0;
}
@@ -829,6 +900,9 @@ static int pci_pm_freeze_noirq(struct device *dev)
pci_pm_set_unknown_state(pci_dev);
+ if (pcibios_pm_ops.freeze_noirq)
+ return pcibios_pm_ops.freeze_noirq(dev);
+
return 0;
}
@@ -838,6 +912,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ if (pcibios_pm_ops.thaw_noirq) {
+ error = pcibios_pm_ops.thaw_noirq(dev);
+ if (error)
+ return error;
+ }
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
@@ -855,6 +935,12 @@ static int pci_pm_thaw(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int error = 0;
+ if (pcibios_pm_ops.thaw) {
+ error = pcibios_pm_ops.thaw(dev);
+ if (error)
+ return error;
+ }
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume(dev);
@@ -883,6 +969,10 @@ static int pci_pm_poweroff(struct device *dev)
goto Fixup;
}
+ /* The reason to do that is the same as in pci_pm_suspend(). */
+ pm_runtime_resume(dev);
+
+ pci_dev->state_saved = false;
if (pm->poweroff) {
int error;
@@ -895,6 +985,9 @@ static int pci_pm_poweroff(struct device *dev)
Fixup:
pci_fixup_device(pci_fixup_suspend, pci_dev);
+ if (pcibios_pm_ops.poweroff)
+ return pcibios_pm_ops.poweroff(dev);
+
return 0;
}
@@ -918,9 +1011,19 @@ static int pci_pm_poweroff_noirq(struct device *dev)
return error;
}
- if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
+ if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev))
pci_prepare_to_sleep(pci_dev);
+ /*
+ * The reason for doing this here is the same as for the analogous code
+ * in pci_pm_suspend_noirq().
+ */
+ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
+ if (pcibios_pm_ops.poweroff_noirq)
+ return pcibios_pm_ops.poweroff_noirq(dev);
+
return 0;
}
@@ -930,6 +1033,12 @@ static int pci_pm_restore_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ if (pcibios_pm_ops.restore_noirq) {
+ error = pcibios_pm_ops.restore_noirq(dev);
+ if (error)
+ return error;
+ }
+
pci_pm_default_resume_early(pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
@@ -947,6 +1056,12 @@ static int pci_pm_restore(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int error = 0;
+ if (pcibios_pm_ops.restore) {
+ error = pcibios_pm_ops.restore(dev);
+ if (error)
+ return error;
+ }
+
/*
* This is necessary for the hibernation error path in which restore is
* called without restoring the standard config registers of the device.
@@ -969,7 +1084,7 @@ static int pci_pm_restore(struct device *dev)
return error;
}
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define pci_pm_freeze NULL
#define pci_pm_freeze_noirq NULL
@@ -980,7 +1095,7 @@ static int pci_pm_restore(struct device *dev)
#define pci_pm_restore NULL
#define pci_pm_restore_noirq NULL
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
#ifdef CONFIG_PM_RUNTIME
@@ -991,13 +1106,24 @@ static int pci_pm_runtime_suspend(struct device *dev)
pci_power_t prev = pci_dev->current_state;
int error;
+ /*
+ * If pci_dev->driver is not set (unbound), the device should
+ * always remain in D0 regardless of the runtime PM status
+ */
+ if (!pci_dev->driver)
+ return 0;
+
if (!pm || !pm->runtime_suspend)
return -ENOSYS;
+ pci_dev->state_saved = false;
+ pci_dev->no_d3cold = false;
error = pm->runtime_suspend(dev);
suspend_report_result(pm->runtime_suspend, error);
if (error)
return error;
+ if (!pci_dev->d3cold_allowed)
+ pci_dev->no_d3cold = true;
pci_fixup_device(pci_fixup_suspend, pci_dev);
@@ -1009,45 +1135,62 @@ static int pci_pm_runtime_suspend(struct device *dev)
return 0;
}
- if (!pci_dev->state_saved)
+ if (!pci_dev->state_saved) {
pci_save_state(pci_dev);
-
- pci_finish_runtime_suspend(pci_dev);
+ pci_finish_runtime_suspend(pci_dev);
+ }
return 0;
}
static int pci_pm_runtime_resume(struct device *dev)
{
+ int rc;
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ /*
+ * If pci_dev->driver is not set (unbound), the device should
+ * always remain in D0 regardless of the runtime PM status
+ */
+ if (!pci_dev->driver)
+ return 0;
+
if (!pm || !pm->runtime_resume)
return -ENOSYS;
- pci_pm_default_resume_early(pci_dev);
+ pci_restore_standard_config(pci_dev);
+ pci_fixup_device(pci_fixup_resume_early, pci_dev);
__pci_enable_wake(pci_dev, PCI_D0, true, false);
pci_fixup_device(pci_fixup_resume, pci_dev);
- return pm->runtime_resume(dev);
+ rc = pm->runtime_resume(dev);
+
+ pci_dev->runtime_d3cold = false;
+
+ return rc;
}
static int pci_pm_runtime_idle(struct device *dev)
{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret = 0;
+
+ /*
+ * If pci_dev->driver is not set (unbound), the device should
+ * always remain in D0 regardless of the runtime PM status
+ */
+ if (!pci_dev->driver)
+ return 0;
if (!pm)
return -ENOSYS;
- if (pm->runtime_idle) {
- int ret = pm->runtime_idle(dev);
- if (ret)
- return ret;
- }
-
- pm_runtime_suspend(dev);
+ if (pm->runtime_idle)
+ ret = pm->runtime_idle(dev);
- return 0;
+ return ret;
}
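
Note the behavioral change here: instead of calling pm_runtime_suspend() itself, the bus type now returns the driver callback's value to the runtime PM core, which carries out the suspend on a zero return; a driver can still veto idling by returning nonzero from its runtime_idle callback.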
#else /* !CONFIG_PM_RUNTIME */
@@ -1058,11 +1201,10 @@ static int pci_pm_runtime_idle(struct device *dev)
#endif /* !CONFIG_PM_RUNTIME */
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
-const struct dev_pm_ops pci_dev_pm_ops = {
+static const struct dev_pm_ops pci_dev_pm_ops = {
.prepare = pci_pm_prepare,
- .complete = pci_pm_complete,
.suspend = pci_pm_suspend,
.resume = pci_pm_resume,
.freeze = pci_pm_freeze,
@@ -1093,17 +1235,15 @@ const struct dev_pm_ops pci_dev_pm_ops = {
* @drv: the driver structure to register
* @owner: owner module of drv
* @mod_name: module name string
- *
+ *
* Adds the driver structure to the list of registered drivers.
- * Returns a negative value on error, otherwise 0.
- * If no error occurred, the driver remains registered even if
+ * Returns a negative value on error, otherwise 0.
+ * If no error occurred, the driver remains registered even if
* no device was claimed during registration.
*/
int __pci_register_driver(struct pci_driver *drv, struct module *owner,
const char *mod_name)
{
- int error;
-
/* initialize common driver fields */
drv->driver.name = drv->name;
drv->driver.bus = &pci_bus_type;
@@ -1114,45 +1254,26 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
INIT_LIST_HEAD(&drv->dynids.list);
/* register with core */
- error = driver_register(&drv->driver);
- if (error)
- goto out;
-
- error = pci_create_newid_file(drv);
- if (error)
- goto out_newid;
-
- error = pci_create_removeid_file(drv);
- if (error)
- goto out_removeid;
-out:
- return error;
-
-out_removeid:
- pci_remove_newid_file(drv);
-out_newid:
- driver_unregister(&drv->driver);
- goto out;
+ return driver_register(&drv->driver);
}
+EXPORT_SYMBOL(__pci_register_driver);
/**
* pci_unregister_driver - unregister a pci driver
* @drv: the driver structure to unregister
- *
+ *
* Deletes the driver structure from the list of registered PCI drivers,
* gives it a chance to clean up by calling its remove() function for
* each device it was responsible for, and marks those devices as
* driverless.
*/
-void
-pci_unregister_driver(struct pci_driver *drv)
+void pci_unregister_driver(struct pci_driver *drv)
{
- pci_remove_removeid_file(drv);
- pci_remove_newid_file(drv);
driver_unregister(&drv->driver);
pci_free_dynids(drv);
}
+EXPORT_SYMBOL(pci_unregister_driver);
static struct pci_driver pci_compat_driver = {
.name = "compat"
@@ -1162,28 +1283,28 @@ static struct pci_driver pci_compat_driver = {
* pci_dev_driver - get the pci_driver of a device
* @dev: the device to query
*
- * Returns the appropriate pci_driver structure or %NULL if there is no
+ * Returns the appropriate pci_driver structure or %NULL if there is no
* registered driver for the device.
*/
-struct pci_driver *
-pci_dev_driver(const struct pci_dev *dev)
+struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
{
if (dev->driver)
return dev->driver;
else {
int i;
- for(i=0; i<=PCI_ROM_RESOURCE; i++)
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
if (dev->resource[i].flags & IORESOURCE_BUSY)
return &pci_compat_driver;
}
return NULL;
}
+EXPORT_SYMBOL(pci_dev_driver);
/**
* pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
* @dev: the PCI device structure to match against
* @drv: the device driver to search for matching PCI device id structures
- *
+ *
* Used by a driver to check whether a PCI device present in the
* system is in its list of supported devices. Returns the matching
* pci_device_id structure or %NULL if there is no match.
@@ -1191,9 +1312,13 @@ pci_dev_driver(const struct pci_dev *dev)
static int pci_bus_match(struct device *dev, struct device_driver *drv)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *pci_drv = to_pci_driver(drv);
+ struct pci_driver *pci_drv;
const struct pci_device_id *found_id;
+ if (!pci_dev->match_driver)
+ return 0;
+
+ pci_drv = to_pci_driver(drv);
found_id = pci_match_device(pci_drv, pci_dev);
if (found_id)
return 1;
@@ -1219,6 +1344,7 @@ struct pci_dev *pci_dev_get(struct pci_dev *dev)
get_device(&dev->dev);
return dev;
}
+EXPORT_SYMBOL(pci_dev_get);
/**
* pci_dev_put - release a use of the pci device structure
@@ -1232,13 +1358,39 @@ void pci_dev_put(struct pci_dev *dev)
if (dev)
put_device(&dev->dev);
}
+EXPORT_SYMBOL(pci_dev_put);
-#ifndef CONFIG_HOTPLUG
-int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- return -ENODEV;
+ struct pci_dev *pdev;
+
+ if (!dev)
+ return -ENODEV;
+
+ pdev = to_pci_dev(dev);
+
+ if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
+ pdev->subsystem_device))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device,
+ (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
+ (u8)(pdev->class)))
+ return -ENOMEM;
+
+ return 0;
}
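
For reference, the uevent environment this builds for, say, an 8086:10f5 NIC would look like the following (subsystem IDs and slot name illustrative):

	PCI_CLASS=20000
	PCI_ID=8086:10F5
	PCI_SUBSYS_ID=17AA:20F5
	PCI_SLOT_NAME=0000:00:19.0
	MODALIAS=pci:v00008086d000010F5sv000017AAsd000020F5bc02sc00i00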
-#endif
struct bus_type pci_bus_type = {
.name = "pci",
@@ -1247,23 +1399,15 @@ struct bus_type pci_bus_type = {
.probe = pci_device_probe,
.remove = pci_device_remove,
.shutdown = pci_device_shutdown,
- .dev_attrs = pci_dev_attrs,
- .bus_attrs = pci_bus_attrs,
+ .dev_groups = pci_dev_groups,
+ .bus_groups = pci_bus_groups,
+ .drv_groups = pci_drv_groups,
.pm = PCI_PM_OPS_PTR,
};
+EXPORT_SYMBOL(pci_bus_type);
static int __init pci_driver_init(void)
{
return bus_register(&pci_bus_type);
}
-
postcore_initcall(pci_driver_init);
-
-EXPORT_SYMBOL_GPL(pci_add_dynid);
-EXPORT_SYMBOL(pci_match_id);
-EXPORT_SYMBOL(__pci_register_driver);
-EXPORT_SYMBOL(pci_unregister_driver);
-EXPORT_SYMBOL(pci_dev_driver);
-EXPORT_SYMBOL(pci_bus_type);
-EXPORT_SYMBOL(pci_dev_get);
-EXPORT_SYMBOL(pci_dev_put);
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 90c0a729cd3..a3fbe2012ea 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -5,6 +5,13 @@
* by Narendra K <Narendra_K@dell.com>,
* Jordan Hargrave <Jordan_Hargrave@dell.com>
*
+ * PCI Firmware Specification Revision 3.1 section 4.6.7 (DSM for Naming a
+ * PCI or PCI Express Device Under Operating Systems) defines an instance
+ * number and string name. This code retrieves them and exports them to sysfs.
+ * If the system firmware does not provide the ACPI _DSM (Device Specific
+ * Method), then the SMBIOS type 41 instance number and string are exported
+ * to sysfs.
+ *
 * SMBIOS defines type 41 for onboard PCI devices. This code retrieves
 * the instance number and string from the type 41 record and exports
 * them to sysfs.
@@ -19,17 +26,22 @@
#include <linux/pci_ids.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/nls.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
#include "pci.h"
+#define DEVICE_LABEL_DSM 0x07
+
+#ifdef CONFIG_DMI
enum smbios_attr_enum {
SMBIOS_ATTR_NONE = 0,
SMBIOS_ATTR_LABEL_SHOW,
SMBIOS_ATTR_INSTANCE_SHOW,
};
-static mode_t
-find_smbios_instance_string(struct pci_dev *pdev, char *buf,
- enum smbios_attr_enum attribute)
+static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
+ enum smbios_attr_enum attribute)
{
const struct dmi_device *dmi;
struct dmi_dev_onboard *donboard;
@@ -61,9 +73,8 @@ find_smbios_instance_string(struct pci_dev *pdev, char *buf,
return 0;
}
-static mode_t
-smbios_instance_string_exist(struct kobject *kobj, struct attribute *attr,
- int n)
+static umode_t smbios_instance_string_exist(struct kobject *kobj,
+ struct attribute *attr, int n)
{
struct device *dev;
struct pci_dev *pdev;
@@ -75,8 +86,8 @@ smbios_instance_string_exist(struct kobject *kobj, struct attribute *attr,
S_IRUGO : 0;
}
-static ssize_t
-smbioslabel_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t smbioslabel_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
pdev = to_pci_dev(dev);
@@ -85,9 +96,8 @@ smbioslabel_show(struct device *dev, struct device_attribute *attr, char *buf)
SMBIOS_ATTR_LABEL_SHOW);
}
-static ssize_t
-smbiosinstance_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t smbiosinstance_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
pdev = to_pci_dev(dev);
@@ -117,27 +127,181 @@ static struct attribute_group smbios_attr_group = {
.is_visible = smbios_instance_string_exist,
};
-static int
-pci_create_smbiosname_file(struct pci_dev *pdev)
+static int pci_create_smbiosname_file(struct pci_dev *pdev)
{
- if (!sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group))
- return 0;
- return -ENODEV;
+ return sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group);
}
-static void
-pci_remove_smbiosname_file(struct pci_dev *pdev)
+static void pci_remove_smbiosname_file(struct pci_dev *pdev)
{
sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group);
}
+#else
+static inline int pci_create_smbiosname_file(struct pci_dev *pdev)
+{
+ return -1;
+}
+
+static inline void pci_remove_smbiosname_file(struct pci_dev *pdev)
+{
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static const char device_label_dsm_uuid[] = {
+ 0xD0, 0x37, 0xC9, 0xE5, 0x53, 0x35, 0x7A, 0x4D,
+ 0x91, 0x17, 0xEA, 0x4D, 0x19, 0xC3, 0x43, 0x4D
+};
+
+enum acpi_attr_enum {
+ ACPI_ATTR_LABEL_SHOW,
+ ACPI_ATTR_INDEX_SHOW,
+};
+
+static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
+{
+ int len;
+	/* reserve one byte for the trailing newline */
+	len = utf16s_to_utf8s((const wchar_t *)obj->string.pointer,
+			      obj->string.length,
+			      UTF16_LITTLE_ENDIAN,
+			      buf, PAGE_SIZE - 1);
+	buf[len] = '\n';
+}
+
+static int dsm_get_label(struct device *dev, char *buf,
+ enum acpi_attr_enum attr)
+{
+ acpi_handle handle;
+ union acpi_object *obj, *tmp;
+ int len = -1;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -1;
+
+ obj = acpi_evaluate_dsm(handle, device_label_dsm_uuid, 0x2,
+ DEVICE_LABEL_DSM, NULL);
+ if (!obj)
+ return -1;
+
+ tmp = obj->package.elements;
+ if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 &&
+ tmp[0].type == ACPI_TYPE_INTEGER &&
+ tmp[1].type == ACPI_TYPE_STRING) {
+ /*
+ * The second string element is optional even when
+ * this _DSM is implemented; when not implemented,
+ * this entry must return a null string.
+ */
+ if (attr == ACPI_ATTR_INDEX_SHOW)
+ scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value);
+ else if (attr == ACPI_ATTR_LABEL_SHOW)
+ dsm_label_utf16s_to_utf8s(tmp + 1, buf);
+ len = strlen(buf) > 0 ? strlen(buf) : -1;
+ }
+
+ ACPI_FREE(obj);
+
+ return len;
+}
+
+static bool device_has_dsm(struct device *dev)
+{
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return false;
+
+ return !!acpi_check_dsm(handle, device_label_dsm_uuid, 0x2,
+ 1 << DEVICE_LABEL_DSM);
+}
+
+static umode_t acpi_index_string_exist(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev;
+
+ dev = container_of(kobj, struct device, kobj);
+
+ if (device_has_dsm(dev))
+ return S_IRUGO;
+
+ return 0;
+}
+
+static ssize_t acpilabel_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return dsm_get_label(dev, buf, ACPI_ATTR_LABEL_SHOW);
+}
+
+static ssize_t acpiindex_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return dsm_get_label(dev, buf, ACPI_ATTR_INDEX_SHOW);
+}
+
+static struct device_attribute acpi_attr_label = {
+ .attr = {.name = "label", .mode = 0444},
+ .show = acpilabel_show,
+};
+
+static struct device_attribute acpi_attr_index = {
+ .attr = {.name = "acpi_index", .mode = 0444},
+ .show = acpiindex_show,
+};
+
+static struct attribute *acpi_attributes[] = {
+ &acpi_attr_label.attr,
+ &acpi_attr_index.attr,
+ NULL,
+};
+
+static struct attribute_group acpi_attr_group = {
+ .attrs = acpi_attributes,
+ .is_visible = acpi_index_string_exist,
+};
+
+static int pci_create_acpi_index_label_files(struct pci_dev *pdev)
+{
+ return sysfs_create_group(&pdev->dev.kobj, &acpi_attr_group);
+}
+
+static int pci_remove_acpi_index_label_files(struct pci_dev *pdev)
+{
+ sysfs_remove_group(&pdev->dev.kobj, &acpi_attr_group);
+ return 0;
+}
+#else
+static inline int pci_create_acpi_index_label_files(struct pci_dev *pdev)
+{
+ return -1;
+}
+
+static inline int pci_remove_acpi_index_label_files(struct pci_dev *pdev)
+{
+ return -1;
+}
+
+static inline bool device_has_dsm(struct device *dev)
+{
+ return false;
+}
+#endif
void pci_create_firmware_label_files(struct pci_dev *pdev)
{
- if (!pci_create_smbiosname_file(pdev))
- ;
+ if (device_has_dsm(&pdev->dev))
+ pci_create_acpi_index_label_files(pdev);
+ else
+ pci_create_smbiosname_file(pdev);
}
void pci_remove_firmware_label_files(struct pci_dev *pdev)
{
- pci_remove_smbiosname_file(pdev);
+ if (device_has_dsm(&pdev->dev))
+ pci_remove_acpi_index_label_files(pdev);
+ else
+ pci_remove_smbiosname_file(pdev);
}
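
Whichever source wins, the result is visible in sysfs; for example (device path and values illustrative):

	# cat /sys/bus/pci/devices/0000:03:00.0/label
	Ethernet Port 1
	# cat /sys/bus/pci/devices/0000:03:00.0/acpi_index
	1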
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index f7b68ca6cc9..886fb357027 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -2,13 +2,13 @@
*
* Copyright (C) 2008 Red Hat, Inc.
* Author:
- * Chris Wright
+ * Chris Wright
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Usage is simple, allocate a new id to the stub driver and bind the
* device to it. For example:
- *
+ *
* # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id
* # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind
* # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind
@@ -28,7 +28,7 @@ MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the stub driver, format is "
static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- dev_printk(KERN_INFO, &dev->dev, "claimed by stub\n");
+ dev_info(&dev->dev, "claimed by stub\n");
return 0;
}
@@ -47,13 +47,20 @@ static int __init pci_stub_init(void)
if (rc)
return rc;
+	/* no ids were actually passed */
+ if (ids[0] == '\0')
+ return 0;
+
/* add ids specified in the module parameter */
p = ids;
while ((id = strsep(&p, ","))) {
unsigned int vendor, device, subvendor = PCI_ANY_ID,
- subdevice = PCI_ANY_ID, class=0, class_mask=0;
+ subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
int fields;
+ if (!strlen(id))
+ continue;
+
fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
&vendor, &device, &subvendor, &subdevice,
&class, &class_mask);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index b5a7d9bfcb2..9ff0a901ecf 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -10,7 +10,7 @@
*
* File attributes for PCI devices
*
- * Modeled after usb's driverfs.c
+ * Modeled after usb's driverfs.c
*
*/
@@ -19,12 +19,17 @@
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
+#include <linux/export.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/capability.h>
+#include <linux/security.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
+#include <linux/vgaarb.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include "pci.h"
static int sysfs_initialized; /* = 0 */
@@ -36,9 +41,10 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct pci_dev *pdev; \
\
- pdev = to_pci_dev (dev); \
- return sprintf (buf, format_string, pdev->field); \
-}
+ pdev = to_pci_dev(dev); \
+ return sprintf(buf, format_string, pdev->field); \
+} \
+static DEVICE_ATTR_RO(field)
pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
@@ -52,7 +58,7 @@ static ssize_t broken_parity_status_show(struct device *dev,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf (buf, "%u\n", pdev->broken_parity_status);
+ return sprintf(buf, "%u\n", pdev->broken_parity_status);
}
static ssize_t broken_parity_status_store(struct device *dev,
@@ -62,17 +68,18 @@ static ssize_t broken_parity_status_store(struct device *dev,
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long val;
- if (strict_strtoul(buf, 0, &val) < 0)
+ if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
pdev->broken_parity_status = !!val;
return count;
}
+static DEVICE_ATTR_RW(broken_parity_status);
-static ssize_t local_cpus_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
+static ssize_t pci_dev_show_local_cpu(struct device *dev, int type,
+ struct device_attribute *attr, char *buf)
+{
const struct cpumask *mask;
int len;
@@ -82,37 +89,68 @@ static ssize_t local_cpus_show(struct device *dev,
#else
mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
- len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
+ len = type ?
+ cpumask_scnprintf(buf, PAGE_SIZE-2, mask) :
+ cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
+
buf[len++] = '\n';
buf[len] = '\0';
return len;
}
+static ssize_t local_cpus_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return pci_dev_show_local_cpu(dev, 1, attr, buf);
+}
+static DEVICE_ATTR_RO(local_cpus);
static ssize_t local_cpulist_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- const struct cpumask *mask;
- int len;
+ return pci_dev_show_local_cpu(dev, 0, attr, buf);
+}
+static DEVICE_ATTR_RO(local_cpulist);
-#ifdef CONFIG_NUMA
- mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
- cpumask_of_node(dev_to_node(dev));
-#else
- mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
-#endif
- len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
- buf[len++] = '\n';
- buf[len] = '\0';
- return len;
+/*
+ * PCI Bus Class Devices
+ */
+static ssize_t pci_bus_show_cpuaffinity(struct device *dev, int type,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ const struct cpumask *cpumask;
+
+ cpumask = cpumask_of_pcibus(to_pci_bus(dev));
+ ret = type ?
+ cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask) :
+ cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask);
+ buf[ret++] = '\n';
+ buf[ret] = '\0';
+ return ret;
+}
+
+static ssize_t cpuaffinity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
}
+static DEVICE_ATTR_RO(cpuaffinity);
+
+static ssize_t cpulistaffinity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
+}
+static DEVICE_ATTR_RO(cpulistaffinity);
/* show resources */
-static ssize_t
-resource_show(struct device * dev, struct device_attribute *attr, char * buf)
+static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct pci_dev * pci_dev = to_pci_dev(dev);
- char * str = buf;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ char *str = buf;
int i;
int max;
resource_size_t start, end;
@@ -125,15 +163,17 @@ resource_show(struct device * dev, struct device_attribute *attr, char * buf)
for (i = 0; i < max; i++) {
struct resource *res = &pci_dev->resource[i];
pci_resource_to_user(pci_dev, i, res, &start, &end);
- str += sprintf(str,"0x%016llx 0x%016llx 0x%016llx\n",
+ str += sprintf(str, "0x%016llx 0x%016llx 0x%016llx\n",
(unsigned long long)start,
(unsigned long long)end,
(unsigned long long)res->flags);
}
return (str - buf);
}
+static DEVICE_ATTR_RO(resource);
-static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -143,14 +183,14 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
(u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
(u8)(pci_dev->class));
}
+static DEVICE_ATTR_RO(modalias);
-static ssize_t is_enabled_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t enabled_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long val;
- ssize_t result = strict_strtoul(buf, 0, &val);
+ ssize_t result = kstrtoul(buf, 0, &val);
if (result < 0)
return result;
@@ -170,67 +210,74 @@ static ssize_t is_enabled_store(struct device *dev,
return result < 0 ? result : count;
}
-static ssize_t is_enabled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pci_dev *pdev;
- pdev = to_pci_dev (dev);
- return sprintf (buf, "%u\n", atomic_read(&pdev->enable_cnt));
+ pdev = to_pci_dev(dev);
+ return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
+static DEVICE_ATTR_RW(enabled);
#ifdef CONFIG_NUMA
-static ssize_t
-numa_node_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- return sprintf (buf, "%d\n", dev->numa_node);
+ return sprintf(buf, "%d\n", dev->numa_node);
}
+static DEVICE_ATTR_RO(numa_node);
#endif
-static ssize_t
-dma_mask_bits_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dma_mask_bits_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf (buf, "%d\n", fls64(pdev->dma_mask));
+ return sprintf(buf, "%d\n", fls64(pdev->dma_mask));
}
+static DEVICE_ATTR_RO(dma_mask_bits);
-static ssize_t
-consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t consistent_dma_mask_bits_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sprintf (buf, "%d\n", fls64(dev->coherent_dma_mask));
+ return sprintf(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
+static DEVICE_ATTR_RO(consistent_dma_mask_bits);
-static ssize_t
-msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
if (!pdev->subordinate)
return 0;
- return sprintf (buf, "%u\n",
- !(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI));
+ return sprintf(buf, "%u\n",
+ !(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI));
}
-static ssize_t
-msi_bus_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long val;
- if (strict_strtoul(buf, 0, &val) < 0)
+ if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
- /* bad things may happen if the no_msi flag is changed
- * while some drivers are loaded */
+ /*
+ * Bad things may happen if the no_msi flag is changed
+ * while drivers are loaded.
+ */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- /* Maybe pci devices without subordinate busses shouldn't even have this
- * attribute in the first place? */
+ /*
+ * Maybe devices without subordinate buses shouldn't have this
+ * attribute in the first place?
+ */
if (!pdev->subordinate)
return count;
@@ -239,140 +286,352 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
!!val) {
pdev->subordinate->bus_flags ^= PCI_BUS_FLAGS_NO_MSI;
- dev_warn(&pdev->dev, "forced subordinate bus to%s support MSI,"
- " bad things could happen\n", val ? "" : " not");
+ dev_warn(&pdev->dev, "forced subordinate bus to%s support MSI, bad things could happen\n",
+ val ? "" : " not");
}
return count;
}
+static DEVICE_ATTR_RW(msi_bus);
-#ifdef CONFIG_HOTPLUG
-static DEFINE_MUTEX(pci_remove_rescan_mutex);
static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
size_t count)
{
unsigned long val;
struct pci_bus *b = NULL;
- if (strict_strtoul(buf, 0, &val) < 0)
+ if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
if (val) {
- mutex_lock(&pci_remove_rescan_mutex);
+ pci_lock_rescan_remove();
while ((b = pci_find_next_bus(b)) != NULL)
pci_rescan_bus(b);
- mutex_unlock(&pci_remove_rescan_mutex);
+ pci_unlock_rescan_remove();
}
return count;
}
+static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store);
+
+static struct attribute *pci_bus_attrs[] = {
+ &bus_attr_rescan.attr,
+ NULL,
+};
+
+static const struct attribute_group pci_bus_group = {
+ .attrs = pci_bus_attrs,
+};
-struct bus_attribute pci_bus_attrs[] = {
- __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store),
- __ATTR_NULL
+const struct attribute_group *pci_bus_groups[] = {
+ &pci_bus_group,
+ NULL,
};
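
Userspace triggers a full rescan of every bus by writing a nonzero value:

	# echo 1 > /sys/bus/pci/rescan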
-static ssize_t
-dev_rescan_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t dev_rescan_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
unsigned long val;
struct pci_dev *pdev = to_pci_dev(dev);
- if (strict_strtoul(buf, 0, &val) < 0)
+ if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
if (val) {
- mutex_lock(&pci_remove_rescan_mutex);
+ pci_lock_rescan_remove();
pci_rescan_bus(pdev->bus);
- mutex_unlock(&pci_remove_rescan_mutex);
+ pci_unlock_rescan_remove();
}
return count;
}
+static struct device_attribute dev_rescan_attr = __ATTR(rescan,
+ (S_IWUSR|S_IWGRP),
+ NULL, dev_rescan_store);
-static void remove_callback(struct device *dev)
+static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
- mutex_lock(&pci_remove_rescan_mutex);
- pci_remove_bus_device(pdev);
- mutex_unlock(&pci_remove_rescan_mutex);
+ if (val && device_remove_file_self(dev, attr))
+ pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
+ return count;
}
+static struct device_attribute dev_remove_attr = __ATTR(remove,
+ (S_IWUSR|S_IWGRP),
+ NULL, remove_store);
-static ssize_t
-remove_store(struct device *dev, struct device_attribute *dummy,
- const char *buf, size_t count)
+static ssize_t dev_bus_rescan_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- int ret = 0;
unsigned long val;
+ struct pci_bus *bus = to_pci_bus(dev);
- if (strict_strtoul(buf, 0, &val) < 0)
+ if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
- /* An attribute cannot be unregistered by one of its own methods,
- * so we have to use this roundabout approach.
- */
- if (val)
- ret = device_schedule_callback(dev, remove_callback);
- if (ret)
- count = ret;
+ if (val) {
+ pci_lock_rescan_remove();
+ if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
+ pci_rescan_bus_bridge_resize(bus->self);
+ else
+ pci_rescan_bus(bus);
+ pci_unlock_rescan_remove();
+ }
return count;
}
+static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store);
+
+#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
+static ssize_t d3cold_allowed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ pdev->d3cold_allowed = !!val;
+ pm_runtime_resume(dev);
+
+ return count;
+}
+
+static ssize_t d3cold_allowed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ return sprintf(buf, "%u\n", pdev->d3cold_allowed);
+}
+static DEVICE_ATTR_RW(d3cold_allowed);
#endif
-struct device_attribute pci_dev_attrs[] = {
- __ATTR_RO(resource),
- __ATTR_RO(vendor),
- __ATTR_RO(device),
- __ATTR_RO(subsystem_vendor),
- __ATTR_RO(subsystem_device),
- __ATTR_RO(class),
- __ATTR_RO(irq),
- __ATTR_RO(local_cpus),
- __ATTR_RO(local_cpulist),
- __ATTR_RO(modalias),
+#ifdef CONFIG_OF
+static ssize_t devspec_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct device_node *np = pci_device_to_OF_node(pdev);
+
+ if (np == NULL || np->full_name == NULL)
+ return 0;
+ return sprintf(buf, "%s", np->full_name);
+}
+static DEVICE_ATTR_RO(devspec);
+#endif
+
+#ifdef CONFIG_PCI_IOV
+static ssize_t sriov_totalvfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
+}
+
+static ssize_t sriov_numvfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
+}
+
+/*
+ * num_vfs > 0: number of VFs to enable
+ * num_vfs = 0: disable all VFs
+ *
+ * Note: the SR-IOV spec doesn't allow partial VF
+ * disabling, so it's all or none.
+ */
+static ssize_t sriov_numvfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+ u16 num_vfs;
+
+ ret = kstrtou16(buf, 0, &num_vfs);
+ if (ret < 0)
+ return ret;
+
+ if (num_vfs > pci_sriov_get_totalvfs(pdev))
+ return -ERANGE;
+
+ if (num_vfs == pdev->sriov->num_VFs)
+ return count; /* no change */
+
+	/* is the PF driver loaded with an sriov_configure callback? */
+ if (!pdev->driver || !pdev->driver->sriov_configure) {
+ dev_info(&pdev->dev, "Driver doesn't support SRIOV configuration via sysfs\n");
+ return -ENOSYS;
+ }
+
+ if (num_vfs == 0) {
+ /* disable VFs */
+ ret = pdev->driver->sriov_configure(pdev, 0);
+ if (ret < 0)
+ return ret;
+ return count;
+ }
+
+ /* enable VFs */
+ if (pdev->sriov->num_VFs) {
+ dev_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n",
+ pdev->sriov->num_VFs, num_vfs);
+ return -EBUSY;
+ }
+
+ ret = pdev->driver->sriov_configure(pdev, num_vfs);
+ if (ret < 0)
+ return ret;
+
+ if (ret != num_vfs)
+ dev_warn(&pdev->dev, "%d VFs requested; only %d enabled\n",
+ num_vfs, ret);
+
+ return count;
+}
+
+static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
+static struct device_attribute sriov_numvfs_attr =
+ __ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
+ sriov_numvfs_show, sriov_numvfs_store);
+#endif /* CONFIG_PCI_IOV */
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ char *driver_override, *old = pdev->driver_override, *cp;
+
+ if (count > PATH_MAX)
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ if (strlen(driver_override)) {
+ pdev->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ pdev->driver_override = NULL;
+ }
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%s\n", pdev->driver_override);
+}
+static DEVICE_ATTR_RW(driver_override);
+
+static struct attribute *pci_dev_attrs[] = {
+ &dev_attr_resource.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_subsystem_vendor.attr,
+ &dev_attr_subsystem_device.attr,
+ &dev_attr_class.attr,
+ &dev_attr_irq.attr,
+ &dev_attr_local_cpus.attr,
+ &dev_attr_local_cpulist.attr,
+ &dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
- __ATTR_RO(numa_node),
+ &dev_attr_numa_node.attr,
#endif
- __ATTR_RO(dma_mask_bits),
- __ATTR_RO(consistent_dma_mask_bits),
- __ATTR(enable, 0600, is_enabled_show, is_enabled_store),
- __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
- broken_parity_status_show,broken_parity_status_store),
- __ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store),
-#ifdef CONFIG_HOTPLUG
- __ATTR(remove, (S_IWUSR|S_IWGRP), NULL, remove_store),
- __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_rescan_store),
+ &dev_attr_dma_mask_bits.attr,
+ &dev_attr_consistent_dma_mask_bits.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_broken_parity_status.attr,
+ &dev_attr_msi_bus.attr,
+#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
+ &dev_attr_d3cold_allowed.attr,
#endif
- __ATTR_NULL,
+#ifdef CONFIG_OF
+ &dev_attr_devspec.attr,
+#endif
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+static const struct attribute_group pci_dev_group = {
+ .attrs = pci_dev_attrs,
+};
+
+const struct attribute_group *pci_dev_groups[] = {
+ &pci_dev_group,
+ NULL,
+};
+
+static struct attribute *pcibus_attrs[] = {
+ &dev_attr_rescan.attr,
+ &dev_attr_cpuaffinity.attr,
+ &dev_attr_cpulistaffinity.attr,
+ NULL,
+};
+
+static const struct attribute_group pcibus_group = {
+ .attrs = pcibus_attrs,
};
-static ssize_t
-boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
+const struct attribute_group *pcibus_groups[] = {
+ &pcibus_group,
+ NULL,
+};
+
+static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *vga_dev = vga_default_device();
+
+ if (vga_dev)
+ return sprintf(buf, "%u\n", (pdev == vga_dev));
return sprintf(buf, "%u\n",
!!(pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW));
}
-struct device_attribute vga_attr = __ATTR_RO(boot_vga);
+static struct device_attribute vga_attr = __ATTR_RO(boot_vga);
-static ssize_t
-pci_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
- struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
+ struct pci_dev *dev = to_pci_dev(container_of(kobj, struct device,
+ kobj));
unsigned int size = 64;
loff_t init_off = off;
- u8 *data = (u8*) buf;
+ u8 *data = (u8 *) buf;
/* Several chips lock up trying to read undefined config space */
- if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) {
+ if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0)
size = dev->cfg_size;
- } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
+ else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
size = 128;
- }
if (off > size)
return 0;
@@ -383,6 +642,8 @@ pci_read_config(struct file *filp, struct kobject *kobj,
size = count;
}
+ pci_config_pm_runtime_get(dev);
+
if ((off & 1) && size) {
u8 val;
pci_user_read_config_byte(dev, off, &val);
@@ -428,18 +689,20 @@ pci_read_config(struct file *filp, struct kobject *kobj,
--size;
}
+ pci_config_pm_runtime_put(dev);
+
return count;
}
-static ssize_t
-pci_write_config(struct file* filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
- struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
+ struct pci_dev *dev = to_pci_dev(container_of(kobj, struct device,
+ kobj));
unsigned int size = count;
loff_t init_off = off;
- u8 *data = (u8*) buf;
+ u8 *data = (u8 *) buf;
if (off > dev->cfg_size)
return 0;
@@ -447,20 +710,22 @@ pci_write_config(struct file* filp, struct kobject *kobj,
size = dev->cfg_size - off;
count = size;
}
-
+
+ pci_config_pm_runtime_get(dev);
+
if ((off & 1) && size) {
pci_user_write_config_byte(dev, off, data[off - init_off]);
off++;
size--;
}
-
+
if ((off & 3) && size > 2) {
u16 val = data[off - init_off];
val |= (u16) data[off - init_off + 1] << 8;
- pci_user_write_config_word(dev, off, val);
- off += 2;
- size -= 2;
- }
+ pci_user_write_config_word(dev, off, val);
+ off += 2;
+ size -= 2;
+ }
while (size > 3) {
u32 val = data[off - init_off];
@@ -471,7 +736,7 @@ pci_write_config(struct file* filp, struct kobject *kobj,
off += 4;
size -= 4;
}
-
+
if (size >= 2) {
u16 val = data[off - init_off];
val |= (u16) data[off - init_off + 1] << 8;
@@ -486,13 +751,14 @@ pci_write_config(struct file* filp, struct kobject *kobj,
--size;
}
+ pci_config_pm_runtime_put(dev);
+
return count;
}
-static ssize_t
-read_vpd_attr(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev =
to_pci_dev(container_of(kobj, struct device, kobj));
@@ -505,10 +771,9 @@ read_vpd_attr(struct file *filp, struct kobject *kobj,
return pci_read_vpd(dev, off, count, buf);
}
-static ssize_t
-write_vpd_attr(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t write_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev =
to_pci_dev(container_of(kobj, struct device, kobj));
@@ -534,20 +799,18 @@ write_vpd_attr(struct file *filp, struct kobject *kobj,
* Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
* callback routine (pci_legacy_read).
*/
-static ssize_t
-pci_read_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
- struct pci_bus *bus = to_pci_bus(container_of(kobj,
- struct device,
+ struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
kobj));
- /* Only support 1, 2 or 4 byte accesses */
- if (count != 1 && count != 2 && count != 4)
- return -EINVAL;
+ /* Only support 1, 2 or 4 byte accesses */
+ if (count != 1 && count != 2 && count != 4)
+ return -EINVAL;
- return pci_legacy_read(bus, off, (u32 *)buf, count);
+ return pci_legacy_read(bus, off, (u32 *)buf, count);
}
/**
@@ -562,19 +825,18 @@ pci_read_legacy_io(struct file *filp, struct kobject *kobj,
 * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
* callback routine (pci_legacy_write).
*/
-static ssize_t
-pci_write_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
- struct pci_bus *bus = to_pci_bus(container_of(kobj,
- struct device,
+ struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
kobj));
- /* Only support 1, 2 or 4 byte accesses */
- if (count != 1 && count != 2 && count != 4)
- return -EINVAL;
- return pci_legacy_write(bus, off, *(u32 *)buf, count);
+ /* Only support 1, 2 or 4 byte accesses */
+ if (count != 1 && count != 2 && count != 4)
+ return -EINVAL;
+
+ return pci_legacy_write(bus, off, *(u32 *)buf, count);
}
/**
@@ -588,16 +850,14 @@ pci_write_legacy_io(struct file *filp, struct kobject *kobj,
* legacy memory space (first meg of bus space) into application virtual
* memory space.
*/
-static int
-pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- struct vm_area_struct *vma)
+static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
{
- struct pci_bus *bus = to_pci_bus(container_of(kobj,
- struct device,
+ struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
kobj));
- return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
+ return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}
/**
@@ -611,16 +871,14 @@ pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
* legacy IO space (first meg of bus space) into application virtual
* memory space. Returns -ENOSYS if the operation isn't supported
*/
-static int
-pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- struct vm_area_struct *vma)
+static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
{
- struct pci_bus *bus = to_pci_bus(container_of(kobj,
- struct device,
+ struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
kobj));
- return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
+ return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
}
/**
@@ -630,10 +888,9 @@ pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
*
* Stub implementation. Can be overridden by arch if necessary.
*/
-void __weak
-pci_adjust_legacy_attr(struct pci_bus *b, enum pci_mmap_state mmap_type)
+void __weak pci_adjust_legacy_attr(struct pci_bus *b,
+ enum pci_mmap_state mmap_type)
{
- return;
}
/**
@@ -644,7 +901,7 @@ pci_adjust_legacy_attr(struct pci_bus *b, enum pci_mmap_state mmap_type)
* a per-bus basis. This routine creates the files and ties them into
* their associated read, write and mmap files from pci-sysfs.c
*
- * On error unwind, but don't propogate the error to the caller
+ * On error unwind, but don't propagate the error to the caller
* as it is ok to set up the PCI bus without these files.
*/
void pci_create_legacy_files(struct pci_bus *b)
@@ -688,8 +945,7 @@ legacy_io_err:
kfree(b->legacy_io);
b->legacy_io = NULL;
kzalloc_err:
- printk(KERN_WARNING "pci: warning: could not create legacy I/O port "
- "and ISA memory resources to sysfs\n");
+	printk(KERN_WARNING "pci: warning: could not create legacy I/O port and ISA memory resources in sysfs\n");
return;
}
@@ -705,17 +961,21 @@ void pci_remove_legacy_files(struct pci_bus *b)
#ifdef HAVE_PCI_MMAP
-int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
+ enum pci_mmap_api mmap_api)
{
- unsigned long nr, start, size;
+ unsigned long nr, start, size, pci_start;
- nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ if (pci_resource_len(pdev, resno) == 0)
+ return 0;
+ nr = vma_pages(vma);
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
- if (start < size && size - start >= nr)
+ pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
+ pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
+ if (start >= pci_start && start < pci_start + size &&
+ start + nr <= pci_start + size)
return 1;
- WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n",
- current->comm, start, start+nr, pci_name(pdev), resno, size);
return 0;
}
@@ -728,9 +988,8 @@ int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
*
* Use the regular PCI mapping routines to map a PCI resource into userspace.
*/
-static int
-pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
- struct vm_area_struct *vma, int write_combine)
+static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
+ struct vm_area_struct *vma, int write_combine)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj,
struct device, kobj));
@@ -745,8 +1004,14 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
- if (!pci_mmap_fits(pdev, i, vma))
+ if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
+ WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
+ current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
+ pci_name(pdev), i,
+ (u64)pci_resource_start(pdev, i),
+ (u64)pci_resource_len(pdev, i));
return -EINVAL;
+ }
/* pci_mmap_page_range() expects the same kind of entry as coming
* from /proc/bus/pci/ which is a "user visible" value. If this is
@@ -762,26 +1027,23 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
}
-static int
-pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- struct vm_area_struct *vma)
+static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 0);
}
-static int
-pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- struct vm_area_struct *vma)
+static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 1);
}
-static ssize_t
-pci_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count, bool write)
+static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count, bool write)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj,
struct device, kobj));
@@ -826,18 +1088,16 @@ pci_resource_io(struct file *filp, struct kobject *kobj,
return -EINVAL;
}
-static ssize_t
-pci_read_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}
-static ssize_t
-pci_write_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
return pci_resource_io(filp, kobj, attr, buf, off, count, true);
}
@@ -849,8 +1109,7 @@ pci_write_resource_io(struct file *filp, struct kobject *kobj,
* If we created resource files for @pdev, remove them from sysfs and
* free their resources.
*/
-static void
-pci_remove_resource_files(struct pci_dev *pdev)
+static void pci_remove_resource_files(struct pci_dev *pdev)
{
int i;
@@ -953,10 +1212,9 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
*
* writing anything except 0 enables it
*/
-static ssize_t
-pci_write_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
@@ -980,10 +1238,9 @@ pci_write_rom(struct file *filp, struct kobject *kobj,
* Put @count bytes starting at @off into @buf from the ROM in the PCI
* device corresponding to @kobj.
*/
-static ssize_t
-pci_read_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
void __iomem *rom;
@@ -991,21 +1248,21 @@ pci_read_rom(struct file *filp, struct kobject *kobj,
if (!pdev->rom_attr_enabled)
return -EINVAL;
-
+
rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */
if (!rom || !size)
return -EIO;
-
+
if (off >= size)
count = 0;
else {
if (off + count > size)
count = size - off;
-
+
memcpy_fromio(buf, rom + off, count);
}
pci_unmap_rom(pdev, rom);
-
+
return count;
}
@@ -1029,18 +1286,12 @@ static struct bin_attribute pcie_config_attr = {
.write = pci_write_config,
};
-int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev)
-{
- return 0;
-}
-
-static ssize_t reset_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long val;
- ssize_t result = strict_strtoul(buf, 0, &val);
+ ssize_t result = kstrtoul(buf, 0, &val);
if (result < 0)
return result;
@@ -1076,7 +1327,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
attr->write = write_vpd_attr;
retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
if (retval) {
- kfree(dev->vpd->attr);
+ kfree(attr);
return retval;
}
dev->vpd->attr = attr;
@@ -1103,7 +1354,7 @@ error:
return retval;
}
-int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
+int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
int retval;
int rom_size = 0;
@@ -1138,7 +1389,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
sysfs_bin_attr_init(attr);
attr->size = rom_size;
attr->attr.name = "rom";
- attr->attr.mode = S_IRUSR;
+ attr->attr.mode = S_IRUSR | S_IWUSR;
attr->read = pci_read_rom;
attr->write = pci_write_rom;
retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
@@ -1149,29 +1400,15 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
pdev->rom_attr = attr;
}
- if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
- retval = device_create_file(&pdev->dev, &vga_attr);
- if (retval)
- goto err_rom_file;
- }
-
- /* add platform-specific attributes */
- retval = pcibios_add_platform_entries(pdev);
- if (retval)
- goto err_vga_file;
-
/* add sysfs entries for various capabilities */
retval = pci_create_capabilities_sysfs(pdev);
if (retval)
- goto err_vga_file;
+ goto err_rom_file;
pci_create_firmware_label_files(pdev);
return 0;
-err_vga_file:
- if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
- device_remove_file(&pdev->dev, &vga_attr);
err_rom_file:
if (rom_size) {
sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
@@ -1255,5 +1492,87 @@ static int __init pci_sysfs_init(void)
return 0;
}
-
late_initcall(pci_sysfs_init);
+
+static struct attribute *pci_dev_dev_attrs[] = {
+ &vga_attr.attr,
+ NULL,
+};
+
+static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (a == &vga_attr.attr)
+ if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ return 0;
+
+ return a->mode;
+}
+
+static struct attribute *pci_dev_hp_attrs[] = {
+ &dev_remove_attr.attr,
+ &dev_rescan_attr.attr,
+ NULL,
+};
+
+static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pdev->is_virtfn)
+ return 0;
+
+ return a->mode;
+}
+
+static struct attribute_group pci_dev_hp_attr_group = {
+ .attrs = pci_dev_hp_attrs,
+ .is_visible = pci_dev_hp_attrs_are_visible,
+};
+
+#ifdef CONFIG_PCI_IOV
+static struct attribute *sriov_dev_attrs[] = {
+ &sriov_totalvfs_attr.attr,
+ &sriov_numvfs_attr.attr,
+ NULL,
+};
+
+static umode_t sriov_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+
+ if (!dev_is_pf(dev))
+ return 0;
+
+ return a->mode;
+}
+
+static struct attribute_group sriov_dev_attr_group = {
+ .attrs = sriov_dev_attrs,
+ .is_visible = sriov_attrs_are_visible,
+};
+#endif /* CONFIG_PCI_IOV */
+
+static struct attribute_group pci_dev_attr_group = {
+ .attrs = pci_dev_dev_attrs,
+ .is_visible = pci_dev_attrs_are_visible,
+};
+
+static const struct attribute_group *pci_dev_attr_groups[] = {
+ &pci_dev_attr_group,
+ &pci_dev_hp_attr_group,
+#ifdef CONFIG_PCI_IOV
+ &sriov_dev_attr_group,
+#endif
+ NULL,
+};
+
+struct device_type pci_dev_type = {
+ .groups = pci_dev_attr_groups,
+};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e98c8104297..1c8592b0e14 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -22,6 +22,8 @@
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
+#include <linux/pci_hotplug.h>
+#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"
@@ -77,15 +79,26 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
+
/*
* The default CLS is used if arch didn't set CLS explicitly and not
* all pci devices agree on the same value. Arch can override either
* the dfl or actual value as it sees fit. Don't forget this is
* measured in 32-bit words, not bytes.
*/
-u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
+u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;
+/*
+ * If we set up a device for bus mastering, we need to check the latency
+ * timer as certain BIOSes forget to set it properly.
+ */
+unsigned int pcibios_max_latency = 255;
+
+/* If set, the PCIe ARI capability will not be used. */
+static bool pcie_ari_disabled;
+
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@@ -93,15 +106,15 @@ u8 pci_cache_line_size;
* Given a PCI bus, returns the highest PCI bus number present in the set
* including the given PCI bus and its list of child PCI buses.
*/
-unsigned char pci_bus_max_busnr(struct pci_bus* bus)
+unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
- struct list_head *tmp;
+ struct pci_bus *tmp;
unsigned char max, n;
- max = bus->subordinate;
- list_for_each(tmp, &bus->children) {
- n = pci_bus_max_busnr(pci_bus_b(tmp));
- if(n > max)
+ max = bus->busn_res.end;
+ list_for_each_entry(tmp, &bus->children, node) {
+ n = pci_bus_max_busnr(tmp);
+ if (n > max)
max = n;
}
return max;
@@ -124,30 +137,6 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
-#if 0
-/**
- * pci_max_busnr - returns maximum PCI bus number
- *
- * Returns the highest PCI bus number present in the system global list of
- * PCI buses.
- */
-unsigned char __devinit
-pci_max_busnr(void)
-{
- struct pci_bus *bus = NULL;
- unsigned char max, n;
-
- max = 0;
- while ((bus = pci_find_next_bus(bus)) != NULL) {
- n = pci_bus_max_busnr(bus);
- if(n > max)
- max = n;
- }
- return max;
-}
-
-#endif /* 0 */
-
#define PCI_FIND_CAP_TTL 48
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
@@ -209,7 +198,7 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus,
}
/**
- * pci_find_capability - query for devices' capabilities
+ * pci_find_capability - query for devices' capabilities
* @dev: PCI device to query
* @cap: capability code
*
@@ -218,12 +207,12 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus,
* device's PCI configuration space or 0 in case the device does not
* support it. Possible values for @cap:
*
- * %PCI_CAP_ID_PM Power Management
- * %PCI_CAP_ID_AGP Accelerated Graphics Port
- * %PCI_CAP_ID_VPD Vital Product Data
- * %PCI_CAP_ID_SLOTID Slot Identification
+ * %PCI_CAP_ID_PM Power Management
+ * %PCI_CAP_ID_AGP Accelerated Graphics Port
+ * %PCI_CAP_ID_VPD Vital Product Data
+ * %PCI_CAP_ID_SLOTID Slot Identification
* %PCI_CAP_ID_MSI Message Signalled Interrupts
- * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
+ * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
* %PCI_CAP_ID_PCIX PCI-X
* %PCI_CAP_ID_EXP PCI Express
*/
@@ -237,15 +226,16 @@ int pci_find_capability(struct pci_dev *dev, int cap)
return pos;
}
+EXPORT_SYMBOL(pci_find_capability);
/**
- * pci_bus_find_capability - query for devices' capabilities
+ * pci_bus_find_capability - query for devices' capabilities
* @bus: the PCI bus to query
* @devfn: PCI device to query
* @cap: capability code
*
* Like pci_find_capability() but works for pci devices that do not have a
- * pci_dev structure set up yet.
+ * pci_dev structure set up yet.
*
* Returns the address of the requested capability structure within the
* device's PCI configuration space or 0 in case the device does not
@@ -264,22 +254,20 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
return pos;
}
+EXPORT_SYMBOL(pci_bus_find_capability);
/**
- * pci_find_ext_capability - Find an extended capability
+ * pci_find_next_ext_capability - Find an extended capability
* @dev: PCI device to query
+ * @start: address at which to start looking (0 to start at beginning of list)
* @cap: capability code
*
- * Returns the address of the requested extended capability structure
+ * Returns the address of the next matching extended capability structure
* within the device's PCI configuration space or 0 if the device does
- * not support it. Possible values for @cap:
- *
- * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
- * %PCI_EXT_CAP_ID_VC Virtual Channel
- * %PCI_EXT_CAP_ID_DSN Device Serial Number
- * %PCI_EXT_CAP_ID_PWR Power Budgeting
+ * not support it. Some capabilities can occur several times, e.g., the
+ * vendor-specific capability, and this provides a way to find them all.
*/
-int pci_find_ext_capability(struct pci_dev *dev, int cap)
+int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
u32 header;
int ttl;
@@ -291,6 +279,9 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
return 0;
+ if (start)
+ pos = start;
+
if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
return 0;
@@ -302,7 +293,7 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
return 0;
while (ttl-- > 0) {
- if (PCI_EXT_CAP_ID(header) == cap)
+ if (PCI_EXT_CAP_ID(header) == cap && pos != start)
return pos;
pos = PCI_EXT_CAP_NEXT(header);
@@ -315,50 +306,27 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
return 0;
}
-EXPORT_SYMBOL_GPL(pci_find_ext_capability);
+EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
/**
- * pci_bus_find_ext_capability - find an extended capability
- * @bus: the PCI bus to query
- * @devfn: PCI device to query
- * @cap: capability code
+ * pci_find_ext_capability - Find an extended capability
+ * @dev: PCI device to query
+ * @cap: capability code
*
- * Like pci_find_ext_capability() but works for pci devices that do not have a
- * pci_dev structure set up yet.
+ * Returns the address of the requested extended capability structure
+ * within the device's PCI configuration space or 0 if the device does
+ * not support it. Possible values for @cap:
*
- * Returns the address of the requested capability structure within the
- * device's PCI configuration space or 0 in case the device does not
- * support it.
+ * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
+ * %PCI_EXT_CAP_ID_VC Virtual Channel
+ * %PCI_EXT_CAP_ID_DSN Device Serial Number
+ * %PCI_EXT_CAP_ID_PWR Power Budgeting
*/
-int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
- int cap)
+int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
- u32 header;
- int ttl;
- int pos = PCI_CFG_SPACE_SIZE;
-
- /* minimum 8 bytes per capability */
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
-
- if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
- return 0;
- if (header == 0xffffffff || header == 0)
- return 0;
-
- while (ttl-- > 0) {
- if (PCI_EXT_CAP_ID(header) == cap)
- return pos;
-
- pos = PCI_EXT_CAP_NEXT(header);
- if (pos < PCI_CFG_SPACE_SIZE)
- break;
-
- if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
- break;
- }
-
- return 0;
+ return pci_find_next_ext_capability(dev, 0, cap);
}
+EXPORT_SYMBOL_GPL(pci_find_ext_capability);
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
@@ -435,33 +403,67 @@ EXPORT_SYMBOL_GPL(pci_find_ht_capability);
* @res: child resource record for which parent is sought
*
* For given resource region of given device, return the resource
- * region of parent bus the given region is contained in or where
- * it should be allocated from.
+ * region of the parent bus the given region is contained in.
*/
-struct resource *
-pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
+struct resource *pci_find_parent_resource(const struct pci_dev *dev,
+ struct resource *res)
{
const struct pci_bus *bus = dev->bus;
+ struct resource *r;
int i;
- struct resource *best = NULL, *r;
pci_bus_for_each_resource(bus, r, i) {
if (!r)
continue;
- if (res->start && !(res->start >= r->start && res->end <= r->end))
- continue; /* Not contained */
- if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
- continue; /* Wrong type */
- if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
- return r; /* Exact match */
- /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
- if (r->flags & IORESOURCE_PREFETCH)
- continue;
- /* .. but we can put a prefetchable resource inside a non-prefetchable one */
- if (!best)
- best = r;
+ if (res->start && resource_contains(r, res)) {
+
+ /*
+ * If the window is prefetchable but the BAR is
+ * not, the allocator made a mistake.
+ */
+ if (r->flags & IORESOURCE_PREFETCH &&
+ !(res->flags & IORESOURCE_PREFETCH))
+ return NULL;
+
+ /*
+ * If we're below a transparent bridge, there may
+ * be both a positively-decoded aperture and a
+ * subtractively-decoded region that contain the BAR.
+ * We want the positively-decoded one, so this depends
+ * on pci_bus_for_each_resource() giving us those
+ * first.
+ */
+ return r;
+ }
}
- return best;
+ return NULL;
+}
+EXPORT_SYMBOL(pci_find_parent_resource);
+
+/**
+ * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
+ * @dev: the PCI device to operate on
+ * @pos: config space offset of status word
+ * @mask: mask of bit(s) to care about in status word
+ *
+ * Return 1 when the mask bit(s) in the status word clear, 0 otherwise.
+ */
+int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
+{
+ int i;
+
+	/* Wait for the Transaction Pending bit to clear */
+ for (i = 0; i < 4; i++) {
+ u16 status;
+ if (i)
+ msleep((1 << (i - 1)) * 100);
+
+ pci_read_config_word(dev, pos, &status);
+ if (!(status & mask))
+ return 1;
+ }
+
+ return 0;
}
/**
@@ -471,8 +473,7 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
* Restore the BAR values for a given device, so as to make it
* accessible by its driver.
*/
-static void
-pci_restore_bars(struct pci_dev *dev)
+static void pci_restore_bars(struct pci_dev *dev)
{
int i;
@@ -485,7 +486,7 @@ static struct pci_platform_pm_ops *pci_platform_pm;
int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
if (!ops->is_manageable || !ops->set_state || !ops->choose_state
- || !ops->sleep_wake || !ops->can_wakeup)
+ || !ops->sleep_wake)
return -EINVAL;
pci_platform_pm = ops;
return 0;
@@ -497,7 +498,7 @@ static inline bool platform_pci_power_manageable(struct pci_dev *dev)
}
static inline int platform_pci_set_power_state(struct pci_dev *dev,
- pci_power_t t)
+ pci_power_t t)
{
return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}
@@ -508,11 +509,6 @@ static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}
-static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
-{
- return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
-}
-
static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
return pci_platform_pm ?
@@ -554,13 +550,13 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
return -EINVAL;
/* Validate current state:
- * Can enter D0 from any state, but if we can only go deeper
+	 * Can enter D0 from any state, but we can only go deeper
* to sleep if we're already in a low power state
*/
if (state != PCI_D0 && dev->current_state <= PCI_D3cold
&& dev->current_state > state) {
- dev_err(&dev->dev, "invalid power transition "
- "(from state %d to %d)\n", dev->current_state, state);
+ dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n",
+ dev->current_state, state);
return -EINVAL;
}
@@ -607,10 +603,11 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
if (dev->current_state != state && printk_ratelimit())
- dev_info(&dev->dev, "Refused to change power state, "
- "currently in D%d\n", dev->current_state);
+ dev_info(&dev->dev, "Refused to change power state, currently in D%d\n",
+ dev->current_state);
- /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
+ /*
+ * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
* INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
* from D3hot to D0 _may_ perform an internal reset, thereby
* going to "D0 Uninitialized" rather than "D0 Initialized".
@@ -642,6 +639,16 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
if (dev->pm_cap) {
u16 pmcsr;
+ /*
+		 * Configuration space is not accessible for a device in
+		 * D3cold, so just keep or set D3cold for safety.
+ */
+ if (dev->current_state == PCI_D3cold)
+ return;
+ if (state == PCI_D3cold) {
+ dev->current_state = PCI_D3cold;
+ return;
+ }
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
} else {
@@ -650,6 +657,19 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
}
/**
+ * pci_power_up - Put the given device into D0 forcibly
+ * @dev: PCI device to power up
+ */
+void pci_power_up(struct pci_dev *dev)
+{
+ if (platform_pci_power_manageable(dev))
+ platform_pci_set_power_state(dev, PCI_D0);
+
+ pci_raw_set_power_state(dev, PCI_D0);
+ pci_update_current_state(dev, PCI_D0);
+}
+
+/**
* pci_platform_power_transition - Use platform to change device power state
* @dev: PCI device to handle.
* @state: State to put the device into.
@@ -662,25 +682,88 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
error = platform_pci_set_power_state(dev, state);
if (!error)
pci_update_current_state(dev, state);
- } else {
+ } else
error = -ENODEV;
- /* Fall back to PCI_D0 if native PM is not supported */
- if (!dev->pm_cap)
- dev->current_state = PCI_D0;
- }
+
+ if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
+ dev->current_state = PCI_D0;
return error;
}
/**
+ * pci_wakeup - Wake up a PCI device
+ * @pci_dev: Device to handle.
+ * @ign: ignored parameter
+ */
+static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
+{
+ pci_wakeup_event(pci_dev);
+ pm_request_resume(&pci_dev->dev);
+ return 0;
+}
+
+/**
+ * pci_wakeup_bus - Walk given bus and wake up devices on it
+ * @bus: Top bus of the subtree to walk.
+ */
+static void pci_wakeup_bus(struct pci_bus *bus)
+{
+ if (bus)
+ pci_walk_bus(bus, pci_wakeup, NULL);
+}
+
+/**
* __pci_start_power_transition - Start power transition of a PCI device
* @dev: PCI device to handle.
* @state: State to put the device into.
*/
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
- if (state == PCI_D0)
+ if (state == PCI_D0) {
pci_platform_power_transition(dev, PCI_D0);
+ /*
+ * Mandatory power management transition delays, see
+ * PCI Express Base Specification Revision 2.0 Section
+		 * 6.6.1: Conventional Reset.  Do not delay for
+		 * devices powered on/off by the corresponding bridge,
+		 * because we have already delayed for the bridge.
+ */
+ if (dev->runtime_d3cold) {
+ msleep(dev->d3cold_delay);
+ /*
+ * When powering on a bridge from D3cold, the
+			 * whole hierarchy may be powered on into the
+			 * D0uninitialized state; resume the devices to
+			 * give them a chance to suspend again.
+ */
+ pci_wakeup_bus(dev->subordinate);
+ }
+ }
+}
+
+/**
+ * __pci_dev_set_current_state - Set current state of a PCI device
+ * @dev: Device to handle
+ * @data: pointer to state to be set
+ */
+static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
+{
+ pci_power_t state = *(pci_power_t *)data;
+
+ dev->current_state = state;
+ return 0;
+}
+
+/**
+ * __pci_bus_set_current_state - Walk given bus and set current state of devices
+ * @bus: Top bus of the subtree to walk.
+ * @state: state to be set
+ */
+static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
+{
+ if (bus)
+ pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}
/**
@@ -692,8 +775,15 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
*/
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
- return state >= PCI_D0 ?
- pci_platform_power_transition(dev, state) : -EINVAL;
+ int ret;
+
+ if (state <= PCI_D0)
+ return -EINVAL;
+ ret = pci_platform_power_transition(dev, state);
+	/* Powering off a bridge may power off the whole hierarchy below it */
+ if (!ret && state == PCI_D3cold)
+ __pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+ return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
@@ -717,8 +807,8 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
int error;
/* bound the state we're entering */
- if (state > PCI_D3hot)
- state = PCI_D3hot;
+ if (state > PCI_D3cold)
+ state = PCI_D3cold;
else if (state < PCI_D0)
state = PCI_D0;
else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
@@ -729,20 +819,36 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
*/
return 0;
+ /* Check if we're already there */
+ if (dev->current_state == state)
+ return 0;
+
__pci_start_power_transition(dev, state);
/* This device is quirked not to be put into D3, so
don't put it in D3 */
- if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
+ if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
return 0;
- error = pci_raw_set_power_state(dev, state);
+ /*
+	 * To put the device in D3cold, first put it into D3hot the
+	 * native PCI way, then move it into D3cold via the platform ops.
+ */
+ error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
+ PCI_D3hot : state);
if (!__pci_complete_power_transition(dev, state))
error = 0;
+ /*
+	 * When aspm_policy is "powersave", this call ensures
+ * that ASPM is configured.
+ */
+ if (!error && dev->bus->self)
+ pcie_aspm_powersave_config_link(dev->bus->self);
return error;
}
+EXPORT_SYMBOL(pci_set_power_state);
/**
* pci_choose_state - Choose the power state of a PCI device
@@ -758,7 +864,7 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
pci_power_t ret;
- if (!pci_find_capability(dev, PCI_CAP_ID_PM))
+ if (!dev->pm_cap)
return PCI_D0;
ret = platform_pci_choose_state(dev);
@@ -781,42 +887,39 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
}
return PCI_D0;
}
-
EXPORT_SYMBOL(pci_choose_state);
#define PCI_EXP_SAVE_REGS 7
-#define pcie_cap_has_devctl(type, flags) 1
-#define pcie_cap_has_lnkctl(type, flags) \
- ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
- (type == PCI_EXP_TYPE_ROOT_PORT || \
- type == PCI_EXP_TYPE_ENDPOINT || \
- type == PCI_EXP_TYPE_LEG_END))
-#define pcie_cap_has_sltctl(type, flags) \
- ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
- ((type == PCI_EXP_TYPE_ROOT_PORT) || \
- (type == PCI_EXP_TYPE_DOWNSTREAM && \
- (flags & PCI_EXP_FLAGS_SLOT))))
-#define pcie_cap_has_rtctl(type, flags) \
- ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
- (type == PCI_EXP_TYPE_ROOT_PORT || \
- type == PCI_EXP_TYPE_RC_EC))
-#define pcie_cap_has_devctl2(type, flags) \
- ((flags & PCI_EXP_FLAGS_VERS) > 1)
-#define pcie_cap_has_lnkctl2(type, flags) \
- ((flags & PCI_EXP_FLAGS_VERS) > 1)
-#define pcie_cap_has_sltctl2(type, flags) \
- ((flags & PCI_EXP_FLAGS_VERS) > 1)
+static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
+ u16 cap, bool extended)
+{
+ struct pci_cap_saved_state *tmp;
+
+ hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
+ if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
+ return tmp;
+ }
+ return NULL;
+}
+
+struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
+{
+ return _pci_find_saved_cap(dev, cap, false);
+}
+
+struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
+{
+ return _pci_find_saved_cap(dev, cap, true);
+}
static int pci_save_pcie_state(struct pci_dev *dev)
{
- int pos, i = 0;
+ int i = 0;
struct pci_cap_saved_state *save_state;
u16 *cap;
- u16 flags;
- pos = pci_pcie_cap(dev);
- if (!pos)
+ if (!pci_is_pcie(dev))
return 0;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
@@ -824,57 +927,37 @@ static int pci_save_pcie_state(struct pci_dev *dev)
dev_err(&dev->dev, "buffer not found in %s\n", __func__);
return -ENOMEM;
}
- cap = (u16 *)&save_state->data[0];
-
- pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
-
- if (pcie_cap_has_devctl(dev->pcie_type, flags))
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
- if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
- pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
- if (pcie_cap_has_sltctl(dev->pcie_type, flags))
- pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
- if (pcie_cap_has_rtctl(dev->pcie_type, flags))
- pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
- if (pcie_cap_has_devctl2(dev->pcie_type, flags))
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
- if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
- pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
- if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
- pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
+
+ cap = (u16 *)&save_state->cap.data[0];
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
+ pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
+ pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
+ pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
return 0;
}
static void pci_restore_pcie_state(struct pci_dev *dev)
{
- int i = 0, pos;
+ int i = 0;
struct pci_cap_saved_state *save_state;
u16 *cap;
- u16 flags;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (!save_state || pos <= 0)
+ if (!save_state)
return;
- cap = (u16 *)&save_state->data[0];
- pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
-
- if (pcie_cap_has_devctl(dev->pcie_type, flags))
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
- if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
- pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
- if (pcie_cap_has_sltctl(dev->pcie_type, flags))
- pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
- if (pcie_cap_has_rtctl(dev->pcie_type, flags))
- pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
- if (pcie_cap_has_devctl2(dev->pcie_type, flags))
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
- if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
- pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
- if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
- pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
+ cap = (u16 *)&save_state->cap.data[0];
+ pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
+ pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
+ pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
+ pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
+ pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
+ pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
+ pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}
@@ -893,7 +976,8 @@ static int pci_save_pcix_state(struct pci_dev *dev)
return -ENOMEM;
}
- pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);
+ pci_read_config_word(dev, pos + PCI_X_CMD,
+ (u16 *)save_state->cap.data);
return 0;
}
@@ -908,7 +992,7 @@ static void pci_restore_pcix_state(struct pci_dev *dev)
pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!save_state || pos <= 0)
return;
- cap = (u16 *)&save_state->data[0];
+ cap = (u16 *)&save_state->cap.data[0];
pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
@@ -918,8 +1002,7 @@ static void pci_restore_pcix_state(struct pci_dev *dev)
* pci_save_state - save the PCI configuration space of a device before suspending
* @dev: - PCI device that we're dealing with
*/
-int
-pci_save_state(struct pci_dev *dev)
+int pci_save_state(struct pci_dev *dev)
{
int i;
/* XXX: 100% dword access ok here? */
@@ -930,51 +1013,190 @@ pci_save_state(struct pci_dev *dev)
return i;
if ((i = pci_save_pcix_state(dev)) != 0)
return i;
+ if ((i = pci_save_vc_state(dev)) != 0)
+ return i;
return 0;
}
+EXPORT_SYMBOL(pci_save_state);
-/**
+static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
+ u32 saved_val, int retry)
+{
+ u32 val;
+
+ pci_read_config_dword(pdev, offset, &val);
+ if (val == saved_val)
+ return;
+
+ for (;;) {
+ dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
+ offset, val, saved_val);
+ pci_write_config_dword(pdev, offset, saved_val);
+ if (retry-- <= 0)
+ return;
+
+ pci_read_config_dword(pdev, offset, &val);
+ if (val == saved_val)
+ return;
+
+ mdelay(1);
+ }
+}
+
+static void pci_restore_config_space_range(struct pci_dev *pdev,
+ int start, int end, int retry)
+{
+ int index;
+
+ for (index = end; index >= start; index--)
+ pci_restore_config_dword(pdev, 4 * index,
+ pdev->saved_config_space[index],
+ retry);
+}
+
+static void pci_restore_config_space(struct pci_dev *pdev)
+{
+ if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
+ pci_restore_config_space_range(pdev, 10, 15, 0);
+ /* Restore BARs before the command register. */
+ pci_restore_config_space_range(pdev, 4, 9, 10);
+ pci_restore_config_space_range(pdev, 0, 3, 0);
+ } else {
+ pci_restore_config_space_range(pdev, 0, 15, 0);
+ }
+}
+
+/**
* pci_restore_state - Restore the saved state of a PCI device
* @dev: - PCI device that we're dealing with
*/
-int
-pci_restore_state(struct pci_dev *dev)
+void pci_restore_state(struct pci_dev *dev)
{
- int i;
- u32 val;
-
if (!dev->state_saved)
- return 0;
+ return;
/* PCI Express register must be restored first */
pci_restore_pcie_state(dev);
+ pci_restore_ats_state(dev);
+ pci_restore_vc_state(dev);
+
+ pci_restore_config_space(dev);
- /*
- * The Base Address register should be programmed before the command
- * register(s)
- */
- for (i = 15; i >= 0; i--) {
- pci_read_config_dword(dev, i * 4, &val);
- if (val != dev->saved_config_space[i]) {
- dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
- "space at offset %#x (was %#x, writing %#x)\n",
- i, val, (int)dev->saved_config_space[i]);
- pci_write_config_dword(dev,i * 4,
- dev->saved_config_space[i]);
- }
- }
pci_restore_pcix_state(dev);
pci_restore_msi_state(dev);
pci_restore_iov_state(dev);
dev->state_saved = false;
+}
+EXPORT_SYMBOL(pci_restore_state);
+
+struct pci_saved_state {
+ u32 config_space[16];
+ struct pci_cap_saved_data cap[0];
+};
+
+/**
+ * pci_store_saved_state - Allocate and return an opaque struct containing
+ * the device saved state.
+ * @dev: PCI device that we're dealing with
+ *
+ * Return NULL if no state or error.
+ */
+struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
+{
+ struct pci_saved_state *state;
+ struct pci_cap_saved_state *tmp;
+ struct pci_cap_saved_data *cap;
+ size_t size;
+
+ if (!dev->state_saved)
+ return NULL;
+
+ size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
+
+ hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
+ size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
+
+ state = kzalloc(size, GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ memcpy(state->config_space, dev->saved_config_space,
+ sizeof(state->config_space));
+
+ cap = state->cap;
+ hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
+ size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
+ memcpy(cap, &tmp->cap, len);
+ cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
+ }
+	/* An all-zero cap entry terminates the list */
+
+ return state;
+}
+EXPORT_SYMBOL_GPL(pci_store_saved_state);
+
+/**
+ * pci_load_saved_state - Reload the provided save state into struct pci_dev.
+ * @dev: PCI device that we're dealing with
+ * @state: Saved state returned from pci_store_saved_state()
+ */
+static int pci_load_saved_state(struct pci_dev *dev,
+ struct pci_saved_state *state)
+{
+ struct pci_cap_saved_data *cap;
+
+ dev->state_saved = false;
+
+ if (!state)
+ return 0;
+
+ memcpy(dev->saved_config_space, state->config_space,
+ sizeof(state->config_space));
+
+ cap = state->cap;
+ while (cap->size) {
+ struct pci_cap_saved_state *tmp;
+ tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
+ if (!tmp || tmp->cap.size != cap->size)
+ return -EINVAL;
+
+ memcpy(tmp->cap.data, cap->data, tmp->cap.size);
+ cap = (struct pci_cap_saved_data *)((u8 *)cap +
+ sizeof(struct pci_cap_saved_data) + cap->size);
+ }
+
+ dev->state_saved = true;
return 0;
}
+/**
+ * pci_load_and_free_saved_state - Reload the save state pointed to by state,
+ * and free the memory allocated for it.
+ * @dev: PCI device that we're dealing with
+ * @state: Pointer to saved state returned from pci_store_saved_state()
+ */
+int pci_load_and_free_saved_state(struct pci_dev *dev,
+ struct pci_saved_state **state)
+{
+ int ret = pci_load_saved_state(dev, *state);
+ kfree(*state);
+ *state = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
+
+int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
+{
+ return pci_enable_resources(dev, bars);
+}
+
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
+ u16 cmd;
+ u8 pin;
err = pci_set_power_state(dev, PCI_D0);
if (err < 0 && err != -EIO)
@@ -984,6 +1206,17 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
return err;
pci_fixup_device(pci_fixup_enable, dev);
+ if (dev->msi_enabled || dev->msix_enabled)
+ return 0;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if (pin) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_INTX_DISABLE)
+ pci_write_config_word(dev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_INTX_DISABLE);
+ }
+
return 0;
}
@@ -1000,17 +1233,60 @@ int pci_reenable_device(struct pci_dev *dev)
return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
return 0;
}
+EXPORT_SYMBOL(pci_reenable_device);
+
+static void pci_enable_bridge(struct pci_dev *dev)
+{
+ struct pci_dev *bridge;
+ int retval;
+
+ bridge = pci_upstream_bridge(dev);
+ if (bridge)
+ pci_enable_bridge(bridge);
+
+ if (pci_is_enabled(dev)) {
+ if (!dev->is_busmaster)
+ pci_set_master(dev);
+ return;
+ }
+
+ retval = pci_enable_device(dev);
+ if (retval)
+ dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
+ retval);
+ pci_set_master(dev);
+}
-static int __pci_enable_device_flags(struct pci_dev *dev,
- resource_size_t flags)
+static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
+ struct pci_dev *bridge;
int err;
int i, bars = 0;
- if (atomic_add_return(1, &dev->enable_cnt) > 1)
+ /*
+ * Power state could be unknown at this point, either due to a fresh
+ * boot or a device removal call. So get the current power state
+ * so that things like MSI message writing will behave as expected
+ * (e.g. if the device really is in D0 at enable time).
+ */
+ if (dev->pm_cap) {
+ u16 pmcsr;
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+ }
+
+ if (atomic_inc_return(&dev->enable_cnt) > 1)
return 0; /* already enabled */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ bridge = pci_upstream_bridge(dev);
+ if (bridge)
+ pci_enable_bridge(bridge);
+
+ /* only skip SR-IOV related resources */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+ if (dev->resource[i].flags & flags)
+ bars |= (1 << i);
+ for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
if (dev->resource[i].flags & flags)
bars |= (1 << i);
@@ -1030,8 +1306,9 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
*/
int pci_enable_device_io(struct pci_dev *dev)
{
- return __pci_enable_device_flags(dev, IORESOURCE_IO);
+ return pci_enable_device_flags(dev, IORESOURCE_IO);
}
+EXPORT_SYMBOL(pci_enable_device_io);
/**
* pci_enable_device_mem - Initialize a device for use with Memory space
@@ -1043,8 +1320,9 @@ int pci_enable_device_io(struct pci_dev *dev)
*/
int pci_enable_device_mem(struct pci_dev *dev)
{
- return __pci_enable_device_flags(dev, IORESOURCE_MEM);
+ return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
+EXPORT_SYMBOL(pci_enable_device_mem);
/**
* pci_enable_device - Initialize device before it's used by a driver.
@@ -1059,8 +1337,9 @@ int pci_enable_device_mem(struct pci_dev *dev)
*/
int pci_enable_device(struct pci_dev *dev)
{
- return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
+ return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
+EXPORT_SYMBOL(pci_enable_device);
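/*
 * Illustrative usage (not part of this patch): pci_enable_device() and
 * pci_disable_device() are reference counted via dev->enable_cnt, and
 * with this change the first enable also walks up and enables any
 * upstream bridges. A typical (hypothetical) probe/remove pair:
 */
static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);	/* I/O and memory decoding */
	if (err)
		return err;

	pci_set_master(pdev);		/* required for DMA and MSI */
	return 0;
}

static void my_remove(struct pci_dev *pdev)
{
	/* drops enable_cnt; the device is disabled when it hits zero */
	pci_disable_device(pdev);
}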
/*
* Managed PCI resources. This manages device on/off, intx/msi/msix
@@ -1098,7 +1377,7 @@ static void pcim_release(struct device *gendev, void *res)
pci_disable_device(dev);
}
-static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
+static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
struct pci_devres *dr, *new_dr;
@@ -1112,7 +1391,7 @@ static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
return devres_get(&pdev->dev, new_dr, NULL, NULL);
}
-static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
+static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
if (pci_is_managed(pdev))
return devres_find(&pdev->dev, pcim_release, NULL, NULL);
@@ -1143,6 +1422,7 @@ int pcim_enable_device(struct pci_dev *pdev)
}
return rc;
}
+EXPORT_SYMBOL(pcim_enable_device);
/**
* pcim_pin_device - Pin managed PCI device
@@ -1161,6 +1441,30 @@ void pcim_pin_device(struct pci_dev *pdev)
if (dr)
dr->pinned = 1;
}
+EXPORT_SYMBOL(pcim_pin_device);
+
+/**
+ * pcibios_add_device - provide arch specific hooks when adding device dev
+ * @dev: the PCI device being added
+ *
+ * Permits the platform to provide architecture specific functionality when
+ * devices are added. This is the default implementation. Architecture
+ * implementations can override this.
+ */
+int __weak pcibios_add_device(struct pci_dev *dev)
+{
+ return 0;
+}
+
+/**
+ * pcibios_release_device - provide arch specific hooks when releasing device dev
+ * @dev: the PCI device being released
+ *
+ * Permits the platform to provide architecture specific functionality when
+ * devices are released. This is the default implementation. Architecture
+ * implementations can override this.
+ */
+void __weak pcibios_release_device(struct pci_dev *dev) {}
/**
* pcibios_disable_device - disable arch specific PCI resources for device dev
@@ -1170,7 +1474,18 @@ void pcim_pin_device(struct pci_dev *pdev)
* is the default implementation. Architecture implementations can
* override this.
*/
-void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
+void __weak pcibios_disable_device (struct pci_dev *dev) {}
+
+/**
+ * pcibios_penalize_isa_irq - penalize an ISA IRQ
+ * @irq: ISA IRQ to penalize
+ * @active: IRQ active or not
+ *
+ * Permits the platform to provide architecture-specific functionality when
+ * penalizing ISA IRQs. This is the default implementation. Architecture
+ * implementations can override this.
+ */
+void __weak pcibios_penalize_isa_irq(int irq, int active) {}
static void do_pci_disable_device(struct pci_dev *dev)
{
@@ -1208,8 +1523,7 @@ void pci_disable_enabled_device(struct pci_dev *dev)
* Note we don't actually disable the device until all callers of
* pci_enable_device() have called pci_disable_device().
*/
-void
-pci_disable_device(struct pci_dev *dev)
+void pci_disable_device(struct pci_dev *dev)
{
struct pci_devres *dr;
@@ -1217,13 +1531,17 @@ pci_disable_device(struct pci_dev *dev)
if (dr)
dr->enabled = 0;
- if (atomic_sub_return(1, &dev->enable_cnt) != 0)
+ dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
+ "disabling already-disabled device");
+
+ if (atomic_dec_return(&dev->enable_cnt) != 0)
return;
do_pci_disable_device(dev);
dev->is_busmaster = 0;
}
+EXPORT_SYMBOL(pci_disable_device);
/**
* pcibios_set_pcie_reset_state - set reset state for device dev
@@ -1234,8 +1552,8 @@ pci_disable_device(struct pci_dev *dev)
* Sets the PCIe reset state for the device. This is the default
* implementation. Architecture implementations can override this.
*/
-int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
- enum pcie_reset_state state)
+int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
+ enum pcie_reset_state state)
{
return -EINVAL;
}
@@ -1252,6 +1570,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
return pcibios_set_pcie_reset_state(dev, state);
}
+EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
/**
* pci_check_pme_status - Check if given device has generated PME.
@@ -1288,35 +1607,22 @@ bool pci_check_pme_status(struct pci_dev *dev)
return ret;
}
-/*
- * Time to wait before the system can be put into a sleep state after reporting
- * a wakeup event signaled by a PCI device.
- */
-#define PCI_WAKEUP_COOLDOWN 100
-
-/**
- * pci_wakeup_event - Report a wakeup event related to a given PCI device.
- * @dev: Device to report the wakeup event for.
- */
-void pci_wakeup_event(struct pci_dev *dev)
-{
- if (device_may_wakeup(&dev->dev))
- pm_wakeup_event(&dev->dev, PCI_WAKEUP_COOLDOWN);
-}
-
/**
* pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
* @dev: Device to handle.
- * @ign: Ignored.
+ * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
*
* Check if @dev has generated PME and queue a resume request for it in that
* case.
*/
-static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
+static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
+ if (pme_poll_reset && dev->pme_poll)
+ dev->pme_poll = false;
+
if (pci_check_pme_status(dev)) {
- pm_request_resume(&dev->dev);
pci_wakeup_event(dev);
+ pm_request_resume(&dev->dev);
}
return 0;
}
@@ -1328,9 +1634,10 @@ static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
if (bus)
- pci_walk_bus(bus, pci_pme_wakeup, NULL);
+ pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}
+
/**
* pci_pme_capable - check the capability of PCI device to generate PME#
* @dev: PCI device to handle.
@@ -1343,34 +1650,38 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
return !!(dev->pme_support & (1 << state));
}
+EXPORT_SYMBOL(pci_pme_capable);
static void pci_pme_list_scan(struct work_struct *work)
{
- struct pci_pme_device *pme_dev;
+ struct pci_pme_device *pme_dev, *n;
mutex_lock(&pci_pme_list_mutex);
- if (!list_empty(&pci_pme_list)) {
- list_for_each_entry(pme_dev, &pci_pme_list, list)
+ list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
+ if (pme_dev->dev->pme_poll) {
+ struct pci_dev *bridge;
+
+ bridge = pme_dev->dev->bus->self;
+ /*
+ * If the bridge is in a low power state, the
+ * configuration space of subordinate devices
+ * may not be accessible
+ */
+ if (bridge && bridge->current_state != PCI_D0)
+ continue;
pci_pme_wakeup(pme_dev->dev, NULL);
- schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
+ } else {
+ list_del(&pme_dev->list);
+ kfree(pme_dev);
+ }
}
+ if (!list_empty(&pci_pme_list))
+ schedule_delayed_work(&pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
mutex_unlock(&pci_pme_list_mutex);
}
/**
- * pci_external_pme - is a device an external PCI PME source?
- * @dev: PCI device to check
- *
- */
-
-static bool pci_external_pme(struct pci_dev *dev)
-{
- if (pci_is_pcie(dev) || dev->bus->number == 0)
- return false;
- return true;
-}
-
-/**
* pci_pme_active - enable or disable PCI device's PME# function
* @dev: PCI device to handle.
* @enable: 'true' to enable PME# generation; 'false' to disable it.
@@ -1382,7 +1693,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
{
u16 pmcsr;
- if (!dev->pm_cap)
+ if (!dev->pme_support)
return;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
@@ -1393,23 +1704,35 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
- /* PCI (as opposed to PCIe) PME requires that the device have
- its PME# line hooked up correctly. Not all hardware vendors
- do this, so the PME never gets delivered and the device
- remains asleep. The easiest way around this is to
- periodically walk the list of suspended devices and check
- whether any have their PME flag set. The assumption is that
- we'll wake up often enough anyway that this won't be a huge
- hit, and the power savings from the devices will still be a
- win. */
-
- if (pci_external_pme(dev)) {
+ /*
+ * PCI (as opposed to PCIe) PME requires that the device have
+ * its PME# line hooked up correctly. Not all hardware vendors
+ * do this, so the PME never gets delivered and the device
+ * remains asleep. The easiest way around this is to
+ * periodically walk the list of suspended devices and check
+ * whether any have their PME flag set. The assumption is that
+ * we'll wake up often enough anyway that this won't be a huge
+ * hit, and the power savings from the devices will still be a
+ * win.
+ *
+ * Although PCIe uses in-band PME message instead of PME# line
+ * to report PME, PME does not work for some PCIe devices in
+ * reality. For example, there are devices that set their PME
+ * status bits, but don't really bother to send a PME message;
+ * there are PCI Express Root Ports that don't bother to
+ * trigger interrupts when they receive PME messages from the
+ * devices below. So PME poll is used for PCIe devices too.
+ */
+
+ if (dev->pme_poll) {
struct pci_pme_device *pme_dev;
if (enable) {
pme_dev = kmalloc(sizeof(struct pci_pme_device),
GFP_KERNEL);
- if (!pme_dev)
- goto out;
+ if (!pme_dev) {
+ dev_warn(&dev->dev, "can't enable PME#\n");
+ return;
+ }
pme_dev->dev = dev;
mutex_lock(&pci_pme_list_mutex);
list_add(&pme_dev->list, &pci_pme_list);
@@ -1430,10 +1753,9 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
}
}
-out:
- dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
- enable ? "enabled" : "disabled");
+ dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
+EXPORT_SYMBOL(pci_pme_active);
/**
* __pci_enable_wake - enable PCI device as wakeup event source
@@ -1519,6 +1841,7 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable)
pci_enable_wake(dev, PCI_D3cold, enable) :
pci_enable_wake(dev, PCI_D3hot, enable);
}
+EXPORT_SYMBOL(pci_wake_from_d3);
/**
* pci_target_state - find an appropriate low power state for a given PCI dev
@@ -1528,7 +1851,7 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable)
* If the platform can't manage @dev, return the deepest state from which it
* can generate wake events, based on any available PME info.
*/
-pci_power_t pci_target_state(struct pci_dev *dev)
+static pci_power_t pci_target_state(struct pci_dev *dev)
{
pci_power_t target_state = PCI_D3hot;
@@ -1584,6 +1907,10 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
+ /* D3cold during system suspend/hibernate is not supported */
+ if (target_state > PCI_D3hot)
+ target_state = PCI_D3hot;
+
pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
error = pci_set_power_state(dev, target_state);
@@ -1593,6 +1920,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
return error;
}
+EXPORT_SYMBOL(pci_prepare_to_sleep);
/**
* pci_back_from_sleep - turn PCI device on during system-wide transition into working state
@@ -1605,6 +1933,7 @@ int pci_back_from_sleep(struct pci_dev *dev)
pci_enable_wake(dev, PCI_D0, false);
return pci_set_power_state(dev, PCI_D0);
}
+EXPORT_SYMBOL(pci_back_from_sleep);
/**
* pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
@@ -1621,12 +1950,16 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
+ dev->runtime_d3cold = target_state == PCI_D3cold;
+
__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
error = pci_set_power_state(dev, target_state);
- if (error)
+ if (error) {
__pci_enable_wake(dev, target_state, true, false);
+ dev->runtime_d3cold = false;
+ }
return error;
}
@@ -1635,7 +1968,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
* pci_dev_run_wake - Check if device can generate run-time wake-up events.
* @dev: Device to check.
*
- * Return true if the device itself is cabable of generating wake-up events
+ * Return true if the device itself is capable of generating wake-up events
* (through the platform or using the native PCIe PME) or if the device supports
* PME and one of its upstream bridges can generate wake-up events.
*/
@@ -1666,6 +1999,38 @@ bool pci_dev_run_wake(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
+void pci_config_pm_runtime_get(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+
+ if (parent)
+ pm_runtime_get_sync(parent);
+ pm_runtime_get_noresume(dev);
+ /*
+ * pdev->current_state is set to PCI_D3cold during suspending,
+ * so wait until suspending completes
+ */
+ pm_runtime_barrier(dev);
+ /*
+ * Only need to resume devices in D3cold, because config
+ * registers are still accessible for devices suspended but
+ * not in D3cold.
+ */
+ if (pdev->current_state == PCI_D3cold)
+ pm_runtime_resume(dev);
+}
+
+void pci_config_pm_runtime_put(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+
+ pm_runtime_put(dev);
+ if (parent)
+ pm_runtime_put_sync(parent);
+}
+
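/*
 * Illustrative usage (not part of this patch): user-visible config
 * accessors (the sysfs "config" file, for example) bracket each access
 * with these helpers so that a device sitting in D3cold is resumed
 * before its configuration registers are touched.
 */
static int my_user_config_read(struct pci_dev *pdev, int where, u8 *val)
{
	pci_config_pm_runtime_get(pdev);
	pci_read_config_byte(pdev, where, val);
	pci_config_pm_runtime_put(pdev);
	return 0;
}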
/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
@@ -1676,10 +2041,13 @@ void pci_pm_init(struct pci_dev *dev)
u16 pmc;
pm_runtime_forbid(&dev->dev);
+ pm_runtime_set_active(&dev->dev);
+ pm_runtime_enable(&dev->dev);
device_enable_async_suspend(&dev->dev);
dev->wakeup_prepared = false;
dev->pm_cap = 0;
+ dev->pme_support = 0;
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
@@ -1696,6 +2064,8 @@ void pci_pm_init(struct pci_dev *dev)
dev->pm_cap = pm;
dev->d3_delay = PCI_PM_D3_WAIT;
+ dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
+ dev->d3cold_allowed = true;
dev->d1_support = false;
dev->d2_support = false;
@@ -1721,6 +2091,7 @@ void pci_pm_init(struct pci_dev *dev)
(pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
(pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
+ dev->pme_poll = true;
/*
* Make device's PM flags reflect the wake-up capability, but
* let the user space enable it to wake up the system as needed.
@@ -1728,43 +2099,34 @@ void pci_pm_init(struct pci_dev *dev)
device_set_wakeup_capable(&dev->dev, true);
/* Disable the PME# generation functionality */
pci_pme_active(dev, false);
- } else {
- dev->pme_support = 0;
}
}
-/**
- * platform_pci_wakeup_init - init platform wakeup if present
- * @dev: PCI device
- *
- * Some devices don't have PCI PM caps but can still generate wakeup
- * events through platform methods (like ACPI events). If @dev supports
- * platform wakeup events, set the device flag to indicate as much. This
- * may be redundant if the device also supports PCI PM caps, but double
- * initialization should be safe in that case.
- */
-void platform_pci_wakeup_init(struct pci_dev *dev)
+static void pci_add_saved_cap(struct pci_dev *pci_dev,
+ struct pci_cap_saved_state *new_cap)
{
- if (!platform_pci_can_wakeup(dev))
- return;
-
- device_set_wakeup_capable(&dev->dev, true);
- platform_pci_sleep_wake(dev, false);
+ hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}
/**
- * pci_add_save_buffer - allocate buffer for saving given capability registers
+ * _pci_add_cap_save_buffer - allocate buffer for saving given
+ * capability registers
* @dev: the PCI device
* @cap: the capability to allocate the buffer for
+ * @extended: true for an Extended capability ID, false for a Standard one
* @size: requested size of the buffer
*/
-static int pci_add_cap_save_buffer(
- struct pci_dev *dev, char cap, unsigned int size)
+static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
+ bool extended, unsigned int size)
{
int pos;
struct pci_cap_saved_state *save_state;
- pos = pci_find_capability(dev, cap);
+ if (extended)
+ pos = pci_find_ext_capability(dev, cap);
+ else
+ pos = pci_find_capability(dev, cap);
+
if (pos <= 0)
return 0;
@@ -1772,12 +2134,24 @@ static int pci_add_cap_save_buffer(
if (!save_state)
return -ENOMEM;
- save_state->cap_nr = cap;
+ save_state->cap.cap_nr = cap;
+ save_state->cap.cap_extended = extended;
+ save_state->cap.size = size;
pci_add_saved_cap(dev, save_state);
return 0;
}
+int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
+{
+ return _pci_add_cap_save_buffer(dev, cap, false, size);
+}
+
+int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
+{
+ return _pci_add_cap_save_buffer(dev, cap, true, size);
+}
+
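/*
 * Illustrative (not part of this patch): a subsystem that wants the core
 * to save/restore a specific extended capability across suspend or reset
 * would preallocate a buffer sized for its registers, e.g. for the LTR
 * capability (two 16-bit registers). The capability ID is real; this
 * call site is hypothetical.
 */
static void my_alloc_ltr_save_buffer(struct pci_dev *dev)
{
	if (pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
					2 * sizeof(u16)))
		dev_err(&dev->dev, "unable to preallocate LTR save buffer\n");
}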
/**
* pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
* @dev: the PCI device
@@ -1796,43 +2170,51 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
if (error)
dev_err(&dev->dev,
"unable to preallocate PCI-X save buffer\n");
+
+ pci_allocate_vc_save_buffers(dev);
+}
+
+void pci_free_cap_save_buffers(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *tmp;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
+ kfree(tmp);
}
/**
- * pci_enable_ari - enable ARI forwarding if hardware support it
+ * pci_configure_ari - enable or disable ARI forwarding
* @dev: the PCI device
+ *
+ * If @dev and its upstream bridge both support ARI, enable ARI in the
+ * bridge. Otherwise, disable ARI in the bridge.
*/
-void pci_enable_ari(struct pci_dev *dev)
+void pci_configure_ari(struct pci_dev *dev)
{
- int pos;
u32 cap;
- u16 ctrl;
struct pci_dev *bridge;
- if (!pci_is_pcie(dev) || dev->devfn)
- return;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
- if (!pos)
+ if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
return;
bridge = dev->bus->self;
- if (!bridge || !pci_is_pcie(bridge))
+ if (!bridge)
return;
- pos = pci_pcie_cap(bridge);
- if (!pos)
- return;
-
- pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
if (!(cap & PCI_EXP_DEVCAP2_ARI))
return;
- pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
- ctrl |= PCI_EXP_DEVCTL2_ARI;
- pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
-
- bridge->ari_enabled = 1;
+ if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
+ pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ARI);
+ bridge->ari_enabled = 1;
+ } else {
+ pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ARI);
+ bridge->ari_enabled = 0;
+ }
}
static int pci_acs_enable;
@@ -1846,24 +2228,18 @@ void pci_request_acs(void)
}
/**
- * pci_enable_acs - enable ACS if hardware support it
+ * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
* @dev: the PCI device
*/
-void pci_enable_acs(struct pci_dev *dev)
+static int pci_std_enable_acs(struct pci_dev *dev)
{
int pos;
u16 cap;
u16 ctrl;
- if (!pci_acs_enable)
- return;
-
- if (!pci_is_pcie(dev))
- return;
-
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
if (!pos)
- return;
+ return -ENODEV;
pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
@@ -1881,12 +2257,159 @@ void pci_enable_acs(struct pci_dev *dev)
ctrl |= (cap & PCI_ACS_UF);
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+
+ return 0;
+}
+
+/**
+ * pci_enable_acs - enable ACS if hardware supports it
+ * @dev: the PCI device
+ */
+void pci_enable_acs(struct pci_dev *dev)
+{
+ if (!pci_acs_enable)
+ return;
+
+ if (!pci_std_enable_acs(dev))
+ return;
+
+ pci_dev_specific_enable_acs(dev);
+}
+
+static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
+{
+ int pos;
+ u16 cap, ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return false;
+
+ /*
+ * Except for egress control, capabilities are either required
+ * or only required if controllable. Features missing from the
+ * capability field can therefore be assumed as hard-wired enabled.
+ */
+ pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
+ acs_flags &= (cap | PCI_ACS_EC);
+
+ pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
+ return (ctrl & acs_flags) == acs_flags;
+}
+
+/**
+ * pci_acs_enabled - test ACS against required flags for a given device
+ * @pdev: device to test
+ * @acs_flags: required PCI ACS flags
+ *
+ * Return true if the device supports the provided flags. Automatically
+ * filters out flags that are not implemented on multifunction devices.
+ *
+ * Note that this interface checks the effective ACS capabilities of the
+ * device rather than the actual capabilities. For instance, most single
+ * function endpoints are not required to support ACS because they have no
+ * opportunity for peer-to-peer access. We therefore return 'true'
+ * regardless of whether the device exposes an ACS capability. This makes
+ * it much easier for callers of this function to ignore the actual type
+ * or topology of the device when testing ACS support.
+ */
+bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
+{
+ int ret;
+
+ ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
+ if (ret >= 0)
+ return ret > 0;
+
+ /*
+ * Conventional PCI and PCI-X devices never support ACS, either
+ * effectively or actually. The shared bus topology implies that
+ * any device on the bus can receive or snoop DMA.
+ */
+ if (!pci_is_pcie(pdev))
+ return false;
+
+ switch (pci_pcie_type(pdev)) {
+ /*
+ * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
+ * but since their primary interface is PCI/X, we conservatively
+ * handle them as we would a non-PCIe device.
+ */
+ case PCI_EXP_TYPE_PCIE_BRIDGE:
+ /*
+ * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
+ * applicable... must never implement an ACS Extended Capability...".
+ * This seems arbitrary, but we take a conservative interpretation
+ * of this statement.
+ */
+ case PCI_EXP_TYPE_PCI_BRIDGE:
+ case PCI_EXP_TYPE_RC_EC:
+ return false;
+ /*
+ * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
+ * implement ACS in order to indicate their peer-to-peer capabilities,
+ * regardless of whether they are single- or multi-function devices.
+ */
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ case PCI_EXP_TYPE_ROOT_PORT:
+ return pci_acs_flags_enabled(pdev, acs_flags);
+ /*
+ * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
+ * implemented by the remaining PCIe types to indicate peer-to-peer
+ * capabilities, but only when they are part of a multifunction
+ * device. The footnote for section 6.12 indicates the specific
+ * PCIe types included here.
+ */
+ case PCI_EXP_TYPE_ENDPOINT:
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_LEG_END:
+ case PCI_EXP_TYPE_RC_END:
+ if (!pdev->multifunction)
+ break;
+
+ return pci_acs_flags_enabled(pdev, acs_flags);
+ }
+
+ /*
+ * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
+ * to single function devices with the exception of downstream ports.
+ */
+ return true;
+}
+
+/**
+ * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
+ * @start: starting downstream device
+ * @end: ending upstream device or NULL to search to the root bus
+ * @acs_flags: required flags
+ *
+ * Walk up a device tree from start to end testing PCI ACS support. If
+ * any step along the way does not support the required flags, return false.
+ */
+bool pci_acs_path_enabled(struct pci_dev *start,
+ struct pci_dev *end, u16 acs_flags)
+{
+ struct pci_dev *pdev, *parent = start;
+
+ do {
+ pdev = parent;
+
+ if (!pci_acs_enabled(pdev, acs_flags))
+ return false;
+
+ if (pci_is_root_bus(pdev->bus))
+ return (end == NULL);
+
+ parent = pdev->bus->self;
+ } while (pdev != end);
+
+ return true;
}
/**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device
- * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTD, 4=INTD)
+ * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
*
* Perform INTx swizzling for a device behind one level of bridge. This is
* required by section 9.1 of the PCI-to-PCI bridge specification for devices
@@ -1894,7 +2417,7 @@ void pci_enable_acs(struct pci_dev *dev)
* number is always 0 (see the Implementation Note in section 2.2.8.1 of
* the PCI Express Base Specification, Revision 2.1)
*/
-u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
{
int slot;
@@ -1906,8 +2429,7 @@ u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
return (((pin - 1) + slot) % 4) + 1;
}
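/*
 * Worked example (illustrative): a device in slot 2 behind one bridge
 * asserting INTC (pin = 3) appears on the bridge's primary bus as
 * (((3 - 1) + 2) % 4) + 1 = 1, i.e. INTA. Applying the swizzle once per
 * bridge level yields the pin as seen at the root bus.
 */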
-int
-pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
+int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
u8 pin;
@@ -1969,6 +2491,7 @@ void pci_release_region(struct pci_dev *pdev, int bar)
if (dr)
dr->region_mask &= ~(1 << bar);
}
+EXPORT_SYMBOL(pci_release_region);
/**
* __pci_request_region - Reserved PCI I/O and memory resource
@@ -1984,25 +2507,24 @@ void pci_release_region(struct pci_dev *pdev, int bar)
*
* If @exclusive is set, then the region is marked so that userspace
* is explicitly not allowed to map the resource via /dev/mem or
- * sysfs MMIO access.
+ * sysfs MMIO access.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*/
-static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
- int exclusive)
+static int __pci_request_region(struct pci_dev *pdev, int bar,
+ const char *res_name, int exclusive)
{
struct pci_devres *dr;
if (pci_resource_len(pdev, bar) == 0)
return 0;
-
+
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
if (!request_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar), res_name))
goto err_out;
- }
- else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
+ } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
if (!__request_mem_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar), res_name,
exclusive))
@@ -2039,6 +2561,7 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
return __pci_request_region(pdev, bar, res_name, 0);
}
+EXPORT_SYMBOL(pci_request_region);
/**
* pci_request_region_exclusive - Reserved PCI I/O and memory resource
@@ -2056,12 +2579,15 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
*
* The key difference that _exclusive makes is that userspace is
* explicitly not allowed to map the resource via /dev/mem or
- * sysfs.
+ * sysfs.
*/
-int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
+int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
+ const char *res_name)
{
return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
+EXPORT_SYMBOL(pci_request_region_exclusive);
+
/**
* pci_release_selected_regions - Release selected PCI I/O and memory resources
* @pdev: PCI device whose resources were previously reserved
@@ -2078,9 +2604,10 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
if (bars & (1 << i))
pci_release_region(pdev, i);
}
+EXPORT_SYMBOL(pci_release_selected_regions);
-int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
- const char *res_name, int excl)
+static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
+ const char *res_name, int excl)
{
int i;
@@ -2091,7 +2618,7 @@ int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
return 0;
err_out:
- while(--i >= 0)
+ while (--i >= 0)
if (bars & (1 << i))
pci_release_region(pdev, i);
@@ -2110,13 +2637,15 @@ int pci_request_selected_regions(struct pci_dev *pdev, int bars,
{
return __pci_request_selected_regions(pdev, bars, res_name, 0);
}
+EXPORT_SYMBOL(pci_request_selected_regions);
-int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
- int bars, const char *res_name)
+int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
+ const char *res_name)
{
return __pci_request_selected_regions(pdev, bars, res_name,
IORESOURCE_EXCLUSIVE);
}
+EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
/**
* pci_release_regions - Release reserved PCI I/O and memory resources
@@ -2131,6 +2660,7 @@ void pci_release_regions(struct pci_dev *pdev)
{
pci_release_selected_regions(pdev, (1 << 6) - 1);
}
+EXPORT_SYMBOL(pci_release_regions);
/**
* pci_request_regions - Reserved PCI I/O and memory resources
@@ -2149,6 +2679,7 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
+EXPORT_SYMBOL(pci_request_regions);
/**
* pci_request_regions_exclusive - Reserved PCI I/O and memory resources
@@ -2161,7 +2692,7 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name)
* successfully.
*
* pci_request_regions_exclusive() will mark the region so that
- * /dev/mem and the sysfs MMIO access will not be allowed.
+ * /dev/mem and the sysfs MMIO access will not be allowed.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
@@ -2171,6 +2702,7 @@ int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
return pci_request_selected_regions_exclusive(pdev,
((1 << 6) - 1), res_name);
}
+EXPORT_SYMBOL(pci_request_regions_exclusive);
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
@@ -2190,6 +2722,45 @@ static void __pci_set_master(struct pci_dev *dev, bool enable)
}
/**
+ * pcibios_setup - process "pci=" kernel boot arguments
+ * @str: string used to pass in "pci=" kernel boot arguments
+ *
+ * Process kernel boot arguments. This is the default implementation.
+ * Architecture specific implementations can override this as necessary.
+ */
+char * __weak __init pcibios_setup(char *str)
+{
+ return str;
+}
+
+/**
+ * pcibios_set_master - enable PCI bus-mastering for device dev
+ * @dev: the PCI device to enable
+ *
+ * Enables PCI bus-mastering for the device. This is the default
+ * implementation. Architecture specific implementations can override
+ * this if necessary.
+ */
+void __weak pcibios_set_master(struct pci_dev *dev)
+{
+ u8 lat;
+
+ /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
+ if (pci_is_pcie(dev))
+ return;
+
+ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
+ if (lat < 16)
+ lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
+ else if (lat > pcibios_max_latency)
+ lat = pcibios_max_latency;
+ else
+ return;
+
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
+}
+
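/*
 * Worked example (illustrative): with the default pcibios_max_latency of
 * 255, a conventional PCI device whose latency timer reads 8 (< 16) is
 * raised to 64, while one reading 255 is left untouched. If a platform
 * lowered pcibios_max_latency to 32, a device reading 200 would be
 * clamped down to 32. PCIe devices are skipped entirely.
 */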
+/**
* pci_set_master - enables bus-mastering for device dev
* @dev: the PCI device to enable
*
@@ -2201,6 +2772,7 @@ void pci_set_master(struct pci_dev *dev)
__pci_set_master(dev, true);
pcibios_set_master(dev);
}
+EXPORT_SYMBOL(pci_set_master);
/**
* pci_clear_master - disables bus-mastering for device dev
@@ -2210,6 +2782,7 @@ void pci_clear_master(struct pci_dev *dev)
{
__pci_set_master(dev, false);
}
+EXPORT_SYMBOL(pci_clear_master);
/**
* pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
@@ -2242,30 +2815,13 @@ int pci_set_cacheline_size(struct pci_dev *dev)
if (cacheline_size == pci_cache_line_size)
return 0;
- dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
- "supported\n", pci_cache_line_size << 2);
+ dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
+ pci_cache_line_size << 2);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
-#ifdef PCI_DISABLE_MWI
-int pci_set_mwi(struct pci_dev *dev)
-{
- return 0;
-}
-
-int pci_try_set_mwi(struct pci_dev *dev)
-{
- return 0;
-}
-
-void pci_clear_mwi(struct pci_dev *dev)
-{
-}
-
-#else
-
/**
* pci_set_mwi - enables memory-write-invalidate PCI transaction
* @dev: the PCI device for which MWI is enabled
@@ -2274,9 +2830,11 @@ void pci_clear_mwi(struct pci_dev *dev)
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
-int
-pci_set_mwi(struct pci_dev *dev)
+int pci_set_mwi(struct pci_dev *dev)
{
+#ifdef PCI_DISABLE_MWI
+ return 0;
+#else
int rc;
u16 cmd;
@@ -2285,14 +2843,15 @@ pci_set_mwi(struct pci_dev *dev)
return rc;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
- if (! (cmd & PCI_COMMAND_INVALIDATE)) {
+ if (!(cmd & PCI_COMMAND_INVALIDATE)) {
dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
cmd |= PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
-
return 0;
+#endif
}
+EXPORT_SYMBOL(pci_set_mwi);
/**
* pci_try_set_mwi - enables memory-write-invalidate PCI transaction
@@ -2305,9 +2864,13 @@ pci_set_mwi(struct pci_dev *dev)
*/
int pci_try_set_mwi(struct pci_dev *dev)
{
- int rc = pci_set_mwi(dev);
- return rc;
+#ifdef PCI_DISABLE_MWI
+ return 0;
+#else
+ return pci_set_mwi(dev);
+#endif
}
+EXPORT_SYMBOL(pci_try_set_mwi);
/**
* pci_clear_mwi - disables Memory-Write-Invalidate for device dev
@@ -2315,9 +2878,9 @@ int pci_try_set_mwi(struct pci_dev *dev)
*
* Disables PCI Memory-Write-Invalidate transaction on the device
*/
-void
-pci_clear_mwi(struct pci_dev *dev)
+void pci_clear_mwi(struct pci_dev *dev)
{
+#ifndef PCI_DISABLE_MWI
u16 cmd;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
@@ -2325,8 +2888,9 @@ pci_clear_mwi(struct pci_dev *dev)
cmd &= ~PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
+#endif
}
-#endif /* ! PCI_DISABLE_MWI */
+EXPORT_SYMBOL(pci_clear_mwi);
/**
* pci_intx - enables/disables PCI INTx for device dev
@@ -2335,18 +2899,16 @@ pci_clear_mwi(struct pci_dev *dev)
*
* Enables/disables PCI INTx for device dev
*/
-void
-pci_intx(struct pci_dev *pdev, int enable)
+void pci_intx(struct pci_dev *pdev, int enable)
{
u16 pci_command, new;
pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
- if (enable) {
+ if (enable)
new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
- } else {
+ else
new = pci_command | PCI_COMMAND_INTX_DISABLE;
- }
if (new != pci_command) {
struct pci_devres *dr;
@@ -2360,20 +2922,139 @@ pci_intx(struct pci_dev *pdev, int enable)
}
}
}
+EXPORT_SYMBOL_GPL(pci_intx);
+
+/**
+ * pci_intx_mask_supported - probe for INTx masking support
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev supports INTx masking via the config space
+ * command word.
+ */
+bool pci_intx_mask_supported(struct pci_dev *dev)
+{
+ bool mask_supported = false;
+ u16 orig, new;
+
+ if (dev->broken_intx_masking)
+ return false;
+
+ pci_cfg_access_lock(dev);
+
+ pci_read_config_word(dev, PCI_COMMAND, &orig);
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig ^ PCI_COMMAND_INTX_DISABLE);
+ pci_read_config_word(dev, PCI_COMMAND, &new);
+
+ /*
+ * There's no way to protect against hardware bugs or detect them
+ * reliably, but as long as we know what the value should be, let's
+ * go ahead and check it.
+ */
+ if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
+ dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
+ orig, new);
+ } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
+ mask_supported = true;
+ pci_write_config_word(dev, PCI_COMMAND, orig);
+ }
+
+ pci_cfg_access_unlock(dev);
+ return mask_supported;
+}
+EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
+
+static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
+{
+ struct pci_bus *bus = dev->bus;
+ bool mask_updated = true;
+ u32 cmd_status_dword;
+ u16 origcmd, newcmd;
+ unsigned long flags;
+ bool irq_pending;
+
+ /*
+ * We do a single dword read to retrieve both command and status.
+ * Document assumptions that make this possible.
+ */
+ BUILD_BUG_ON(PCI_COMMAND % 4);
+ BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
+
+ raw_spin_lock_irqsave(&pci_lock, flags);
+
+ bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
+
+ irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
+
+ /*
+ * Check interrupt status register to see whether our device
+ * triggered the interrupt (when masking) or the next IRQ is
+ * already pending (when unmasking).
+ */
+ if (mask != irq_pending) {
+ mask_updated = false;
+ goto done;
+ }
+
+ origcmd = cmd_status_dword;
+ newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
+ if (mask)
+ newcmd |= PCI_COMMAND_INTX_DISABLE;
+ if (newcmd != origcmd)
+ bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
+
+done:
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+
+ return mask_updated;
+}
/**
- * pci_msi_off - disables any msi or msix capabilities
+ * pci_check_and_mask_intx - mask INTx on pending interrupt
* @dev: the PCI device to operate on
*
- * If you want to use msi see pci_enable_msi and friends.
- * This is a lower level primitive that allows us to disable
- * msi operation at the device level.
+ * Check if the device dev has its INTx line asserted, mask it and
+ * return true in that case. False is returned if no interrupt was
+ * pending.
+ */
+bool pci_check_and_mask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, true);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
+
+/**
+ * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev has its INTx line asserted; if not, unmask
+ * it and return true. False is returned and the mask remains active
+ * if an interrupt was still pending.
+ */
+bool pci_check_and_unmask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, false);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
+
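/*
 * Illustrative usage (not part of this patch): the pair above supports
 * the classic pattern for driving a level-triggered INTx line from a
 * userspace-oriented driver (uio/vfio style). The handler below is
 * hypothetical; the consumer would later unmask via
 * pci_check_and_unmask_intx() or pci_intx().
 */
static irqreturn_t my_intx_handler(int irq, void *data)
{
	struct pci_dev *pdev = data;

	/* Not ours unless the device asserted INTx; mask it if it did */
	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;

	/* ... notify the consumer while the interrupt stays masked ... */
	return IRQ_HANDLED;
}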
+/**
+ * pci_msi_off - disables any MSI or MSI-X capabilities
+ * @dev: the PCI device to operate on
+ *
+ * If you want to use MSI, see pci_enable_msi() and friends.
+ * This is a lower-level primitive that allows us to disable
+ * MSI operation at the device level.
*/
void pci_msi_off(struct pci_dev *dev)
{
int pos;
u16 control;
+ /*
+ * This looks like it could go in msi.c, but we need it even when
+ * CONFIG_PCI_MSI=n. For the same reason, we can't use
+ * dev->msi_cap or dev->msix_cap here.
+ */
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
if (pos) {
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
@@ -2401,41 +3082,37 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
+/**
+ * pci_wait_for_pending_transaction - waits for pending transactions to complete
+ * @dev: the PCI device to operate on
+ *
+ * Return 0 if the transaction is still pending, 1 otherwise.
+ */
+int pci_wait_for_pending_transaction(struct pci_dev *dev)
+{
+ if (!pci_is_pcie(dev))
+ return 1;
+
+ return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
+ PCI_EXP_DEVSTA_TRPND);
+}
+EXPORT_SYMBOL(pci_wait_for_pending_transaction);
+
static int pcie_flr(struct pci_dev *dev, int probe)
{
- int i;
- int pos;
u32 cap;
- u16 status, control;
-
- pos = pci_pcie_cap(dev);
- if (!pos)
- return -ENOTTY;
- pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
+ pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
if (!(cap & PCI_EXP_DEVCAP_FLR))
return -ENOTTY;
if (probe)
return 0;
- /* Wait for Transaction Pending bit clean */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
- pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto clear;
- }
-
- dev_err(&dev->dev, "transaction is not cleared; "
- "proceeding with reset anyway\n");
-
-clear:
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
- control |= PCI_EXP_DEVCTL_BCR_FLR;
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
msleep(100);
@@ -2444,10 +3121,8 @@ clear:
static int pci_af_flr(struct pci_dev *dev, int probe)
{
- int i;
int pos;
u8 cap;
- u8 status;
pos = pci_find_capability(dev, PCI_CAP_ID_AF);
if (!pos)
@@ -2460,18 +3135,16 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
if (probe)
return 0;
- /* Wait for Transaction Pending bit clean */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
-
- pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
- if (!(status & PCI_AF_STATUS_TP))
- goto clear;
- }
+ /*
+ * Wait for Transaction Pending bit to clear. A word-aligned test
+ * is used, so we use the control offset rather than status and shift
+ * the test bit to match.
+ */
+ if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
+ PCI_AF_STATUS_TP << 8))
+ goto clear;
- dev_err(&dev->dev, "transaction is not cleared; "
- "proceeding with reset anyway\n");
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
clear:
pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
@@ -2480,6 +3153,21 @@ clear:
return 0;
}
+/**
+ * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
+ * @dev: Device to reset.
+ * @probe: If set, only check if the device can be reset this way.
+ *
+ * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
+ * unset, it will be reinitialized internally when going from PCI_D3hot to
+ * PCI_D0. If that's the case and the device is not in a low-power state
+ * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
+ *
+ * NOTE: This causes the caller to sleep for twice the device power transition
+ * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
+ * by default (i.e. unless the @dev's d3_delay field has a different value).
+ * Moreover, only devices in D0 can be reset by this function.
+ */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
u16 csr;
@@ -2510,9 +3198,47 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
return 0;
}
-static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
{
u16 ctrl;
+
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
+ ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+ /*
+ * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
+ * this to 2ms to ensure that we meet the minimum requirement.
+ */
+ msleep(2);
+
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+
+ /*
+ * Trhfa for conventional PCI is 2^25 clock cycles.
+ * Assuming a minimum 33MHz clock this results in a 1s
+ * delay before we can consider subordinate devices to
+ * be re-initialized. PCIe has some ways to shorten this,
+ * but we don't make use of them yet.
+ */
+ ssleep(1);
+}
+
+/**
+ * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
+ * @dev: Bridge device
+ *
+ * Use the bridge control register to assert reset on the secondary bus.
+ * Devices on the secondary bus are left in power-on state.
+ */
+void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
+{
+ pcibios_reset_secondary_bus(dev);
+}
+EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
+
+static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+{
struct pci_dev *pdev;
if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
@@ -2525,30 +3251,46 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
if (probe)
return 0;
- pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
- ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
- msleep(100);
-
- ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
- msleep(100);
+ pci_reset_bridge_secondary_bus(dev->bus->self);
return 0;
}
-static int pci_dev_reset(struct pci_dev *dev, int probe)
+static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
+{
+ int rc = -ENOTTY;
+
+ if (!hotplug || !try_module_get(hotplug->ops->owner))
+ return rc;
+
+ if (hotplug->ops->reset_slot)
+ rc = hotplug->ops->reset_slot(hotplug, probe);
+
+ module_put(hotplug->ops->owner);
+
+ return rc;
+}
+
+static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
+{
+ struct pci_dev *pdev;
+
+ if (dev->subordinate || !dev->slot)
+ return -ENOTTY;
+
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev != dev && pdev->slot == dev->slot)
+ return -ENOTTY;
+
+ return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
+}
+
+static int __pci_dev_reset(struct pci_dev *dev, int probe)
{
int rc;
might_sleep();
- if (!probe) {
- pci_block_user_cfg_access(dev);
- /* block PM suspend, driver probe, etc. */
- device_lock(&dev->dev);
- }
-
rc = pci_dev_specific_reset(dev, probe);
if (rc != -ENOTTY)
goto done;
@@ -2565,13 +3307,97 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
if (rc != -ENOTTY)
goto done;
+ rc = pci_dev_reset_slot_function(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
rc = pci_parent_bus_reset(dev, probe);
done:
- if (!probe) {
- device_unlock(&dev->dev);
- pci_unblock_user_cfg_access(dev);
+ return rc;
+}
+
+static void pci_dev_lock(struct pci_dev *dev)
+{
+ pci_cfg_access_lock(dev);
+ /* block PM suspend, driver probe, etc. */
+ device_lock(&dev->dev);
+}
+
+/* Return 1 on successful lock, 0 on contention */
+static int pci_dev_trylock(struct pci_dev *dev)
+{
+ if (pci_cfg_access_trylock(dev)) {
+ if (device_trylock(&dev->dev))
+ return 1;
+ pci_cfg_access_unlock(dev);
}
+ return 0;
+}
+
+static void pci_dev_unlock(struct pci_dev *dev)
+{
+ device_unlock(&dev->dev);
+ pci_cfg_access_unlock(dev);
+}
+
+/**
+ * pci_reset_notify - notify device driver of reset
+ * @dev: device to be notified of reset
+ * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
+ * completed
+ *
+ * Must be called prior to device access being disabled and after device
+ * access is restored.
+ */
+static void pci_reset_notify(struct pci_dev *dev, bool prepare)
+{
+ const struct pci_error_handlers *err_handler =
+ dev->driver ? dev->driver->err_handler : NULL;
+ if (err_handler && err_handler->reset_notify)
+ err_handler->reset_notify(dev, prepare);
+}
+
+static void pci_dev_save_and_disable(struct pci_dev *dev)
+{
+ pci_reset_notify(dev, true);
+
+ /*
+ * Wake-up device prior to save. PM registers default to D0 after
+ * reset and a simple register restore doesn't reliably return
+ * to a non-D0 state anyway.
+ */
+ pci_set_power_state(dev, PCI_D0);
+
+ pci_save_state(dev);
+ /*
+ * Disable the device by clearing the Command register, except for
+ * INTx-disable which is set. This not only disables MMIO and I/O port
+ * BARs, but also prevents the device from being Bus Master, preventing
+ * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
+ * compliant devices, INTx-disable prevents legacy interrupts.
+ */
+ pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+}
+
+static void pci_dev_restore(struct pci_dev *dev)
+{
+ pci_restore_state(dev);
+ pci_reset_notify(dev, false);
+}
+
+static int pci_dev_reset(struct pci_dev *dev, int probe)
+{
+ int rc;
+
+ if (!probe)
+ pci_dev_lock(dev);
+
+ rc = __pci_dev_reset(dev, probe);
+
+ if (!probe)
+ pci_dev_unlock(dev);
+
return rc;
}
@@ -2599,6 +3425,31 @@ int __pci_reset_function(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(__pci_reset_function);
/**
+ * __pci_reset_function_locked - reset a PCI device function while holding
+ * the @dev mutex lock.
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * The device function is presumed to be unused and the caller is holding
+ * the device mutex lock when this function is called.
+ * Resetting the device will make the contents of PCI configuration space
+ * random, so any caller of this must be prepared to reinitialise the
+ * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
+ * etc.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int __pci_reset_function_locked(struct pci_dev *dev)
+{
+ return __pci_dev_reset(dev, 0);
+}
+EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
+
+/**
* pci_probe_reset_function - check whether the device can be safely reset
* @dev: PCI device to reset
*
@@ -2638,23 +3489,393 @@ int pci_reset_function(struct pci_dev *dev)
if (rc)
return rc;
- pci_save_state(dev);
-
- /*
- * both INTx and MSI are disabled after the Interrupt Disable bit
- * is set and the Bus Master bit is cleared.
- */
- pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+ pci_dev_save_and_disable(dev);
rc = pci_dev_reset(dev, 0);
- pci_restore_state(dev);
+ pci_dev_restore(dev);
return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
/**
+ * pci_try_reset_function - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Same as above, except return -EAGAIN if unable to lock the device.
+ */
+int pci_try_reset_function(struct pci_dev *dev)
+{
+ int rc;
+
+ rc = pci_dev_reset(dev, 1);
+ if (rc)
+ return rc;
+
+ pci_dev_save_and_disable(dev);
+
+ if (pci_dev_trylock(dev)) {
+ rc = __pci_dev_reset(dev, 0);
+ pci_dev_unlock(dev);
+ } else
+ rc = -EAGAIN;
+
+ pci_dev_restore(dev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_try_reset_function);
+
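/*
 * Illustrative usage (not part of this patch): callers on paths that must
 * not block on the device lock (an ioctl handler, for instance) use the
 * _try_ variant and back off on contention. A hypothetical bounded retry:
 */
static int my_reset_with_retry(struct pci_dev *pdev)
{
	int i, ret;

	for (i = 0; i < 5; i++) {
		ret = pci_try_reset_function(pdev);
		if (ret != -EAGAIN)
			return ret;
		msleep(20);	/* lock contended; back off and retry */
	}
	return ret;
}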
+/* Lock devices from the top of the tree down */
+static void pci_bus_lock(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
+ }
+}
+
+/* Unlock devices from the bottom of the tree up */
+static void pci_bus_unlock(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+}
+
+/* Return 1 on successful lock, 0 on contention */
+static int pci_bus_trylock(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (!pci_dev_trylock(dev))
+ goto unlock;
+ if (dev->subordinate) {
+ if (!pci_bus_trylock(dev->subordinate)) {
+ pci_dev_unlock(dev);
+ goto unlock;
+ }
+ }
+ }
+ return 1;
+
+unlock:
+ list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+ return 0;
+}
+
+/* Lock devices from the top of the tree down */
+static void pci_slot_lock(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
+ }
+}
+
+/* Unlock devices from the bottom of the tree up */
+static void pci_slot_unlock(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+}
+
+/* Return 1 on successful lock, 0 on contention */
+static int pci_slot_trylock(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (!pci_dev_trylock(dev))
+ goto unlock;
+ if (dev->subordinate) {
+ if (!pci_bus_trylock(dev->subordinate)) {
+ pci_dev_unlock(dev);
+ goto unlock;
+ }
+ }
+ }
+ return 1;
+
+unlock:
+ list_for_each_entry_continue_reverse(dev,
+ &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+ return 0;
+}
+
+/* Save and disable devices from the top of the tree down */
+static void pci_bus_save_and_disable(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_save_and_disable(dev);
+ if (dev->subordinate)
+ pci_bus_save_and_disable(dev->subordinate);
+ }
+}
+
+/*
+ * Restore devices from top of the tree down - parent bridges need to be
+ * restored before we can get to subordinate devices.
+ */
+static void pci_bus_restore(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_restore(dev);
+ if (dev->subordinate)
+ pci_bus_restore(dev->subordinate);
+ }
+}
+
+/* Save and disable devices from the top of the tree down */
+static void pci_slot_save_and_disable(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ pci_dev_save_and_disable(dev);
+ if (dev->subordinate)
+ pci_bus_save_and_disable(dev->subordinate);
+ }
+}
+
+/*
+ * Restore devices from top of the tree down - parent bridges need to be
+ * restored before we can get to subordinate devices.
+ */
+static void pci_slot_restore(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ pci_dev_restore(dev);
+ if (dev->subordinate)
+ pci_bus_restore(dev->subordinate);
+ }
+}
+
+static int pci_slot_reset(struct pci_slot *slot, int probe)
+{
+ int rc;
+
+ if (!slot)
+ return -ENOTTY;
+
+ if (!probe)
+ pci_slot_lock(slot);
+
+ might_sleep();
+
+ rc = pci_reset_hotplug_slot(slot->hotplug, probe);
+
+ if (!probe)
+ pci_slot_unlock(slot);
+
+ return rc;
+}
+
+/**
+ * pci_probe_reset_slot - probe whether a PCI slot can be reset
+ * @slot: PCI slot to probe
+ *
+ * Return 0 if slot can be reset, negative if a slot reset is not supported.
+ */
+int pci_probe_reset_slot(struct pci_slot *slot)
+{
+ return pci_slot_reset(slot, 1);
+}
+EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
+
+/**
+ * pci_reset_slot - reset a PCI slot
+ * @slot: PCI slot to reset
+ *
+ * A PCI bus may host multiple slots, each slot may support a reset mechanism
+ * independent of other slots. For instance, some slots may support slot power
+ * control. In the case of a 1:1 bus to slot architecture, this function may
+ * wrap the bus reset to avoid spurious slot related events such as hotplug.
+ * Generally a slot reset should be attempted before a bus reset. All of the
+ * functions of the slot and any subordinate buses behind the slot are reset
+ * through this function. PCI config space of all devices in the slot and
+ * behind the slot is saved before and restored after reset.
+ *
+ * Return 0 on success, non-zero on error.
+ */
+int pci_reset_slot(struct pci_slot *slot)
+{
+ int rc;
+
+ rc = pci_slot_reset(slot, 1);
+ if (rc)
+ return rc;
+
+ pci_slot_save_and_disable(slot);
+
+ rc = pci_slot_reset(slot, 0);
+
+ pci_slot_restore(slot);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_slot);
+
+/**
+ * pci_try_reset_slot - Try to reset a PCI slot
+ * @slot: PCI slot to reset
+ *
+ * Same as above, except return -EAGAIN if the slot cannot be locked.
+ */
+int pci_try_reset_slot(struct pci_slot *slot)
+{
+ int rc;
+
+ rc = pci_slot_reset(slot, 1);
+ if (rc)
+ return rc;
+
+ pci_slot_save_and_disable(slot);
+
+ if (pci_slot_trylock(slot)) {
+ might_sleep();
+ rc = pci_reset_hotplug_slot(slot->hotplug, 0);
+ pci_slot_unlock(slot);
+ } else
+ rc = -EAGAIN;
+
+ pci_slot_restore(slot);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_try_reset_slot);
+
+static int pci_bus_reset(struct pci_bus *bus, int probe)
+{
+ if (!bus->self)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_bus_lock(bus);
+
+ might_sleep();
+
+ pci_reset_bridge_secondary_bus(bus->self);
+
+ pci_bus_unlock(bus);
+
+ return 0;
+}
+
+/**
+ * pci_probe_reset_bus - probe whether a PCI bus can be reset
+ * @bus: PCI bus to probe
+ *
+ * Return 0 if bus can be reset, negative if a bus reset is not supported.
+ */
+int pci_probe_reset_bus(struct pci_bus *bus)
+{
+ return pci_bus_reset(bus, 1);
+}
+EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
+
+/**
+ * pci_reset_bus - reset a PCI bus
+ * @bus: top level PCI bus to reset
+ *
+ * Do a bus reset on the given bus and any subordinate buses, saving
+ * and restoring state of all devices.
+ *
+ * Return 0 on success, non-zero on error.
+ */
+int pci_reset_bus(struct pci_bus *bus)
+{
+ int rc;
+
+ rc = pci_bus_reset(bus, 1);
+ if (rc)
+ return rc;
+
+ pci_bus_save_and_disable(bus);
+
+ rc = pci_bus_reset(bus, 0);
+
+ pci_bus_restore(bus);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_bus);
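/*
 * Illustrative fallback policy (assumed caller, not part of this patch):
 * per the pci_reset_slot() comment above, attempt a slot reset before
 * falling back to a bus reset.
 */
#include <linux/pci.h>

static int example_reset_topology(struct pci_dev *dev)
{
	if (dev->slot && pci_probe_reset_slot(dev->slot) == 0)
		return pci_reset_slot(dev->slot);

	return pci_reset_bus(dev->bus);
}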
+
+/**
+ * pci_try_reset_bus - Try to reset a PCI bus
+ * @bus: top level PCI bus to reset
+ *
+ * Same as above, except it returns -EAGAIN if the bus cannot be locked.
+ */
+int pci_try_reset_bus(struct pci_bus *bus)
+{
+ int rc;
+
+ rc = pci_bus_reset(bus, 1);
+ if (rc)
+ return rc;
+
+ pci_bus_save_and_disable(bus);
+
+ if (pci_bus_trylock(bus)) {
+ might_sleep();
+ pci_reset_bridge_secondary_bus(bus->self);
+ pci_bus_unlock(bus);
+ } else
+ rc = -EAGAIN;
+
+ pci_bus_restore(bus);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_try_reset_bus);
+
+/**
* pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
* @dev: PCI device to query
*
@@ -2735,8 +3956,7 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
if (o != v) {
- if (v > o && dev->bus &&
- (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
+ if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
return -EIO;
cmd &= ~PCI_X_CMD_MAX_READ;
@@ -2757,18 +3977,11 @@ EXPORT_SYMBOL(pcix_set_mmrbc);
*/
int pcie_get_readrq(struct pci_dev *dev)
{
- int ret, cap;
u16 ctl;
- cap = pci_pcie_cap(dev);
- if (!cap)
- return -EINVAL;
-
- ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
- if (!ret)
- ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
- return ret;
+ return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);
@@ -2778,36 +3991,118 @@ EXPORT_SYMBOL(pcie_get_readrq);
* @rq: maximum memory read count in bytes
* valid values are 128, 256, 512, 1024, 2048, 4096
*
- * If possible sets maximum read byte count
+ * If possible, sets the maximum memory read request size in bytes.
*/
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
- int cap, err = -EINVAL;
- u16 ctl, v;
+ u16 v;
if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
- goto out;
+ return -EINVAL;
+
+ /*
+ * If using the "performance" PCIe config, we clamp the
+ * read request size to the max payload size to prevent the
+ * host bridge from generating requests larger than we can
+ * cope with.
+ */
+ if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
+ int mps = pcie_get_mps(dev);
+
+ if (mps < rq)
+ rq = mps;
+ }
v = (ffs(rq) - 8) << 12;
- cap = pci_pcie_cap(dev);
- if (!cap)
- goto out;
+ return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_READRQ, v);
+}
+EXPORT_SYMBOL(pcie_set_readrq);
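/*
 * Worked example of the DEVCTL encoding used above (informative only):
 * rq = 512 bytes  ->  ffs(512) = 10  ->  v = (10 - 8) << 12 = 0x2000;
 * decoding reverses it: 128 << ((0x2000 & PCI_EXP_DEVCTL_READRQ) >> 12)
 * = 128 << 2 = 512.
 */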
+
+/**
+ * pcie_get_mps - get PCI Express maximum payload size
+ * @dev: PCI device to query
+ *
+ * Returns maximum payload size in bytes
+ */
+int pcie_get_mps(struct pci_dev *dev)
+{
+ u16 ctl;
+
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
+
+ return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+}
+EXPORT_SYMBOL(pcie_get_mps);
- err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
- if (err)
- goto out;
+/**
+ * pcie_set_mps - set PCI Express maximum payload size
+ * @dev: PCI device to set
+ * @mps: maximum payload size in bytes
+ * valid values are 128, 256, 512, 1024, 2048, 4096
+ *
+ * If possible, sets the maximum payload size.
+ */
+int pcie_set_mps(struct pci_dev *dev, int mps)
+{
+ u16 v;
+
+ if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
+ return -EINVAL;
- if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
- ctl &= ~PCI_EXP_DEVCTL_READRQ;
- ctl |= v;
- err = pci_write_config_dword(dev, cap + PCI_EXP_DEVCTL, ctl);
+ v = ffs(mps) - 8;
+ if (v > dev->pcie_mpss)
+ return -EINVAL;
+ v <<= 5;
+
+ return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_PAYLOAD, v);
+}
+EXPORT_SYMBOL(pcie_set_mps);
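/*
 * Hypothetical probe-time tuning sketch (not part of this patch); the
 * 256-byte target is an assumed example value. pcie_set_mps() rejects
 * values above the device's advertised pcie_mpss.
 */
#include <linux/pci.h>

static void example_tune_mps(struct pci_dev *dev)
{
	if (pcie_get_mps(dev) < 256 && pcie_set_mps(dev, 256))
		dev_warn(&dev->dev, "MPS of 256 rejected (over pcie_mpss?)\n");
}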
+
+/**
+ * pcie_get_minimum_link - determine minimum link settings of a PCI device
+ * @dev: PCI device to query
+ * @speed: storage for minimum speed
+ * @width: storage for minimum width
+ *
+ * This function will walk up the PCI device chain and determine the minimum
+ * link width and speed of the device.
+ */
+int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ int ret;
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ while (dev) {
+ u16 lnksta;
+ enum pci_bus_speed next_speed;
+ enum pcie_link_width next_width;
+
+ ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+ if (ret)
+ return ret;
+
+ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+ next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+ PCI_EXP_LNKSTA_NLW_SHIFT;
+
+ if (next_speed < *speed)
+ *speed = next_speed;
+
+ if (next_width < *width)
+ *width = next_width;
+
+ dev = dev->bus->self;
}
-out:
- return err;
+ return 0;
}
-EXPORT_SYMBOL(pcie_set_readrq);
+EXPORT_SYMBOL(pcie_get_minimum_link);
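/*
 * Sketch of the kind of bandwidth sanity check a high-speed device driver
 * might perform with this helper (hypothetical, not part of this patch).
 */
#include <linux/pci.h>

static void example_check_link(struct pci_dev *dev)
{
	enum pci_bus_speed speed;
	enum pcie_link_width width;

	if (pcie_get_minimum_link(dev, &speed, &width))
		return;

	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		dev_warn(&dev->dev, "unable to determine PCIe link settings\n");
}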
/**
* pci_select_bars - Make BAR mask from the type of resource
@@ -2824,6 +4119,7 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
bars |= (1 << i);
return bars;
}
+EXPORT_SYMBOL(pci_select_bars);
/**
* pci_resource_bar - get position of the BAR associated with a resource
@@ -2863,11 +4159,11 @@ void __init pci_register_set_vga_state(arch_set_vga_state_t func)
}
static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
- unsigned int command_bits, bool change_bridge)
+ unsigned int command_bits, u32 flags)
{
if (arch_set_vga_state)
return arch_set_vga_state(dev, decode, command_bits,
- change_bridge);
+ flags);
return 0;
}
@@ -2876,31 +4172,34 @@ static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
* @dev: the PCI device
* @decode: true = enable decoding, false = disable decoding
* @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
- * @change_bridge: traverse ancestors and change bridges
+ * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE,
+ * selecting whether to update the device's own decoding, its bridges, or both
*/
int pci_set_vga_state(struct pci_dev *dev, bool decode,
- unsigned int command_bits, bool change_bridge)
+ unsigned int command_bits, u32 flags)
{
struct pci_bus *bus;
struct pci_dev *bridge;
u16 cmd;
int rc;
- WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
+ WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
/* ARCH specific VGA enables */
- rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
+ rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
if (rc)
return rc;
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- if (decode == true)
- cmd |= command_bits;
- else
- cmd &= ~command_bits;
- pci_write_config_word(dev, PCI_COMMAND, cmd);
+ if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (decode == true)
+ cmd |= command_bits;
+ else
+ cmd &= ~command_bits;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
- if (change_bridge == false)
+ if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
return 0;
bus = dev->bus;
@@ -2921,6 +4220,14 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
return 0;
}
+bool pci_device_is_present(struct pci_dev *pdev)
+{
+ u32 v;
+
+ return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
+}
+EXPORT_SYMBOL_GPL(pci_device_is_present);
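/*
 * Typical use (assumed caller, not part of this patch): bail out before
 * touching a device that may have been surprise-removed, since config
 * reads then return all 1s.
 */
#include <linux/pci.h>

static int example_start_io(struct pci_dev *pdev)
{
	if (!pci_device_is_present(pdev))
		return -ENODEV;

	return 0;	/* safe to proceed with config/MMIO access */
}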
+
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
@@ -2932,7 +4239,7 @@ static DEFINE_SPINLOCK(resource_alignment_lock);
* RETURNS: Resource alignment if it is specified.
* Zero if it is not specified.
*/
-resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
+static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
int seg, bus, slot, func, align_order, count;
resource_size_t align = 0;
@@ -2964,11 +4271,10 @@ resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
bus == dev->bus->number &&
slot == PCI_SLOT(dev->devfn) &&
func == PCI_FUNC(dev->devfn)) {
- if (align_order == -1) {
+ if (align_order == -1)
align = PAGE_SIZE;
- } else {
+ else
align = 1 << align_order;
- }
/* Found */
break;
}
@@ -2982,19 +4288,72 @@ resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
return align;
}
-/**
- * pci_is_reassigndev - check if specified PCI is target device to reassign
- * @dev: the PCI device to check
- *
- * RETURNS: non-zero for PCI device is a target device to reassign,
- * or zero is not.
+/*
+ * This function disables memory decoding and releases memory resources
+ * of the device specified by the kernel's boot parameter
+ * 'pci=resource_alignment='. It also rounds up resource sizes to the
+ * specified alignment.
+ * Later on, the kernel will assign page-aligned memory resource back
+ * to the device.
*/
-int pci_is_reassigndev(struct pci_dev *dev)
+void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
- return (pci_specified_resource_alignment(dev) != 0);
+ int i;
+ struct resource *r;
+ resource_size_t align, size;
+ u16 command;
+
+ /* check if specified PCI is target device to reassign */
+ align = pci_specified_resource_alignment(dev);
+ if (!align)
+ return;
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
+ dev_warn(&dev->dev,
+ "Can't reassign resources to host bridge.\n");
+ return;
+ }
+
+ dev_info(&dev->dev,
+ "Disabling memory decoding and releasing memory resources.\n");
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ command &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(dev, PCI_COMMAND, command);
+
+ for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
+ r = &dev->resource[i];
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
+ size = resource_size(r);
+ if (size < align) {
+ size = align;
+ dev_info(&dev->dev,
+ "Rounding up size of resource #%d to %#llx.\n",
+ i, (unsigned long long)size);
+ }
+ r->flags |= IORESOURCE_UNSET;
+ r->end = size - 1;
+ r->start = 0;
+ }
+ /*
+ * Need to disable bridge's resource window to enable the kernel
+ * to reassign new resource window later on.
+ */
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
+ r = &dev->resource[i];
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
+ r->flags |= IORESOURCE_UNSET;
+ r->end = resource_size(r) - 1;
+ r->start = 0;
+ }
+ pci_disable_bridge_window(dev);
+ }
}
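/*
 * Example of the boot parameter consumed above (the device address is
 * illustrative): "pci=resource_alignment=20@0000:00:1f.0" requests
 * 2^20 (1 MiB) alignment for that device; with no "<order>@" prefix,
 * PAGE_SIZE alignment is used.
 */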
-ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
+static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
@@ -3005,7 +4364,7 @@ ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
return count;
}
-ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
+static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
size_t count;
spin_lock(&resource_alignment_lock);
@@ -3033,10 +4392,9 @@ static int __init pci_resource_alignment_sysfs_init(void)
return bus_create_file(&pci_bus_type,
&bus_attr_resource_alignment);
}
-
late_initcall(pci_resource_alignment_sysfs_init);
-static void __devinit pci_no_domains(void)
+static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
pci_domains_supported = 0;
@@ -3044,14 +4402,13 @@ static void __devinit pci_no_domains(void)
}
/**
- * pci_ext_cfg_enabled - can we access extended PCI config space?
- * @dev: The PCI device of the root bridge.
+ * pci_ext_cfg_avail - can we access extended PCI config space?
*
* Returns 1 if we can access PCI extended config space (offsets
* greater than 0xff). This is the default implementation. Architecture
* implementations can override this.
*/
-int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
+int __weak pci_ext_cfg_avail(void)
{
return 1;
}
@@ -3072,8 +4429,14 @@ static int __init pci_setup(char *str)
pci_no_msi();
} else if (!strcmp(str, "noaer")) {
pci_no_aer();
+ } else if (!strncmp(str, "realloc=", 8)) {
+ pci_realloc_get_opt(str + 8);
+ } else if (!strncmp(str, "realloc", 7)) {
+ pci_realloc_get_opt("on");
} else if (!strcmp(str, "nodomains")) {
pci_no_domains();
+ } else if (!strncmp(str, "noari", 5)) {
+ pcie_ari_disabled = true;
} else if (!strncmp(str, "cbiosize=", 9)) {
pci_cardbus_io_size = memparse(str + 9, &str);
} else if (!strncmp(str, "cbmemsize=", 10)) {
@@ -3087,6 +4450,16 @@ static int __init pci_setup(char *str)
pci_hotplug_io_size = memparse(str + 9, &str);
} else if (!strncmp(str, "hpmemsize=", 10)) {
pci_hotplug_mem_size = memparse(str + 10, &str);
+ } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
+ pcie_bus_config = PCIE_BUS_TUNE_OFF;
+ } else if (!strncmp(str, "pcie_bus_safe", 13)) {
+ pcie_bus_config = PCIE_BUS_SAFE;
+ } else if (!strncmp(str, "pcie_bus_perf", 13)) {
+ pcie_bus_config = PCIE_BUS_PERFORMANCE;
+ } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
+ pcie_bus_config = PCIE_BUS_PEER2PEER;
+ } else if (!strncmp(str, "pcie_scan_all", 13)) {
+ pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
@@ -3097,42 +4470,3 @@ static int __init pci_setup(char *str)
return 0;
}
early_param("pci", pci_setup);
-
-EXPORT_SYMBOL(pci_reenable_device);
-EXPORT_SYMBOL(pci_enable_device_io);
-EXPORT_SYMBOL(pci_enable_device_mem);
-EXPORT_SYMBOL(pci_enable_device);
-EXPORT_SYMBOL(pcim_enable_device);
-EXPORT_SYMBOL(pcim_pin_device);
-EXPORT_SYMBOL(pci_disable_device);
-EXPORT_SYMBOL(pci_find_capability);
-EXPORT_SYMBOL(pci_bus_find_capability);
-EXPORT_SYMBOL(pci_release_regions);
-EXPORT_SYMBOL(pci_request_regions);
-EXPORT_SYMBOL(pci_request_regions_exclusive);
-EXPORT_SYMBOL(pci_release_region);
-EXPORT_SYMBOL(pci_request_region);
-EXPORT_SYMBOL(pci_request_region_exclusive);
-EXPORT_SYMBOL(pci_release_selected_regions);
-EXPORT_SYMBOL(pci_request_selected_regions);
-EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
-EXPORT_SYMBOL(pci_set_master);
-EXPORT_SYMBOL(pci_clear_master);
-EXPORT_SYMBOL(pci_set_mwi);
-EXPORT_SYMBOL(pci_try_set_mwi);
-EXPORT_SYMBOL(pci_clear_mwi);
-EXPORT_SYMBOL_GPL(pci_intx);
-EXPORT_SYMBOL(pci_assign_resource);
-EXPORT_SYMBOL(pci_find_parent_resource);
-EXPORT_SYMBOL(pci_select_bars);
-
-EXPORT_SYMBOL(pci_set_power_state);
-EXPORT_SYMBOL(pci_save_state);
-EXPORT_SYMBOL(pci_restore_state);
-EXPORT_SYMBOL(pci_pme_capable);
-EXPORT_SYMBOL(pci_pme_active);
-EXPORT_SYMBOL(pci_wake_from_d3);
-EXPORT_SYMBOL(pci_target_state);
-EXPORT_SYMBOL(pci_prepare_to_sleep);
-EXPORT_SYMBOL(pci_back_from_sleep);
-EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index f5c7c382765..0601890db22 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,29 +1,32 @@
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H
-#include <linux/workqueue.h>
-
#define PCI_CFG_SPACE_SIZE 256
#define PCI_CFG_SPACE_EXP_SIZE 4096
+extern const unsigned char pcie_link_speed[];
+
/* Functions internal to the PCI core code */
-extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env);
-extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
-extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
-#ifndef CONFIG_DMI
+int pci_create_sysfs_dev_files(struct pci_dev *pdev);
+void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
+#if !defined(CONFIG_DMI) && !defined(CONFIG_ACPI)
static inline void pci_create_firmware_label_files(struct pci_dev *pdev)
{ return; }
static inline void pci_remove_firmware_label_files(struct pci_dev *pdev)
{ return; }
#else
-extern void pci_create_firmware_label_files(struct pci_dev *pdev);
-extern void pci_remove_firmware_label_files(struct pci_dev *pdev);
+void pci_create_firmware_label_files(struct pci_dev *pdev);
+void pci_remove_firmware_label_files(struct pci_dev *pdev);
#endif
-extern void pci_cleanup_rom(struct pci_dev *dev);
+void pci_cleanup_rom(struct pci_dev *dev);
#ifdef HAVE_PCI_MMAP
-extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
- struct vm_area_struct *vma);
+enum pci_mmap_api {
+ PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
+ PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */
+};
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
+ enum pci_mmap_api mmap_api);
#endif
int pci_probe_reset_function(struct pci_dev *dev);
@@ -39,9 +42,6 @@ int pci_probe_reset_function(struct pci_dev *dev);
* platform; to be used during system-wide transitions from a
* sleeping state to the working state and vice versa
*
- * @can_wakeup: returns 'true' if given device is capable of waking up the
- * system from a sleeping state
- *
* @sleep_wake: enables/disables the system wake up capability of given device
*
* @run_wake: enables/disables the platform to generate run-time wake-up events
@@ -55,32 +55,33 @@ struct pci_platform_pm_ops {
bool (*is_manageable)(struct pci_dev *dev);
int (*set_state)(struct pci_dev *dev, pci_power_t state);
pci_power_t (*choose_state)(struct pci_dev *dev);
- bool (*can_wakeup)(struct pci_dev *dev);
int (*sleep_wake)(struct pci_dev *dev, bool enable);
int (*run_wake)(struct pci_dev *dev, bool enable);
};
-extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
-extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
-extern void pci_disable_enabled_device(struct pci_dev *dev);
-extern int pci_finish_runtime_suspend(struct pci_dev *dev);
-extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
-extern void pci_pm_init(struct pci_dev *dev);
-extern void platform_pci_wakeup_init(struct pci_dev *dev);
-extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
+void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
+void pci_power_up(struct pci_dev *dev);
+void pci_disable_enabled_device(struct pci_dev *dev);
+int pci_finish_runtime_suspend(struct pci_dev *dev);
+int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
+void pci_config_pm_runtime_get(struct pci_dev *dev);
+void pci_config_pm_runtime_put(struct pci_dev *dev);
+void pci_pm_init(struct pci_dev *dev);
+void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+void pci_free_cap_save_buffers(struct pci_dev *dev);
+
+static inline void pci_wakeup_event(struct pci_dev *dev)
+{
+ /* Wait 100 ms before the system can be put into a sleep state. */
+ pm_wakeup_event(&dev->dev, 100);
+}
-static inline bool pci_is_bridge(struct pci_dev *pci_dev)
+static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
{
return !!(pci_dev->subordinate);
}
-extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
-extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
-extern int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
-extern int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
-extern int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
-extern int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
-
struct pci_vpd_ops {
ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
@@ -93,7 +94,7 @@ struct pci_vpd {
struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
};
-extern int pci_vpd_pci22_init(struct pci_dev *dev);
+int pci_vpd_pci22_init(struct pci_dev *dev);
static inline void pci_vpd_release(struct pci_dev *dev)
{
if (dev->vpd)
@@ -102,9 +103,9 @@ static inline void pci_vpd_release(struct pci_dev *dev)
/* PCI /proc functions */
#ifdef CONFIG_PROC_FS
-extern int pci_proc_attach_device(struct pci_dev *dev);
-extern int pci_proc_detach_device(struct pci_dev *dev);
-extern int pci_proc_detach_bus(struct pci_bus *bus);
+int pci_proc_attach_device(struct pci_dev *dev);
+int pci_proc_detach_device(struct pci_dev *dev);
+int pci_proc_detach_bus(struct pci_bus *bus);
#else
static inline int pci_proc_attach_device(struct pci_dev *dev) { return 0; }
static inline int pci_proc_detach_device(struct pci_dev *dev) { return 0; }
@@ -112,11 +113,11 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
#endif
/* Functions for PCI Hotplug drivers to use */
-extern unsigned int pci_do_scan_bus(struct pci_bus *bus);
+int pci_hp_add_bridge(struct pci_dev *dev);
#ifdef HAVE_PCI_LEGACY
-extern void pci_create_legacy_files(struct pci_bus *bus);
-extern void pci_remove_legacy_files(struct pci_bus *bus);
+void pci_create_legacy_files(struct pci_bus *bus);
+void pci_remove_legacy_files(struct pci_bus *bus);
#else
static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
@@ -125,23 +126,19 @@ static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
/* Lock for read/write access to pci device and bus lists */
extern struct rw_semaphore pci_bus_sem;
+extern raw_spinlock_t pci_lock;
+
extern unsigned int pci_pm_d3_delay;
#ifdef CONFIG_PCI_MSI
void pci_no_msi(void);
-extern void pci_msi_init_pci_dev(struct pci_dev *dev);
+void pci_msi_init_pci_dev(struct pci_dev *dev);
#else
static inline void pci_no_msi(void) { }
static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
#endif
-#ifdef CONFIG_PCIEAER
-void pci_no_aer(void);
-bool pci_aer_available(void);
-#else
-static inline void pci_no_aer(void) { }
-static inline bool pci_aer_available(void) { return false; }
-#endif
+void pci_realloc_get_opt(char *);
static inline int pci_no_d1d2(struct pci_dev *dev)
{
@@ -152,14 +149,10 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
return (dev->no_d1d2 || parent_dstates);
}
-extern struct device_attribute pci_dev_attrs[];
-extern struct device_attribute dev_attr_cpuaffinity;
-extern struct device_attribute dev_attr_cpulistaffinity;
-#ifdef CONFIG_HOTPLUG
-extern struct bus_attribute pci_bus_attrs[];
-#else
-#define pci_bus_attrs NULL
-#endif
+extern const struct attribute_group *pci_dev_groups[];
+extern const struct attribute_group *pcibus_groups[];
+extern struct device_type pci_dev_type;
+extern const struct attribute_group *pci_bus_groups[];
/**
@@ -182,8 +175,6 @@ pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
return NULL;
}
-struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
-
/* PCI slot sysfs helper code */
#define to_pci_slot(s) container_of(s, struct pci_slot, kobj)
@@ -203,13 +194,19 @@ enum pci_bar_type {
pci_bar_mem64, /* A 64-bit memory BAR */
};
-extern int pci_setup_device(struct pci_dev *dev);
-extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int reg);
-extern int pci_resource_bar(struct pci_dev *dev, int resno,
- enum pci_bar_type *type);
-extern int pci_bus_add_child(struct pci_bus *bus);
-extern void pci_enable_ari(struct pci_dev *dev);
+bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
+ int crs_timeout);
+int pci_setup_device(struct pci_dev *dev);
+int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ struct resource *res, unsigned int reg);
+int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
+void pci_configure_ari(struct pci_dev *dev);
+void __pci_bus_size_bridges(struct pci_bus *bus,
+ struct list_head *realloc_head);
+void __pci_bus_assign_resources(const struct pci_bus *bus,
+ struct list_head *realloc_head,
+ struct list_head *fail_head);
+
/**
* pci_ari_enabled - query ARI forwarding status
* @bus: the PCI bus
@@ -221,11 +218,8 @@ static inline int pci_ari_enabled(struct pci_bus *bus)
return bus->self && bus->self->ari_enabled;
}
-#ifdef CONFIG_PCI_QUIRKS
-extern int pci_is_reassigndev(struct pci_dev *dev);
-resource_size_t pci_specified_resource_alignment(struct pci_dev *dev);
-extern void pci_disable_bridge_window(struct pci_dev *dev);
-#endif
+void pci_reassigndev_resource_alignment(struct pci_dev *dev);
+void pci_disable_bridge_window(struct pci_dev *dev);
/* Single Root I/O Virtualization */
struct pci_sriov {
@@ -233,52 +227,36 @@ struct pci_sriov {
int nres; /* number of resources */
u32 cap; /* SR-IOV Capabilities */
u16 ctrl; /* SR-IOV Control */
- u16 total; /* total VFs associated with the PF */
- u16 initial; /* initial VFs associated with the PF */
- u16 nr_virtfn; /* number of VFs available */
+ u16 total_VFs; /* total VFs associated with the PF */
+ u16 initial_VFs; /* initial VFs associated with the PF */
+ u16 num_VFs; /* number of VFs available */
u16 offset; /* first VF Routing ID offset */
u16 stride; /* following VF stride */
u32 pgsz; /* page size for BAR alignment */
u8 link; /* Function Dependency Link */
+ u16 driver_max_VFs; /* max num VFs driver supports */
struct pci_dev *dev; /* lowest numbered PF */
struct pci_dev *self; /* this PF */
struct mutex lock; /* lock for VF bus */
- struct work_struct mtask; /* VF Migration task */
- u8 __iomem *mstate; /* VF Migration State Array */
-};
-
-/* Address Translation Service */
-struct pci_ats {
- int pos; /* capability position */
- int stu; /* Smallest Translation Unit */
- int qdep; /* Invalidate Queue Depth */
- int ref_cnt; /* Physical Function reference count */
- unsigned int is_enabled:1; /* Enable bit is set */
};
-#ifdef CONFIG_PCI_IOV
-extern int pci_iov_init(struct pci_dev *dev);
-extern void pci_iov_release(struct pci_dev *dev);
-extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
- enum pci_bar_type *type);
-extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
- int resno);
-extern void pci_restore_iov_state(struct pci_dev *dev);
-extern int pci_iov_bus_range(struct pci_bus *bus);
-
-extern int pci_enable_ats(struct pci_dev *dev, int ps);
-extern void pci_disable_ats(struct pci_dev *dev);
-extern int pci_ats_queue_depth(struct pci_dev *dev);
-/**
- * pci_ats_enabled - query the ATS status
- * @dev: the PCI device
- *
- * Returns 1 if ATS capability is enabled, or 0 if not.
- */
-static inline int pci_ats_enabled(struct pci_dev *dev)
+#ifdef CONFIG_PCI_ATS
+void pci_restore_ats_state(struct pci_dev *dev);
+#else
+static inline void pci_restore_ats_state(struct pci_dev *dev)
{
- return dev->ats && dev->ats->is_enabled;
}
+#endif /* CONFIG_PCI_ATS */
+
+#ifdef CONFIG_PCI_IOV
+int pci_iov_init(struct pci_dev *dev);
+void pci_iov_release(struct pci_dev *dev);
+int pci_iov_resource_bar(struct pci_dev *dev, int resno,
+ enum pci_bar_type *type);
+resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
+void pci_restore_iov_state(struct pci_dev *dev);
+int pci_iov_bus_range(struct pci_bus *bus);
+
#else
static inline int pci_iov_init(struct pci_dev *dev)
{
@@ -301,25 +279,12 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
return 0;
}
-static inline int pci_enable_ats(struct pci_dev *dev, int ps)
-{
- return -ENODEV;
-}
-static inline void pci_disable_ats(struct pci_dev *dev)
-{
-}
-static inline int pci_ats_queue_depth(struct pci_dev *dev)
-{
- return -ENODEV;
-}
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
- return 0;
-}
#endif /* CONFIG_PCI_IOV */
+unsigned long pci_cardbus_resource_alignment(struct resource *);
+
static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
- struct resource *res)
+ struct resource *res)
{
#ifdef CONFIG_PCI_IOV
int resno = res - dev->resource;
@@ -327,10 +292,12 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
return pci_sriov_resource_alignment(dev, resno);
#endif
+ if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS)
+ return pci_cardbus_resource_alignment(res);
return resource_alignment(res);
}
-extern void pci_enable_acs(struct pci_dev *dev);
+void pci_enable_acs(struct pci_dev *dev);
struct pci_dev_reset_methods {
u16 vendor;
@@ -339,7 +306,7 @@ struct pci_dev_reset_methods {
};
#ifdef CONFIG_PCI_QUIRKS
-extern int pci_dev_specific_reset(struct pci_dev *dev, int probe);
+int pci_dev_specific_reset(struct pci_dev *dev, int probe);
#else
static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
{
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index dda70981b7a..7958e59d607 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -2,7 +2,7 @@
# PCI Express Port Bus Configuration
#
config PCIEPORTBUS
- bool "PCI Express support"
+ bool "PCI Express Port Bus support"
depends on PCI
help
This automatically enables PCI Express Port Bus support. Users can
@@ -14,15 +14,12 @@ config PCIEPORTBUS
# Include service Kconfig here
#
config HOTPLUG_PCI_PCIE
- tristate "PCI Express Hotplug driver"
+ bool "PCI Express Hotplug driver"
depends on HOTPLUG_PCI && PCIEPORTBUS
help
Say Y here if you have a motherboard that supports PCI Express Native
Hotplug
- To compile this driver as a module, choose M here: the
- module will be called pciehp.
-
When in doubt, say N.
source "drivers/pci/pcie/aer/Kconfig"
@@ -31,7 +28,7 @@ source "drivers/pci/pcie/aer/Kconfig"
# PCI Express ASPM
#
config PCIEASPM
- bool "PCI Express ASPM control" if EMBEDDED
+ bool "PCI Express ASPM control" if EXPERT
depends on PCI && PCIEPORTBUS
default y
help
@@ -39,7 +36,7 @@ config PCIEASPM
Power Management) and Clock Power Management. ASPM supports
state L0/L0s/L1.
- ASPM is initially set up the the firmware. With this option enabled,
+ ASPM is initially set up by the firmware. With this option enabled,
Linux can modify this state in order to disable ASPM on known-bad
hardware or configurations and enable it when known-safe.
@@ -55,6 +52,31 @@ config PCIEASPM_DEBUG
This enables PCI Express ASPM debug support. It will add per-device
interface to control ASPM.
+choice
+ prompt "Default ASPM policy"
+ default PCIEASPM_DEFAULT
+ depends on PCIEASPM
+
+config PCIEASPM_DEFAULT
+ bool "BIOS default"
+ depends on PCIEASPM
+ help
+ Use the BIOS defaults for PCI Express ASPM.
+
+config PCIEASPM_POWERSAVE
+ bool "Powersave"
+ depends on PCIEASPM
+ help
+ Enable PCI Express ASPM L0s and L1 where possible, even if the
+ BIOS did not.
+
+config PCIEASPM_PERFORMANCE
+ bool "Performance"
+ depends on PCIEASPM
+ help
+ Disable PCI Express ASPM L0s and L1, even if the BIOS enabled them.
+endchoice
+
config PCIE_PME
def_bool y
- depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI
+ depends on PCIEPORTBUS && PM_RUNTIME
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index b3cf6223f63..182224acedb 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -27,6 +27,10 @@
#include <linux/stddef.h>
#include "aerdrv.h"
+/* Override the existing corrected and uncorrected error masks */
+static bool aer_mask_override;
+module_param(aer_mask_override, bool, 0);
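/*
 * Example load-time use of the new parameter (assuming the injector is
 * built as the usual aer_inject module):
 *	modprobe aer_inject aer_mask_override=1
 * This lets injected errors through even when the device masks them.
 */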
+
struct aer_error_inj {
u8 bus;
u8 dev;
@@ -208,8 +212,8 @@ out:
return ops->read(bus, devfn, where, size, val);
}
-int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
- u32 val)
+static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 val)
{
u32 *sim;
struct aer_error *err;
@@ -284,7 +288,7 @@ static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
while (1) {
if (!pci_is_pcie(dev))
break;
- if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
return dev;
if (!dev->bus->self)
break;
@@ -322,7 +326,7 @@ static int aer_inject(struct aer_error_inj *einj)
unsigned long flags;
unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
int pos_cap_err, rp_pos_cap_err;
- u32 sever, cor_mask, uncor_mask;
+ u32 sever, cor_mask, uncor_mask, cor_mask_orig = 0, uncor_mask_orig = 0;
int ret = 0;
dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
@@ -330,13 +334,13 @@ static int aer_inject(struct aer_error_inj *einj)
return -ENODEV;
rpdev = pcie_find_root_port(dev);
if (!rpdev) {
- ret = -ENOTTY;
+ ret = -ENODEV;
goto out_put;
}
pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos_cap_err) {
- ret = -ENOTTY;
+ ret = -EPERM;
goto out_put;
}
pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
@@ -346,7 +350,7 @@ static int aer_inject(struct aer_error_inj *einj)
rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
if (!rp_pos_cap_err) {
- ret = -ENOTTY;
+ ret = -EPERM;
goto out_put;
}
@@ -361,6 +365,18 @@ static int aer_inject(struct aer_error_inj *einj)
goto out_put;
}
+ if (aer_mask_override) {
+ cor_mask_orig = cor_mask;
+ cor_mask &= !(einj->cor_status);
+ pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
+ cor_mask);
+
+ uncor_mask_orig = uncor_mask;
+ uncor_mask &= !(einj->uncor_status);
+ pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
+ uncor_mask);
+ }
+
spin_lock_irqsave(&inject_lock, flags);
err = __find_aer_error_by_dev(dev);
@@ -378,17 +394,17 @@ static int aer_inject(struct aer_error_inj *einj)
err->header_log2 = einj->header_log2;
err->header_log3 = einj->header_log3;
- if (einj->cor_status && !(einj->cor_status & ~cor_mask)) {
+ if (!aer_mask_override && einj->cor_status &&
+ !(einj->cor_status & ~cor_mask)) {
ret = -EINVAL;
- printk(KERN_WARNING "The correctable error(s) is masked "
- "by device\n");
+ printk(KERN_WARNING "The correctable error(s) is masked by device\n");
spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
- if (einj->uncor_status && !(einj->uncor_status & ~uncor_mask)) {
+ if (!aer_mask_override && einj->uncor_status &&
+ !(einj->uncor_status & ~uncor_mask)) {
ret = -EINVAL;
- printk(KERN_WARNING "The uncorrectable error(s) is masked "
- "by device\n");
+ printk(KERN_WARNING "The uncorrectable error(s) is masked by device\n");
spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
@@ -425,6 +441,13 @@ static int aer_inject(struct aer_error_inj *einj)
}
spin_unlock_irqrestore(&inject_lock, flags);
+ if (aer_mask_override) {
+ pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
+ cor_mask_orig);
+ pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
+ uncor_mask_orig);
+ }
+
ret = pci_bus_set_aer_ops(dev->bus);
if (ret)
goto out_put;
@@ -439,8 +462,7 @@ static int aer_inject(struct aer_error_inj *einj)
goto out_put;
}
aer_irq(-1, edev);
- }
- else
+ } else
ret = -EINVAL;
out_put:
kfree(err_alloc);
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 2b2b6508efd..0bf82a20a0f 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -40,14 +41,14 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-static int __devinit aer_probe(struct pcie_device *dev);
+static int aer_probe(struct pcie_device *dev);
static void aer_remove(struct pcie_device *dev);
static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
enum pci_channel_state error);
static void aer_error_resume(struct pci_dev *dev);
static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
-static struct pci_error_handlers aer_error_handlers = {
+static const struct pci_error_handlers aer_error_handlers = {
.error_detected = aer_error_detected,
.resume = aer_error_resume,
};
@@ -80,10 +81,11 @@ bool pci_aer_available(void)
static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
+ int type = pci_pcie_type(dev);
- if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
- (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) ||
- (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) {
+ if ((type == PCI_EXP_TYPE_ROOT_PORT) ||
+ (type == PCI_EXP_TYPE_UPSTREAM) ||
+ (type == PCI_EXP_TYPE_DOWNSTREAM)) {
if (enable)
pci_enable_pcie_error_reporting(dev);
else
@@ -120,19 +122,17 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
static void aer_enable_rootport(struct aer_rpc *rpc)
{
struct pci_dev *pdev = rpc->rpd->port;
- int pos, aer_pos;
+ int aer_pos;
u16 reg16;
u32 reg32;
- pos = pci_pcie_cap(pdev);
/* Clear PCIe Capability's Device Status */
- pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
- pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
+ pcie_capability_read_word(pdev, PCI_EXP_DEVSTA, &reg16);
+ pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, reg16);
/* Disable system error generation in response to error messages */
- pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, &reg16);
- reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK);
- pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16);
+ pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
+ SYSTEM_ERROR_INTR_ON_MESG_MASK);
aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
/* Clear error status */
@@ -300,7 +300,7 @@ static void aer_remove(struct pcie_device *dev)
*
* Invoked when PCI Express bus loads AER service driver.
*/
-static int __devinit aer_probe(struct pcie_device *dev)
+static int aer_probe(struct pcie_device *dev)
{
int status;
struct aer_rpc *rpc;
@@ -352,7 +352,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
- aer_do_secondary_bus_reset(dev);
+ pci_reset_bridge_secondary_bus(dev);
dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n");
/* Clear Root Error Status */
@@ -394,9 +394,8 @@ static void aer_error_resume(struct pci_dev *dev)
u16 reg16;
/* Clean up Root device status */
- pos = pci_pcie_cap(dev);
- pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &reg16);
- pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16);
+ pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &reg16);
+ pcie_capability_write_word(dev, PCI_EXP_DEVSTA, reg16);
/* Clean AER Root Error Status */
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 9656e306041..84420b7c945 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -13,10 +13,6 @@
#include <linux/aer.h>
#include <linux/interrupt.h>
-#define AER_NONFATAL 0
-#define AER_FATAL 1
-#define AER_CORRECTABLE 2
-
#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
PCI_EXP_RTCTL_SENFEE| \
PCI_EXP_RTCTL_SEFEE)
@@ -35,13 +31,6 @@
PCI_ERR_UNC_UNX_COMP| \
PCI_ERR_UNC_MALF_TLP)
-struct header_log_regs {
- unsigned int dw0;
- unsigned int dw1;
- unsigned int dw2;
- unsigned int dw3;
-};
-
#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
struct aer_err_info {
struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
@@ -59,7 +48,7 @@ struct aer_err_info {
unsigned int status; /* COR/UNCOR Error Status */
unsigned int mask; /* COR/UNCOR Error Mask */
- struct header_log_regs tlp; /* TLP Header */
+ struct aer_header_log_regs tlp; /* TLP Header */
};
struct aer_err_source {
@@ -94,6 +83,9 @@ struct aer_broadcast_data {
static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
enum pci_ers_result new)
{
+ if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
+ return PCI_ERS_RESULT_NO_AER_DRIVER;
+
if (new == PCI_ERS_RESULT_NONE)
return orig;
@@ -104,7 +96,7 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
break;
case PCI_ERS_RESULT_DISCONNECT:
if (new == PCI_ERS_RESULT_NEED_RESET)
- orig = new;
+ orig = PCI_ERS_RESULT_NEED_RESET;
break;
default:
break;
@@ -114,25 +106,14 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
}
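/*
 * Precedence example for the hunk above: merging PCI_ERS_RESULT_CAN_RECOVER
 * with a later PCI_ERS_RESULT_NO_AER_DRIVER yields NO_AER_DRIVER regardless
 * of order, so a single device lacking an error_detected() callback vetoes
 * recovery for the whole subtree; report_error_detected() below relies on
 * exactly this behavior.
 */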
extern struct bus_type pcie_port_bus_type;
-extern void aer_do_secondary_bus_reset(struct pci_dev *dev);
-extern int aer_init(struct pcie_device *dev);
-extern void aer_isr(struct work_struct *work);
-extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
-extern void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
-extern irqreturn_t aer_irq(int irq, void *context);
-
-#ifdef CONFIG_ACPI
-extern int aer_osc_setup(struct pcie_device *pciedev);
-#else
-static inline int aer_osc_setup(struct pcie_device *pciedev)
-{
- return 0;
-}
-#endif
+int aer_init(struct pcie_device *dev);
+void aer_isr(struct work_struct *work);
+void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
+irqreturn_t aer_irq(int irq, void *context);
#ifdef CONFIG_ACPI_APEI
-extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
-extern bool aer_acpi_firmware_first(void);
+int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
#else
static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
{
@@ -140,8 +121,6 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
return pci_dev->__aer_firmware_first;
return 0;
}
-
-static inline bool aer_acpi_firmware_first(void) { return false; }
#endif
static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 275bf158ffa..01906576ab9 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -23,10 +23,26 @@
static inline int hest_match_pci(struct acpi_hest_aer_common *p,
struct pci_dev *pci)
{
- return (0 == pci_domain_nr(pci->bus) &&
- p->bus == pci->bus->number &&
- p->device == PCI_SLOT(pci->devfn) &&
- p->function == PCI_FUNC(pci->devfn));
+ return ACPI_HEST_SEGMENT(p->bus) == pci_domain_nr(pci->bus) &&
+ ACPI_HEST_BUS(p->bus) == pci->bus->number &&
+ p->device == PCI_SLOT(pci->devfn) &&
+ p->function == PCI_FUNC(pci->devfn);
+}
+
+static inline bool hest_match_type(struct acpi_hest_header *hest_hdr,
+ struct pci_dev *dev)
+{
+ u16 hest_type = hest_hdr->type;
+ u8 pcie_type = pci_pcie_type(dev);
+
+ if ((hest_type == ACPI_HEST_TYPE_AER_ROOT_PORT &&
+ pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
+ (hest_type == ACPI_HEST_TYPE_AER_ENDPOINT &&
+ pcie_type == PCI_EXP_TYPE_ENDPOINT) ||
+ (hest_type == ACPI_HEST_TYPE_AER_BRIDGE &&
+ (dev->class >> 16) == PCI_BASE_CLASS_BRIDGE))
+ return true;
+ return false;
}
struct aer_hest_parse_info {
@@ -34,38 +50,43 @@ struct aer_hest_parse_info {
int firmware_first;
};
+static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr)
+{
+ if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT ||
+ hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT ||
+ hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE)
+ return 1;
+ return 0;
+}
+
static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
{
struct aer_hest_parse_info *info = data;
struct acpi_hest_aer_common *p;
- u8 pcie_type = 0;
- u8 bridge = 0;
- int ff = 0;
-
- switch (hest_hdr->type) {
- case ACPI_HEST_TYPE_AER_ROOT_PORT:
- pcie_type = PCI_EXP_TYPE_ROOT_PORT;
- break;
- case ACPI_HEST_TYPE_AER_ENDPOINT:
- pcie_type = PCI_EXP_TYPE_ENDPOINT;
- break;
- case ACPI_HEST_TYPE_AER_BRIDGE:
- if ((info->pci_dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
- bridge = 1;
- break;
- default:
+ int ff;
+
+ if (!hest_source_is_pcie_aer(hest_hdr))
return 0;
- }
p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
+ ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+
+ /*
+ * If no specific device is supplied, determine whether
+ * FIRMWARE_FIRST is set for *any* PCIe device.
+ */
+ if (!info->pci_dev) {
+ info->firmware_first |= ff;
+ return 0;
+ }
+
+ /* Otherwise, check the specific device */
if (p->flags & ACPI_HEST_GLOBAL) {
- if ((info->pci_dev->is_pcie &&
- info->pci_dev->pcie_type == pcie_type) || bridge)
- ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+ if (hest_match_type(hest_hdr, info->pci_dev))
+ info->firmware_first = ff;
} else
if (hest_match_pci(p, info->pci_dev))
- ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
- info->firmware_first = ff;
+ info->firmware_first = ff;
return 0;
}
@@ -89,6 +110,9 @@ static void aer_set_firmware_first(struct pci_dev *pci_dev)
int pcie_aer_get_firmware_first(struct pci_dev *dev)
{
+ if (!pci_is_pcie(dev))
+ return 0;
+
if (!dev->__aer_firmware_first_valid)
aer_set_firmware_first(dev);
return dev->__aer_firmware_first;
@@ -96,33 +120,20 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev)
static bool aer_firmware_first;
-static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data)
-{
- struct acpi_hest_aer_common *p;
-
- if (aer_firmware_first)
- return 0;
-
- switch (hest_hdr->type) {
- case ACPI_HEST_TYPE_AER_ROOT_PORT:
- case ACPI_HEST_TYPE_AER_ENDPOINT:
- case ACPI_HEST_TYPE_AER_BRIDGE:
- p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
- aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
- default:
- return 0;
- }
-}
-
/**
* aer_acpi_firmware_first - Check if APEI should control AER.
*/
bool aer_acpi_firmware_first(void)
{
static bool parsed = false;
+ struct aer_hest_parse_info info = {
+ .pci_dev = NULL, /* Check all PCIe devices */
+ .firmware_first = 0,
+ };
if (!parsed) {
- apei_hest_parse(aer_hest_parse_aff, NULL);
+ apei_hest_parse(aer_hest_parse, &info);
+ aer_firmware_first = info.firmware_first;
parsed = true;
}
return aer_firmware_first;
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 43421fbe080..5653ea94547 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -24,60 +24,36 @@
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/kfifo.h>
#include "aerdrv.h"
-static int forceload;
-static int nosourceid;
+static bool forceload;
+static bool nosourceid;
module_param(forceload, bool, 0);
module_param(nosourceid, bool, 0);
+#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
+ PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
+
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
- u16 reg16 = 0;
- int pos;
-
if (pcie_aer_get_firmware_first(dev))
return -EIO;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
- if (!pos)
- return -EIO;
-
- pos = pci_pcie_cap(dev);
- if (!pos)
+ if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
return -EIO;
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
- reg16 |= (PCI_EXP_DEVCTL_CERE |
- PCI_EXP_DEVCTL_NFERE |
- PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_URRE);
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
-
- return 0;
+ return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
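/*
 * Typical driver probe usage (hypothetical caller, not part of this patch):
 * enable reporting and treat failure as non-fatal, since firmware-first
 * platforms legitimately return -EIO here.
 */
#include <linux/pci.h>
#include <linux/aer.h>

static void example_enable_aer(struct pci_dev *pdev)
{
	if (pci_enable_pcie_error_reporting(pdev))
		dev_info(&pdev->dev, "PCIe error reporting unavailable\n");
}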
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
- u16 reg16 = 0;
- int pos;
-
if (pcie_aer_get_firmware_first(dev))
return -EIO;
- pos = pci_pcie_cap(dev);
- if (!pos)
- return -EIO;
-
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
- reg16 &= ~(PCI_EXP_DEVCTL_CERE |
- PCI_EXP_DEVCTL_NFERE |
- PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_URRE);
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
-
- return 0;
+ return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
+ PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
@@ -113,8 +89,6 @@ static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
return -ENOSPC;
}
-#define PCI_BUS(x) (((x) >> 8) & 0xff)
-
/**
* is_error_source - check whether the device is source of reported error
* @dev: pointer to pci_dev to be checked
@@ -130,7 +104,7 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
* When bus id is equal to 0, it might be a bad id
* reported by root port.
*/
- if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
+ if (!nosourceid && (PCI_BUS_NUM(e_info->id) != 0)) {
/* Device ID match? */
if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
return true;
@@ -150,18 +124,12 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
*/
if (atomic_read(&dev->enable_cnt) == 0)
return false;
- pos = pci_pcie_cap(dev);
- if (!pos)
- return false;
/* Check if AER is enabled */
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
- if (!(reg16 & (
- PCI_EXP_DEVCTL_CERE |
- PCI_EXP_DEVCTL_NFERE |
- PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_URRE)))
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
+ if (!(reg16 & PCI_EXP_AER_FLAGS))
return false;
+
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos)
return false;
@@ -239,10 +207,11 @@ static bool find_source_device(struct pci_dev *parent,
static int report_error_detected(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
- struct pci_error_handlers *err_handler;
+ const struct pci_error_handlers *err_handler;
struct aer_broadcast_data *result_data;
result_data = (struct aer_broadcast_data *) data;
+ device_lock(&dev->dev);
dev->error_state = result_data->state;
if (!dev->driver ||
@@ -261,64 +230,89 @@ static int report_error_detected(struct pci_dev *dev, void *data)
dev->driver ?
"no AER-aware driver" : "no driver");
}
- return 0;
+
+ /*
+ * If there's any device in the subtree that does not
+ * have an error_detected callback, returning
+ * PCI_ERS_RESULT_NO_AER_DRIVER prevents the subsequent
+ * mmio_enabled/slot_reset/resume callbacks from being
+ * called for any device in the subtree. All the
+ * devices in the subtree are left in the error state
+ * without recovery.
+ */
+
+ if (!(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
+ vote = PCI_ERS_RESULT_NO_AER_DRIVER;
+ else
+ vote = PCI_ERS_RESULT_NONE;
+ } else {
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->error_detected(dev, result_data->state);
}
- err_handler = dev->driver->err_handler;
- vote = err_handler->error_detected(dev, result_data->state);
result_data->result = merge_result(result_data->result, vote);
+ device_unlock(&dev->dev);
return 0;
}
static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
- struct pci_error_handlers *err_handler;
+ const struct pci_error_handlers *err_handler;
struct aer_broadcast_data *result_data;
result_data = (struct aer_broadcast_data *) data;
+ device_lock(&dev->dev);
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->mmio_enabled)
- return 0;
+ goto out;
err_handler = dev->driver->err_handler;
vote = err_handler->mmio_enabled(dev);
result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
return 0;
}
static int report_slot_reset(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
- struct pci_error_handlers *err_handler;
+ const struct pci_error_handlers *err_handler;
struct aer_broadcast_data *result_data;
result_data = (struct aer_broadcast_data *) data;
+ device_lock(&dev->dev);
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->slot_reset)
- return 0;
+ goto out;
err_handler = dev->driver->err_handler;
vote = err_handler->slot_reset(dev);
result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
return 0;
}
static int report_resume(struct pci_dev *dev, void *data)
{
- struct pci_error_handlers *err_handler;
+ const struct pci_error_handlers *err_handler;
+ device_lock(&dev->dev);
dev->error_state = pci_channel_io_normal;
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->resume)
- return 0;
+ goto out;
err_handler = dev->driver->err_handler;
err_handler->resume(dev);
+out:
+ device_unlock(&dev->dev);
return 0;
}
@@ -373,49 +367,16 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
}
/**
- * aer_do_secondary_bus_reset - perform secondary bus reset
- * @dev: pointer to bridge's pci_dev data structure
- *
- * Invoked when performing link reset at Root Port or Downstream Port.
- */
-void aer_do_secondary_bus_reset(struct pci_dev *dev)
-{
- u16 p2p_ctrl;
-
- /* Assert Secondary Bus Reset */
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
- p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
-
- /*
- * we should send hot reset message for 2ms to allow it time to
- * propagate to all downstream ports
- */
- msleep(2);
-
- /* De-assert Secondary Bus Reset */
- p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
-
- /*
- * System software must wait for at least 100ms from the end
- * of a reset of one or more device before it is permitted
- * to issue Configuration Requests to those devices.
- */
- msleep(200);
-}
-
-/**
- * default_downstream_reset_link - default reset function for Downstream Port
- * @dev: pointer to downstream port's pci_dev data structure
+ * default_reset_link - default reset function
+ * @dev: pointer to pci_dev data structure
*
- * Invoked when performing link reset at Downstream Port w/ no aer driver.
+ * Invoked when performing link reset on a Downstream Port or a
+ * Root Port with no AER driver.
*/
-static pci_ers_result_t default_downstream_reset_link(struct pci_dev *dev)
+static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
- aer_do_secondary_bus_reset(dev);
- dev_printk(KERN_DEBUG, &dev->dev,
- "Downstream Port link has been reset\n");
+ pci_reset_bridge_secondary_bus(dev);
+ dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n");
return PCI_ERS_RESULT_RECOVERED;
}
@@ -445,8 +406,7 @@ static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
return drv;
}
-static pci_ers_result_t reset_link(struct pcie_device *aerdev,
- struct pci_dev *dev)
+static pci_ers_result_t reset_link(struct pci_dev *dev)
{
struct pci_dev *udev;
pci_ers_result_t status;
@@ -465,8 +425,9 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
if (driver && driver->reset_link) {
status = driver->reset_link(udev);
- } else if (udev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) {
- status = default_downstream_reset_link(udev);
+ } else if (pci_pcie_type(udev) == PCI_EXP_TYPE_DOWNSTREAM ||
+ pci_pcie_type(udev) == PCI_EXP_TYPE_ROOT_PORT) {
+ status = default_reset_link(udev);
} else {
dev_printk(KERN_DEBUG, &dev->dev,
"no link-reset support at upstream device %s\n",
@@ -486,7 +447,6 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
/**
* do_recovery - handle nonfatal/fatal error recovery process
- * @aerdev: pointer to a pcie_device data structure of root port
* @dev: pointer to a pci_dev data structure of agent detecting an error
* @severity: error severity type
*
@@ -494,8 +454,7 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
* error detected message to all downstream drivers within a hierarchy in
* question and return the returned code.
*/
-static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev,
- int severity)
+static void do_recovery(struct pci_dev *dev, int severity)
{
pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
enum pci_channel_state state;
@@ -511,7 +470,7 @@ static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev,
report_error_detected);
if (severity == AER_FATAL) {
- result = reset_link(aerdev, dev);
+ result = reset_link(dev);
if (result != PCI_ERS_RESULT_RECOVERED)
goto failed;
}
@@ -542,14 +501,12 @@ static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev,
"resume",
report_resume);
- dev_printk(KERN_DEBUG, &dev->dev,
- "AER driver successfully recovered\n");
+ dev_info(&dev->dev, "AER: Device recovery successful\n");
return;
failed:
/* TODO: Should kernel panic here? */
- dev_printk(KERN_DEBUG, &dev->dev,
- "AER driver didn't recover\n");
+ dev_info(&dev->dev, "AER: Device recovery failed\n");
}
/**
@@ -568,7 +525,7 @@ static void handle_error_source(struct pcie_device *aerdev,
if (info->severity == AER_CORRECTABLE) {
/*
- * Correctable error does not need software intevention.
+ * Correctable error does not need software intervention.
* No need to go through error recovery process.
*/
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
@@ -576,8 +533,75 @@ static void handle_error_source(struct pcie_device *aerdev,
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
} else
- do_recovery(aerdev, dev, info->severity);
+ do_recovery(dev, info->severity);
+}
+
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+static void aer_recover_work_func(struct work_struct *work);
+
+#define AER_RECOVER_RING_ORDER 4
+#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)
+
+struct aer_recover_entry {
+ u8 bus;
+ u8 devfn;
+ u16 domain;
+ int severity;
+ struct aer_capability_regs *regs;
+};
+
+static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
+ AER_RECOVER_RING_SIZE);
+/*
+ * Mutual exclusion for the writers of aer_recover_ring; the reader side
+ * needs no lock, because there is only one reader and a kfifo requires
+ * no locking between a single reader and a single writer.
+ */
+static DEFINE_SPINLOCK(aer_recover_ring_lock);
+static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
+
+void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
+ int severity, struct aer_capability_regs *aer_regs)
+{
+ unsigned long flags;
+ struct aer_recover_entry entry = {
+ .bus = bus,
+ .devfn = devfn,
+ .domain = domain,
+ .severity = severity,
+ .regs = aer_regs,
+ };
+
+ spin_lock_irqsave(&aer_recover_ring_lock, flags);
+ if (kfifo_put(&aer_recover_ring, entry))
+ schedule_work(&aer_recover_work);
+ else
+ pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
+ domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
+}
+EXPORT_SYMBOL_GPL(aer_recover_queue);
+
+static void aer_recover_work_func(struct work_struct *work)
+{
+ struct aer_recover_entry entry;
+ struct pci_dev *pdev;
+
+ while (kfifo_get(&aer_recover_ring, &entry)) {
+ pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
+ entry.devfn);
+ if (!pdev) {
+ pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
+ entry.domain, entry.bus,
+ PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
+ continue;
+ }
+ cper_print_aer(pdev, entry.severity, entry.regs);
+ do_recovery(pdev, entry.severity);
+ pci_dev_put(pdev);
+ }
}
+#endif
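
The recovery ring added above is a fixed, power-of-two kfifo (16 entries): several writers, called from firmware-reported error paths, serialize on aer_recover_ring_lock, while the single workqueue reader consumes entries locklessly. A standalone sketch of that shape in plain C follows; all names are illustrative and this is not the kernel kfifo API.

#include <pthread.h>
#include <stdio.h>

#define RING_ORDER 4
#define RING_SIZE  (1u << RING_ORDER)	/* 16, like AER_RECOVER_RING_SIZE */

struct entry { unsigned domain, bus, devfn; int severity; };

static struct entry ring[RING_SIZE];
static unsigned head, tail;		/* head: next write, tail: next read */
static pthread_mutex_t wlock = PTHREAD_MUTEX_INITIALIZER;

static int ring_put(const struct entry *e)	/* many writers, locked */
{
	int ok = 0;

	pthread_mutex_lock(&wlock);
	if (head - tail < RING_SIZE) {
		ring[head & (RING_SIZE - 1)] = *e;
		__sync_synchronize();		/* publish the entry before head */
		head++;
		ok = 1;
	}
	pthread_mutex_unlock(&wlock);
	return ok;
}

static int ring_get(struct entry *e)		/* single reader, lockless */
{
	if (tail == head)
		return 0;
	*e = ring[tail & (RING_SIZE - 1)];
	__sync_synchronize();
	tail++;
	return 1;
}

int main(void)
{
	struct entry e = { .domain = 0, .bus = 1, .devfn = 0, .severity = 2 };

	ring_put(&e);
	while (ring_get(&e))
		printf("consumed %04x:%02x\n", e.domain, e.bus);
	return 0;
}
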
/**
* get_device_error_info - read error status from dev and store it to info
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 9d3e4c8d018..36ed31b5219 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -19,9 +19,13 @@
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/suspend.h>
+#include <linux/cper.h>
#include "aerdrv.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/ras.h>
+
#define AER_AGENT_RECEIVER 0
#define AER_AGENT_REQUESTER 1
#define AER_AGENT_COMPLETER 2
@@ -57,86 +61,44 @@
(e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
AER_TRANSACTION_LAYER_ERROR)
-#define AER_PR(info, pdev, fmt, args...) \
- printk("%s%s %s: " fmt, (info->severity == AER_CORRECTABLE) ? \
- KERN_WARNING : KERN_ERR, dev_driver_string(&pdev->dev), \
- dev_name(&pdev->dev), ## args)
-
/*
* AER error strings
*/
-static char *aer_error_severity_string[] = {
+static const char *aer_error_severity_string[] = {
"Uncorrected (Non-Fatal)",
"Uncorrected (Fatal)",
"Corrected"
};
-static char *aer_error_layer[] = {
+static const char *aer_error_layer[] = {
"Physical Layer",
"Data Link Layer",
"Transaction Layer"
};
-static char *aer_correctable_error_string[] = {
- "Receiver Error ", /* Bit Position 0 */
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- "Bad TLP ", /* Bit Position 6 */
- "Bad DLLP ", /* Bit Position 7 */
- "RELAY_NUM Rollover ", /* Bit Position 8 */
- NULL,
- NULL,
- NULL,
- "Replay Timer Timeout ", /* Bit Position 12 */
- "Advisory Non-Fatal ", /* Bit Position 13 */
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
+
+static const char *aer_correctable_error_string[] = {
+ "Receiver Error", /* Bit Position 0 */
NULL,
NULL,
NULL,
NULL,
NULL,
+ "Bad TLP", /* Bit Position 6 */
+ "Bad DLLP", /* Bit Position 7 */
+ "RELAY_NUM Rollover", /* Bit Position 8 */
NULL,
NULL,
NULL,
+ "Replay Timer Timeout", /* Bit Position 12 */
+ "Advisory Non-Fatal", /* Bit Position 13 */
};
-static char *aer_uncorrectable_error_string[] = {
- NULL,
- NULL,
- NULL,
- NULL,
- "Data Link Protocol ", /* Bit Position 4 */
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- "Poisoned TLP ", /* Bit Position 12 */
- "Flow Control Protocol ", /* Bit Position 13 */
- "Completion Timeout ", /* Bit Position 14 */
- "Completer Abort ", /* Bit Position 15 */
- "Unexpected Completion ", /* Bit Position 16 */
- "Receiver Overflow ", /* Bit Position 17 */
- "Malformed TLP ", /* Bit Position 18 */
- "ECRC ", /* Bit Position 19 */
- "Unsupported Request ", /* Bit Position 20 */
+static const char *aer_uncorrectable_error_string[] = {
NULL,
NULL,
NULL,
NULL,
+ "Data Link Protocol", /* Bit Position 4 */
NULL,
NULL,
NULL,
@@ -144,20 +106,44 @@ static char *aer_uncorrectable_error_string[] = {
NULL,
NULL,
NULL,
+ "Poisoned TLP", /* Bit Position 12 */
+ "Flow Control Protocol", /* Bit Position 13 */
+ "Completion Timeout", /* Bit Position 14 */
+ "Completer Abort", /* Bit Position 15 */
+ "Unexpected Completion", /* Bit Position 16 */
+ "Receiver Overflow", /* Bit Position 17 */
+ "Malformed TLP", /* Bit Position 18 */
+ "ECRC", /* Bit Position 19 */
+ "Unsupported Request", /* Bit Position 20 */
};
-static char *aer_agent_string[] = {
+static const char *aer_agent_string[] = {
"Receiver ID",
"Requester ID",
"Completer ID",
"Transmitter ID"
};
-static void __aer_print_error(struct aer_err_info *info, struct pci_dev *dev)
+static void __print_tlp_header(struct pci_dev *dev,
+ struct aer_header_log_regs *t)
{
- int i, status;
- char *errmsg = NULL;
+ unsigned char *tlp = (unsigned char *)t;
+
+ dev_err(&dev->dev, " TLP Header:"
+ " %02x%02x%02x%02x %02x%02x%02x%02x"
+ " %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
+ *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
+ *(tlp + 11), *(tlp + 10), *(tlp + 9),
+ *(tlp + 8), *(tlp + 15), *(tlp + 14),
+ *(tlp + 13), *(tlp + 12));
+}
+static void __aer_print_error(struct pci_dev *dev,
+ struct aer_err_info *info)
+{
+ int i, status;
+ const char *errmsg = NULL;
status = (info->status & ~info->mask);
for (i = 0; i < 32; i++) {
@@ -165,61 +151,54 @@ static void __aer_print_error(struct aer_err_info *info, struct pci_dev *dev)
continue;
if (info->severity == AER_CORRECTABLE)
- errmsg = aer_correctable_error_string[i];
+ errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
+ aer_correctable_error_string[i] : NULL;
else
- errmsg = aer_uncorrectable_error_string[i];
+ errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ?
+ aer_uncorrectable_error_string[i] : NULL;
if (errmsg)
- AER_PR(info, dev, " [%2d] %s%s\n", i, errmsg,
+ dev_err(&dev->dev, " [%2d] %-22s%s\n", i, errmsg,
info->first_error == i ? " (First)" : "");
else
- AER_PR(info, dev, " [%2d] Unknown Error Bit%s\n", i,
- info->first_error == i ? " (First)" : "");
+ dev_err(&dev->dev, " [%2d] Unknown Error Bit%s\n",
+ i, info->first_error == i ? " (First)" : "");
}
}
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
+ int layer, agent;
int id = ((dev->bus->number << 8) | dev->devfn);
- if (info->status == 0) {
- AER_PR(info, dev,
- "PCIe Bus Error: severity=%s, type=Unaccessible, "
- "id=%04x(Unregistered Agent ID)\n",
+ if (!info->status) {
+ dev_err(&dev->dev, "PCIe Bus Error: severity=%s, type=Unaccessible, id=%04x(Unregistered Agent ID)\n",
aer_error_severity_string[info->severity], id);
- } else {
- int layer, agent;
-
- layer = AER_GET_LAYER_ERROR(info->severity, info->status);
- agent = AER_GET_AGENT(info->severity, info->status);
-
- AER_PR(info, dev,
- "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
- aer_error_severity_string[info->severity],
- aer_error_layer[layer], id, aer_agent_string[agent]);
-
- AER_PR(info, dev,
- " device [%04x:%04x] error status/mask=%08x/%08x\n",
- dev->vendor, dev->device, info->status, info->mask);
-
- __aer_print_error(info, dev);
-
- if (info->tlp_header_valid) {
- unsigned char *tlp = (unsigned char *) &info->tlp;
- AER_PR(info, dev, " TLP Header:"
- " %02x%02x%02x%02x %02x%02x%02x%02x"
- " %02x%02x%02x%02x %02x%02x%02x%02x\n",
- *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
- *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
- *(tlp + 11), *(tlp + 10), *(tlp + 9),
- *(tlp + 8), *(tlp + 15), *(tlp + 14),
- *(tlp + 13), *(tlp + 12));
- }
+ goto out;
}
+ layer = AER_GET_LAYER_ERROR(info->severity, info->status);
+ agent = AER_GET_AGENT(info->severity, info->status);
+
+ dev_err(&dev->dev, "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+ aer_error_severity_string[info->severity],
+ aer_error_layer[layer], id, aer_agent_string[agent]);
+
+ dev_err(&dev->dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
+ dev->vendor, dev->device,
+ info->status, info->mask);
+
+ __aer_print_error(dev, info);
+
+ if (info->tlp_header_valid)
+ __print_tlp_header(dev, &info->tlp);
+
+out:
if (info->id && info->error_dev_num > 1 && info->id == id)
- AER_PR(info, dev,
- " Error of this Agent(%04x) is reported first\n", id);
+ dev_err(&dev->dev, " Error of this Agent(%04x) is reported first\n", id);
+
+ trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
+ info->severity);
}
void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
@@ -228,3 +207,59 @@ void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
info->multi_error_valid ? "Multiple " : "",
aer_error_severity_string[info->severity], info->id);
}
+
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+int cper_severity_to_aer(int cper_severity)
+{
+ switch (cper_severity) {
+ case CPER_SEV_RECOVERABLE:
+ return AER_NONFATAL;
+ case CPER_SEV_FATAL:
+ return AER_FATAL;
+ default:
+ return AER_CORRECTABLE;
+ }
+}
+EXPORT_SYMBOL_GPL(cper_severity_to_aer);
+
+void cper_print_aer(struct pci_dev *dev, int cper_severity,
+ struct aer_capability_regs *aer)
+{
+ int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
+ u32 status, mask;
+ const char **status_strs;
+
+ aer_severity = cper_severity_to_aer(cper_severity);
+
+ if (aer_severity == AER_CORRECTABLE) {
+ status = aer->cor_status;
+ mask = aer->cor_mask;
+ status_strs = aer_correctable_error_string;
+ status_strs_size = ARRAY_SIZE(aer_correctable_error_string);
+ } else {
+ status = aer->uncor_status;
+ mask = aer->uncor_mask;
+ status_strs = aer_uncorrectable_error_string;
+ status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string);
+ tlp_header_valid = status & AER_LOG_TLP_MASKS;
+ }
+
+ layer = AER_GET_LAYER_ERROR(aer_severity, status);
+ agent = AER_GET_AGENT(aer_severity, status);
+
+ dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
+ cper_print_bits("", status, status_strs, status_strs_size);
+ dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n",
+ aer_error_layer[layer], aer_agent_string[agent]);
+
+ if (aer_severity != AER_CORRECTABLE)
+ dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n",
+ aer->uncor_severity);
+
+ if (tlp_header_valid)
+ __print_tlp_header(dev, &aer->header_log);
+
+ trace_aer_event(dev_name(&dev->dev), (status & ~mask),
+ aer_severity);
+}
+#endif
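
For reference, the byte order used by __print_tlp_header() above: each of the four header DWORDs is dumped most-significant byte first from its little-endian in-memory layout. A minimal standalone sketch of the same dump, with illustrative header values, assuming a little-endian host:

#include <stdio.h>
#include <stdint.h>

static void print_tlp_header(const uint32_t hdr[4])
{
	const unsigned char *tlp = (const unsigned char *)hdr;
	int i;

	printf("TLP Header:");
	for (i = 0; i < 4; i++)
		printf(" %02x%02x%02x%02x",
		       tlp[4 * i + 3], tlp[4 * i + 2],
		       tlp[4 * i + 1], tlp[4 * i]);
	printf("\n");
}

int main(void)
{
	uint32_t hdr[4] = { 0x4a000001, 0x0000000f, 0x01000000, 0xdeadbeef };

	print_tlp_header(hdr);	/* little-endian host: "4a000001 0000000f ..." */
	return 0;
}
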
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 71222814c1e..e1e7026b838 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -69,13 +69,22 @@ struct pcie_link_state {
};
static int aspm_disabled, aspm_force;
+static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);
#define POLICY_DEFAULT 0 /* BIOS default setting */
#define POLICY_PERFORMANCE 1 /* high performance */
#define POLICY_POWERSAVE 2 /* high power saving */
+
+#ifdef CONFIG_PCIEASPM_PERFORMANCE
+static int aspm_policy = POLICY_PERFORMANCE;
+#elif defined CONFIG_PCIEASPM_POWERSAVE
+static int aspm_policy = POLICY_POWERSAVE;
+#else
static int aspm_policy;
+#endif
+
static const char *policy_str[] = {
[POLICY_DEFAULT] = "default",
[POLICY_PERFORMANCE] = "performance",
@@ -116,21 +125,16 @@ static int policy_to_clkpm_state(struct pcie_link_state *link)
static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
- int pos;
- u16 reg16;
struct pci_dev *child;
struct pci_bus *linkbus = link->pdev->subordinate;
list_for_each_entry(child, &linkbus->devices, bus_list) {
- pos = pci_pcie_cap(child);
- if (!pos)
- return;
- pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
if (enable)
- reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN;
+ pcie_capability_set_word(child, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN);
else
- reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
- pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16);
+ pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN);
}
link->clkpm_enabled = !!enable;
}
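
The pcie_capability_set_word()/pcie_capability_clear_word() calls that replace the open-coded sequences here are read-modify-write helpers over the PCIe capability registers. A standalone sketch of the underlying pattern, using a fake config space and an illustrative register offset (only CLKREQ_EN matches a real value):

#include <stdint.h>
#include <stdio.h>

static uint16_t cfg_space[64];		/* fake config space, word-indexed */

static uint16_t cfg_read(int pos)          { return cfg_space[pos]; }
static void cfg_write(int pos, uint16_t v) { cfg_space[pos] = v; }

/* The read-modify-write pattern the helpers wrap: */
static void cfg_clear_and_set(int pos, uint16_t clear, uint16_t set)
{
	uint16_t v = cfg_read(pos);

	v &= ~clear;
	v |= set;
	cfg_write(pos, v);
}

#define LNKCTL		0x08	/* illustrative word offset, not the real one */
#define CLKREQ_EN	0x0100	/* value of PCI_EXP_LNKCTL_CLKREQ_EN */

int main(void)
{
	cfg_clear_and_set(LNKCTL, 0, CLKREQ_EN);	/* "set" helper */
	printf("LNKCTL=%04x\n", cfg_read(LNKCTL));
	cfg_clear_and_set(LNKCTL, CLKREQ_EN, 0);	/* "clear" helper */
	printf("LNKCTL=%04x\n", cfg_read(LNKCTL));
	return 0;
}
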
@@ -139,7 +143,7 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
/* Don't enable Clock PM if the link is not Clock PM capable */
if (!link->clkpm_capable && enable)
- return;
+ enable = 0;
/* Need nothing if the specified equals to current state */
if (link->clkpm_enabled == enable)
return;
@@ -148,7 +152,7 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
- int pos, capable = 1, enabled = 1;
+ int capable = 1, enabled = 1;
u32 reg32;
u16 reg16;
struct pci_dev *child;
@@ -156,16 +160,13 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
/* All functions should have the same cap and state, take the worst */
list_for_each_entry(child, &linkbus->devices, bus_list) {
- pos = pci_pcie_cap(child);
- if (!pos)
- return;
- pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32);
+ pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
capable = 0;
enabled = 0;
break;
}
- pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
+ pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
enabled = 0;
}
@@ -181,7 +182,7 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
*/
static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
- int ppos, cpos, same_clock = 1;
+ int same_clock = 1;
u16 reg16, parent_reg, child_reg[8];
unsigned long start_jiffies;
struct pci_dev *child, *parent = link->pdev;
@@ -194,46 +195,43 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
BUG_ON(!pci_is_pcie(child));
/* Check downstream component if bit Slot Clock Configuration is 1 */
- cpos = pci_pcie_cap(child);
- pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16);
+ pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Check upstream component if bit Slot Clock Configuration is 1 */
- ppos = pci_pcie_cap(parent);
- pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
+ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Configure downstream component, all functions */
list_for_each_entry(child, &linkbus->devices, bus_list) {
- cpos = pci_pcie_cap(child);
- pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16);
+ pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
child_reg[PCI_FUNC(child->devfn)] = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16);
+ pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16);
}
/* Configure upstream component */
- pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, &reg16);
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
parent_reg = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
/* Retrain link */
reg16 |= PCI_EXP_LNKCTL_RL;
- pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
/* Wait for link training end. Break out after waiting for timeout */
start_jiffies = jiffies;
for (;;) {
- pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
+ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_LT))
break;
if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
@@ -244,14 +242,11 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
return;
/* Training failed. Restore common clock configurations */
- dev_printk(KERN_ERR, &parent->dev,
- "ASPM: Could not configure common clock\n");
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- cpos = pci_pcie_cap(child);
- pci_write_config_word(child, cpos + PCI_EXP_LNKCTL,
- child_reg[PCI_FUNC(child->devfn)]);
- }
- pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg);
+ dev_err(&parent->dev, "ASPM: Could not configure common clock\n");
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ pcie_capability_write_word(child, PCI_EXP_LNKCTL,
+ child_reg[PCI_FUNC(child->devfn)]);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
}
/* Convert L0s latency encoding to ns */
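
The retrain sequence above is a bounded poll: set Retrain Link, then read Link Status until Link Training clears or LINK_RETRAIN_TIMEOUT elapses, restoring the old clock configuration on failure. A standalone sketch of the same loop shape; the condition function and timeout value are placeholders:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool link_training_active(void)
{
	static int remaining = 3;	/* pretend training finishes after 3 polls */

	return remaining-- > 0;
}

static bool wait_link_trained(double timeout_s)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (!link_training_active())
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) +
		    (now.tv_nsec - start.tv_nsec) / 1e9 > timeout_s)
			return false;	/* caller restores the old clock config */
	}
}

int main(void)
{
	printf("trained: %d\n", wait_link_trained(1.0));
	return 0;
}
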
@@ -296,16 +291,14 @@ struct aspm_register_info {
static void pcie_get_aspm_reg(struct pci_dev *pdev,
struct aspm_register_info *info)
{
- int pos;
u16 reg16;
u32 reg32;
- pos = pci_pcie_cap(pdev);
- pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
+ pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32);
info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16);
info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;
}
@@ -403,7 +396,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* do ASPM for now.
*/
list_for_each_entry(child, &linkbus->devices, bus_list) {
- if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
+ if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) {
link->aspm_disable = ASPM_STATE_ALL;
break;
}
@@ -411,17 +404,15 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Get and check endpoint acceptable latencies */
list_for_each_entry(child, &linkbus->devices, bus_list) {
- int pos;
u32 reg32, encoding;
struct aspm_latency *acceptable =
&link->acceptable[PCI_FUNC(child->devfn)];
- if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child->pcie_type != PCI_EXP_TYPE_LEG_END)
+ if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
+ pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
continue;
- pos = pci_pcie_cap(child);
- pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
+ pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
/* Calculate endpoint L0s acceptable latency */
encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
acceptable->l0s = calc_l0s_acceptable(encoding);
@@ -435,13 +426,8 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
- u16 reg16;
- int pos = pci_pcie_cap(pdev);
-
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- reg16 &= ~0x3;
- reg16 |= val;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, val);
}
static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
@@ -456,12 +442,12 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
return;
/* Convert ASPM state to upstream/downstream ASPM register state */
if (state & ASPM_STATE_L0S_UP)
- dwstream |= PCIE_LINK_STATE_L0S;
+ dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
if (state & ASPM_STATE_L0S_DW)
- upstream |= PCIE_LINK_STATE_L0S;
+ upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
if (state & ASPM_STATE_L1) {
- upstream |= PCIE_LINK_STATE_L1;
- dwstream |= PCIE_LINK_STATE_L1;
+ upstream |= PCI_EXP_LNKCTL_ASPM_L1;
+ dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
}
/*
* Spec 2.0 suggests all functions should be configured the
@@ -496,25 +482,32 @@ static void free_link_state(struct pcie_link_state *link)
static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
struct pci_dev *child;
- int pos;
u32 reg32;
+
/*
* Some functions in a slot might not all be PCIe functions,
* very strange. Disable ASPM for the whole slot
*/
list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
- pos = pci_pcie_cap(child);
- if (!pos)
+ if (!pci_is_pcie(child))
return -EINVAL;
+
+ /*
+ * If ASPM is disabled then we're not going to change
+ * the BIOS state. It's safe to continue even if it's a
+ * pre-1.1 device
+ */
+
+ if (aspm_disabled)
+ continue;
+
/*
* Disable ASPM for pre-1.1 PCIe device, we follow MS to use
* RBER bit to determine if a function is 1.1 version device
*/
- pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
+ pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
- dev_printk(KERN_INFO, &child->dev, "disabling ASPM"
- " on pre-1.1 PCIe device. You can enable it"
- " with 'pcie_aspm=force'\n");
+ dev_info(&child->dev, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n");
return -EINVAL;
}
}
@@ -532,7 +525,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
INIT_LIST_HEAD(&link->children);
INIT_LIST_HEAD(&link->link);
link->pdev = pdev;
- if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) {
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) {
struct pcie_link_state *parent;
parent = pdev->bus->parent->self->link_state;
if (!parent) {
@@ -555,7 +548,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
/*
* pcie_aspm_init_link_state: Initiate PCI express link state.
- * It is called after the pcie and its children devices are scaned.
+ * It is called after the pcie and its children devices are scanned.
* @pdev: the root port or switch downstream port
*/
void pcie_aspm_init_link_state(struct pci_dev *pdev)
@@ -563,14 +556,17 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
struct pcie_link_state *link;
int blacklist = !!pcie_aspm_sanity_check(pdev);
- if (aspm_disabled || !pci_is_pcie(pdev) || pdev->link_state)
+ if (!aspm_support_enabled)
+ return;
+
+ if (!pci_is_pcie(pdev) || pdev->link_state)
return;
- if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
+ pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
return;
/* VIA has a strange chipset, root port is under a bridge */
- if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
pdev->bus->self)
return;
@@ -627,8 +623,8 @@ static void pcie_update_aspm_capable(struct pcie_link_state *root)
if (link->root != root)
continue;
list_for_each_entry(child, &linkbus->devices, bus_list) {
- if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT) &&
- (child->pcie_type != PCI_EXP_TYPE_LEG_END))
+ if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
+ (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
continue;
pcie_aspm_check_latency(child);
}
@@ -641,11 +637,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link, *root, *parent_link;
- if (aspm_disabled || !pci_is_pcie(pdev) ||
- !parent || !parent->link_state)
- return;
- if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
- (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
+ if (!parent || !parent->link_state)
return;
down_read(&pci_bus_sem);
@@ -685,8 +677,8 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
if (aspm_disabled || !pci_is_pcie(pdev) || !link)
return;
- if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
- (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
+ if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
return;
/*
* Devices changed PM state, we should recheck if latency
@@ -700,24 +692,58 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
up_read(&pci_bus_sem);
}
-/*
- * pci_disable_link_state - disable pci device's link state, so the link will
- * never enter specific states
- */
-void pci_disable_link_state(struct pci_dev *pdev, int state)
+void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link = pdev->link_state;
+
+ if (aspm_disabled || !pci_is_pcie(pdev) || !link)
+ return;
+
+ if (aspm_policy != POLICY_POWERSAVE)
+ return;
+
+ if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
+ return;
+
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ pcie_config_aspm_path(link);
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+}
+
+static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
+ bool force)
{
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link;
- if (aspm_disabled || !pci_is_pcie(pdev))
+ if (!pci_is_pcie(pdev))
return;
- if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
- pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
+
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)
parent = pdev;
if (!parent || !parent->link_state)
return;
- down_read(&pci_bus_sem);
+ /*
+ * A driver requested that ASPM be disabled on this device, but
+ * if we don't have permission to manage ASPM (e.g., on ACPI
+ * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
+ * the _OSC method), we can't honor that request. Windows has
+ * a similar mechanism using "PciASPMOptOut", which is also
+ * ignored in this situation.
+ */
+ if (aspm_disabled && !force) {
+ dev_warn(&pdev->dev, "can't disable ASPM; OS doesn't have ASPM control\n");
+ return;
+ }
+
+ if (sem)
+ down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
link = parent->link_state;
if (state & PCIE_LINK_STATE_L0S)
@@ -731,15 +757,56 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
pcie_set_clkpm(link, 0);
}
mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
+ if (sem)
+ up_read(&pci_bus_sem);
+}
+
+void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
+{
+ __pci_disable_link_state(pdev, state, false, false);
+}
+EXPORT_SYMBOL(pci_disable_link_state_locked);
+
+/**
+ * pci_disable_link_state - Disable device's link state, so the link will
+ * never enter specific states. Note that if the BIOS didn't grant ASPM
+ * control to the OS, this does nothing because we can't touch the LNKCTL
+ * register.
+ *
+ * @pdev: PCI device
+ * @state: ASPM link state to disable
+ */
+void pci_disable_link_state(struct pci_dev *pdev, int state)
+{
+ __pci_disable_link_state(pdev, state, true, false);
}
EXPORT_SYMBOL(pci_disable_link_state);
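
As a usage note, a typical caller of this interface is an endpoint driver working around an ASPM erratum. A hedged sketch of how such a driver's probe routine might ask for states to stay disabled; the driver and the erratum are hypothetical:

#include <linux/pci.h>
#include <linux/pci-aspm.h>

/* Hypothetical endpoint driver probe; the erratum is assumed. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* assumed hardware erratum: L0s and Clock PM unreliable on this part */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				     PCIE_LINK_STATE_CLKPM);
	return 0;
}

Note this is a no-op when the OS was not granted ASPM control, per the warning in __pci_disable_link_state() above.
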
+void pcie_clear_aspm(struct pci_bus *bus)
+{
+ struct pci_dev *child;
+
+ if (aspm_force)
+ return;
+
+ /*
+ * Clear any ASPM setup that the firmware has carried out on this bus
+ */
+ list_for_each_entry(child, &bus->devices, bus_list) {
+ __pci_disable_link_state(child, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM,
+ false, true);
+ }
+}
+
static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
{
int i;
struct pcie_link_state *link;
+ if (aspm_disabled)
+ return -EPERM;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
if (!strncmp(val, policy_str[i], strlen(policy_str[i])))
break;
@@ -794,6 +861,8 @@ static ssize_t link_state_store(struct device *dev,
struct pcie_link_state *link, *root = pdev->link_state->root;
u32 val = buf[0] - '0', state = 0;
+ if (aspm_disabled)
+ return -EPERM;
if (n < 1 || val > 3)
return -EINVAL;
@@ -855,8 +924,8 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
struct pcie_link_state *link_state = pdev->link_state;
if (!pci_is_pcie(pdev) ||
- (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
+ (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
+ pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
if (link_state->aspm_support)
@@ -872,8 +941,8 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
struct pcie_link_state *link_state = pdev->link_state;
if (!pci_is_pcie(pdev) ||
- (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
+ (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
+ pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
if (link_state->aspm_support)
@@ -888,11 +957,13 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
static int __init pcie_aspm_disable(char *str)
{
if (!strcmp(str, "off")) {
+ aspm_policy = POLICY_DEFAULT;
aspm_disabled = 1;
+ aspm_support_enabled = false;
printk(KERN_INFO "PCIe ASPM is disabled\n");
} else if (!strcmp(str, "force")) {
aspm_force = 1;
- printk(KERN_INFO "PCIe ASPM is forcedly enabled\n");
+ printk(KERN_INFO "PCIe ASPM is forcibly enabled\n");
}
return 1;
}
@@ -901,19 +972,20 @@ __setup("pcie_aspm=", pcie_aspm_disable);
void pcie_no_aspm(void)
{
- if (!aspm_force)
+ /*
+ * Disabling ASPM is intended to prevent the kernel from modifying
+ * existing hardware state, not to clear existing state. To that end:
+ * (a) set policy to POLICY_DEFAULT in order to avoid changing state
+ * (b) prevent userspace from changing policy
+ */
+ if (!aspm_force) {
+ aspm_policy = POLICY_DEFAULT;
aspm_disabled = 1;
+ }
}
-/**
- * pcie_aspm_enabled - is PCIe ASPM enabled?
- *
- * Returns true if ASPM has not been disabled by the command-line option
- * pcie_aspm=off.
- **/
-int pcie_aspm_enabled(void)
+bool pcie_aspm_support_enabled(void)
{
- return !aspm_disabled;
+ return aspm_support_enabled;
}
-EXPORT_SYMBOL(pcie_aspm_enabled);
-
+EXPORT_SYMBOL(pcie_aspm_support_enabled);
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 2f3c9040722..82e06a86cd7 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -19,16 +19,11 @@
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pcieport_if.h>
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include "../pci.h"
#include "portdrv.h"
-#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
-#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
-
/*
* If this switch is set, MSI will not be used for PCIe PME signaling. This
* causes the PCIe port driver to use INTx interrupts only, but it turns out
@@ -60,33 +55,12 @@ struct pcie_pme_service_data {
*/
void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
{
- int rtctl_pos;
- u16 rtctl;
-
- rtctl_pos = pci_pcie_cap(dev) + PCI_EXP_RTCTL;
-
- pci_read_config_word(dev, rtctl_pos, &rtctl);
if (enable)
- rtctl |= PCI_EXP_RTCTL_PMEIE;
+ pcie_capability_set_word(dev, PCI_EXP_RTCTL,
+ PCI_EXP_RTCTL_PMEIE);
else
- rtctl &= ~PCI_EXP_RTCTL_PMEIE;
- pci_write_config_word(dev, rtctl_pos, rtctl);
-}
-
-/**
- * pcie_pme_clear_status - Clear root port PME interrupt status.
- * @dev: PCIe root port or event collector.
- */
-static void pcie_pme_clear_status(struct pci_dev *dev)
-{
- int rtsta_pos;
- u32 rtsta;
-
- rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA;
-
- pci_read_config_dword(dev, rtsta_pos, &rtsta);
- rtsta |= PCI_EXP_RTSTA_PME;
- pci_write_config_dword(dev, rtsta_pos, rtsta);
+ pcie_capability_clear_word(dev, PCI_EXP_RTCTL,
+ PCI_EXP_RTCTL_PMEIE);
}
/**
@@ -103,8 +77,11 @@ static bool pcie_pme_walk_bus(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Skip PCIe devices in case we started from a root port. */
if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
- pm_request_resume(&dev->dev);
+ if (dev->pme_poll)
+ dev->pme_poll = false;
+
pci_wakeup_event(dev);
+ pm_request_resume(&dev->dev);
ret = true;
}
@@ -136,7 +113,7 @@ static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn)
if (!dev)
return false;
- if (pci_is_pcie(dev) && dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
+ if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) {
down_read(&pci_bus_sem);
if (pcie_pme_walk_bus(bus))
found = true;
@@ -161,6 +138,9 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
/* First, check if the PME is from the root port itself. */
if (port->devfn == devfn && port->bus->number == busnr) {
+ if (port->pme_poll)
+ port->pme_poll = false;
+
if (pci_check_pme_status(port)) {
pm_request_resume(&port->dev);
found = true;
@@ -206,8 +186,11 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
/* The device is there, but we have to check its PME status. */
found = pci_check_pme_status(dev);
if (found) {
- pm_request_resume(&dev->dev);
+ if (dev->pme_poll)
+ dev->pme_poll = false;
+
pci_wakeup_event(dev);
+ pm_request_resume(&dev->dev);
}
pci_dev_put(dev);
} else if (devfn) {
@@ -216,8 +199,7 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
* assuming that the PME was reported by a PCIe-PCI bridge that
* used devfn different from zero.
*/
- dev_dbg(&port->dev, "PME interrupt generated for "
- "non-existent device %02x:%02x.%d\n",
+ dev_dbg(&port->dev, "PME interrupt generated for non-existent device %02x:%02x.%d\n",
busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
found = pcie_pme_from_pci_bridge(bus, 0);
}
@@ -236,24 +218,21 @@ static void pcie_pme_work_fn(struct work_struct *work)
struct pcie_pme_service_data *data =
container_of(work, struct pcie_pme_service_data, work);
struct pci_dev *port = data->srv->port;
- int rtsta_pos;
u32 rtsta;
- rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA;
-
spin_lock_irq(&data->lock);
for (;;) {
if (data->noirq)
break;
- pci_read_config_dword(port, rtsta_pos, &rtsta);
+ pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
if (rtsta & PCI_EXP_RTSTA_PME) {
/*
* Clear PME status of the port. If there are other
* pending PMEs, the status will be set again.
*/
- pcie_pme_clear_status(port);
+ pcie_clear_root_pme_status(port);
spin_unlock_irq(&data->lock);
pcie_pme_handle_request(port, rtsta & 0xffff);
@@ -286,17 +265,14 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
{
struct pci_dev *port;
struct pcie_pme_service_data *data;
- int rtsta_pos;
u32 rtsta;
unsigned long flags;
port = ((struct pcie_device *)context)->port;
data = get_service_data((struct pcie_device *)context);
- rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA;
-
spin_lock_irqsave(&data->lock, flags);
- pci_read_config_dword(port, rtsta_pos, &rtsta);
+ pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
if (!(rtsta & PCI_EXP_RTSTA_PME)) {
spin_unlock_irqrestore(&data->lock, flags);
@@ -345,13 +321,13 @@ static void pcie_pme_mark_devices(struct pci_dev *port)
struct pci_dev *dev;
/* Check if this is a root port event collector. */
- if (port->pcie_type != PCI_EXP_TYPE_RC_EC || !bus)
+ if (pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC || !bus)
return;
down_read(&pci_bus_sem);
list_for_each_entry(dev, &bus->devices, bus_list)
if (pci_is_pcie(dev)
- && dev->pcie_type == PCI_EXP_TYPE_RC_END)
+ && pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
pcie_pme_set_native(dev, NULL);
up_read(&pci_bus_sem);
}
@@ -378,7 +354,7 @@ static int pcie_pme_probe(struct pcie_device *srv)
port = srv->port;
pcie_pme_interrupt_enable(port, false);
- pcie_pme_clear_status(port);
+ pcie_clear_root_pme_status(port);
ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
if (ret) {
@@ -402,7 +378,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
spin_lock_irq(&data->lock);
pcie_pme_interrupt_enable(port, false);
- pcie_pme_clear_status(port);
+ pcie_clear_root_pme_status(port);
data->noirq = true;
spin_unlock_irq(&data->lock);
@@ -422,7 +398,7 @@ static int pcie_pme_resume(struct pcie_device *srv)
spin_lock_irq(&data->lock);
data->noirq = false;
- pcie_pme_clear_status(port);
+ pcie_clear_root_pme_status(port);
pcie_pme_interrupt_enable(port, true);
spin_unlock_irq(&data->lock);
@@ -431,7 +407,7 @@ static int pcie_pme_resume(struct pcie_device *srv)
/**
* pcie_pme_remove - Prepare PCIe PME service device for removal.
- * @srv - PCIe service device to resume.
+ * @srv - PCIe service device to remove.
*/
static void pcie_pme_remove(struct pcie_device *srv)
{
@@ -442,8 +418,8 @@ static void pcie_pme_remove(struct pcie_device *srv)
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
- .port_type = PCI_EXP_TYPE_ROOT_PORT,
- .service = PCIE_PORT_SERVICE_PME,
+ .port_type = PCI_EXP_TYPE_ROOT_PORT,
+ .service = PCIE_PORT_SERVICE_PME,
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 7b5aba0a329..d525548404d 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -14,27 +14,38 @@
#define PCIE_PORT_DEVICE_MAXSERVICES 4
/*
* According to the PCI Express Base Specification 2.0, the indices of
- * the MSI-X table entires used by port services must not exceed 31
+ * the MSI-X table entries used by port services must not exceed 31
*/
#define PCIE_PORT_MAX_MSIX_ENTRIES 32
#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
-extern bool pcie_ports_disabled;
-extern bool pcie_ports_auto;
-
extern struct bus_type pcie_port_bus_type;
-extern int pcie_port_device_register(struct pci_dev *dev);
+int pcie_port_device_register(struct pci_dev *dev);
#ifdef CONFIG_PM
-extern int pcie_port_device_suspend(struct device *dev);
-extern int pcie_port_device_resume(struct device *dev);
+int pcie_port_device_suspend(struct device *dev);
+int pcie_port_device_resume(struct device *dev);
#endif
-extern void pcie_port_device_remove(struct pci_dev *dev);
-extern int __must_check pcie_port_bus_register(void);
-extern void pcie_port_bus_unregister(void);
+void pcie_port_device_remove(struct pci_dev *dev);
+int __must_check pcie_port_bus_register(void);
+void pcie_port_bus_unregister(void);
struct pci_dev;
+void pcie_clear_root_pme_status(struct pci_dev *dev);
+
+#ifdef CONFIG_HOTPLUG_PCI_PCIE
+extern bool pciehp_msi_disabled;
+
+static inline bool pciehp_no_msi(void)
+{
+ return pciehp_msi_disabled;
+}
+
+#else /* !CONFIG_HOTPLUG_PCI_PCIE */
+static inline bool pciehp_no_msi(void) { return false; }
+#endif /* !CONFIG_HOTPLUG_PCI_PCIE */
+
#ifdef CONFIG_PCIE_PME
extern bool pcie_pme_msi_disabled;
@@ -48,7 +59,7 @@ static inline bool pcie_pme_no_msi(void)
return pcie_pme_msi_disabled;
}
-extern void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable);
+void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable);
#else /* !CONFIG_PCIE_PME */
static inline void pcie_pme_disable_msi(void) {}
static inline bool pcie_pme_no_msi(void) { return false; }
@@ -56,7 +67,7 @@ static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
#ifdef CONFIG_ACPI
-extern int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
+int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
{
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
index 5982b6a63b8..b4d2894ee3f 100644
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -17,6 +17,7 @@
#include "aer/aerdrv.h"
#include "../pci.h"
+#include "portdrv.h"
/**
* pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
@@ -33,7 +34,7 @@
*/
int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
{
- acpi_status status;
+ struct acpi_pci_root *root;
acpi_handle handle;
u32 flags;
@@ -44,26 +45,11 @@ int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
if (!handle)
return -EINVAL;
- flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
- | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
- | OSC_PCI_EXPRESS_PME_CONTROL;
-
- if (pci_aer_available()) {
- if (aer_acpi_firmware_first())
- dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
- else
- flags |= OSC_PCI_EXPRESS_AER_CONTROL;
- }
-
- status = acpi_pci_osc_control_set(handle, &flags,
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- if (ACPI_FAILURE(status)) {
- dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n",
- status);
+ root = acpi_pci_find_root(handle);
+ if (!root)
return -ENODEV;
- }
- dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags);
+ flags = root->osc_control_set;
*srv_mask = PCIE_PORT_SERVICE_VC;
if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index 18bf90f748f..87e79a6ffb5 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -18,8 +18,8 @@
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv);
struct bus_type pcie_port_bus_type = {
- .name = "pci_express",
- .match = pcie_port_bus_match,
+ .name = "pci_express",
+ .match = pcie_port_bus_match,
};
EXPORT_SYMBOL_GPL(pcie_port_bus_type);
@@ -38,7 +38,7 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
return 0;
if ((driver->port_type != PCIE_ANY_PORT) &&
- (driver->port_type != pciedev->port->pcie_type))
+ (driver->port_type != pci_pcie_type(pciedev->port)))
return 0;
return 1;
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index a9c222d79eb..2f0ce668a77 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -15,11 +15,21 @@
#include <linux/slab.h>
#include <linux/pcieport_if.h>
#include <linux/aer.h>
-#include <linux/pci-aspm.h>
#include "../pci.h"
#include "portdrv.h"
+bool pciehp_msi_disabled;
+
+static int __init pciehp_setup(char *str)
+{
+ if (!strncmp(str, "nomsi", 5))
+ pciehp_msi_disabled = true;
+
+ return 1;
+}
+__setup("pcie_hp=", pciehp_setup);
+
/**
* release_pcie_device - free PCI Express port service device structure
* @dev: Port service device to release
@@ -36,7 +46,7 @@ static void release_pcie_device(struct device *dev)
* pcie_port_msix_add_entry - add entry to given array of MSI-X entries
* @entries: Array of MSI-X entries
* @new_entry: Index of the entry to add to the array
- * @nr_entries: Number of entries aleady in the array
+ * @nr_entries: Number of entries already in the array
*
* Return value: Position of the added entry in the array
*/
@@ -69,9 +79,10 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
u16 reg16;
u32 reg32;
- nr_entries = pci_msix_table_size(dev);
- if (!nr_entries)
- return -EINVAL;
+ nr_entries = pci_msix_vec_count(dev);
+ if (nr_entries < 0)
+ return nr_entries;
+ BUG_ON(!nr_entries);
if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
@@ -88,7 +99,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
for (i = 0; i < nr_entries; i++)
msix_entries[i].entry = i;
- status = pci_enable_msix(dev, msix_entries, nr_entries);
+ status = pci_enable_msix_exact(dev, msix_entries, nr_entries);
if (status)
goto Exit;
@@ -110,8 +121,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
* the value in this field indicates which MSI-X Table entry is
* used to generate the interrupt message."
*/
- pos = pci_pcie_cap(dev);
- pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
+ pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
if (entry >= nr_entries)
goto Error;
@@ -161,7 +171,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
pci_disable_msix(dev);
/* Now allocate the MSI-X vectors for real */
- status = pci_enable_msix(dev, msix_entries, nvec);
+ status = pci_enable_msix_exact(dev, msix_entries, nvec);
if (status)
goto Exit;
}
@@ -190,9 +200,13 @@ static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
int i, irq = -1;
- /* We have to use INTx if MSI cannot be used for PCIe PME. */
- if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) {
- if (dev->pin)
+ /*
+ * If MSI cannot be used for PCIe PME or hotplug, we have to use
+ * INTx or another interrupt, e.g. a system shared interrupt.
+ */
+ if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) ||
+ ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) {
+ if (dev->irq)
irq = dev->irq;
goto no_msi;
}
@@ -201,8 +215,12 @@ static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
if (!pcie_port_enable_msix(dev, irqs, mask))
return 0;
- /* We're not going to use MSI-X, so try MSI and fall back to INTx */
- if (!pci_enable_msi(dev) || dev->pin)
+ /*
+ * We're not going to use MSI-X, so try MSI and fall back to INTx.
+ * If neither MSI/MSI-X nor INTx is available, try another interrupt.
+ * On some platforms, the root port doesn't support MSI/MSI-X/INTx in
+ * RC mode.
+ */
+ if (!pci_enable_msi(dev) || dev->irq)
irq = dev->irq;
no_msi:
@@ -235,30 +253,29 @@ static void cleanup_service_irqs(struct pci_dev *dev)
*/
static int get_port_device_capability(struct pci_dev *dev)
{
- int services = 0, pos;
- u16 reg16;
+ int services = 0;
u32 reg32;
- int cap_mask;
+ int cap_mask = 0;
int err;
- err = pcie_port_platform_notify(dev, &cap_mask);
+ if (pcie_ports_disabled)
+ return 0;
+
+ cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
+ | PCIE_PORT_SERVICE_VC;
+ if (pci_aer_available())
+ cap_mask |= PCIE_PORT_SERVICE_AER;
+
if (pcie_ports_auto) {
- if (err) {
- pcie_no_aspm();
+ err = pcie_port_platform_notify(dev, &cap_mask);
+ if (err)
return 0;
- }
- } else {
- cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
- | PCIE_PORT_SERVICE_VC;
- if (pci_aer_available())
- cap_mask |= PCIE_PORT_SERVICE_AER;
}
- pos = pci_pcie_cap(dev);
- pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
/* Hot-Plug Capable */
- if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) {
- pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32);
+ if ((cap_mask & PCIE_PORT_SERVICE_HP) &&
+ pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT) {
+ pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, &reg32);
if (reg32 & PCI_EXP_SLTCAP_HPC) {
services |= PCIE_PORT_SERVICE_HP;
/*
@@ -266,10 +283,8 @@ static int get_port_device_capability(struct pci_dev *dev)
* enabled by the BIOS and the hot-plug service driver
* is not loaded.
*/
- pos += PCI_EXP_SLTCTL;
- pci_read_config_word(dev, pos, &reg16);
- reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
- pci_write_config_word(dev, pos, reg16);
+ pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
}
}
/* AER capable */
@@ -287,7 +302,7 @@ static int get_port_device_capability(struct pci_dev *dev)
services |= PCIE_PORT_SERVICE_VC;
/* Root ports are capable of generating PME too */
if ((cap_mask & PCIE_PORT_SERVICE_PME)
- && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
+ && pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
services |= PCIE_PORT_SERVICE_PME;
/*
* Disable PME interrupt on this port in case it's been enabled
@@ -325,16 +340,17 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
device->release = release_pcie_device; /* callback to free pcie dev */
dev_set_name(device, "%s:pcie%02x",
pci_name(pdev),
- get_descriptor_id(pdev->pcie_type, service));
+ get_descriptor_id(pci_pcie_type(pdev), service));
device->parent = &pdev->dev;
device_enable_async_suspend(device);
retval = device_register(device);
- if (retval)
- kfree(pcie);
- else
- get_device(device);
- return retval;
+ if (retval) {
+ put_device(device);
+ return retval;
+ }
+
+ return 0;
}
/**
@@ -349,23 +365,27 @@ int pcie_port_device_register(struct pci_dev *dev)
int status, capabilities, i, nr_service;
int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
- /* Get and check PCI Express port services */
- capabilities = get_port_device_capability(dev);
- if (!capabilities)
- return -ENODEV;
-
/* Enable PCI Express port device */
status = pci_enable_device(dev);
if (status)
return status;
+
+ /* Get and check PCI Express port services */
+ capabilities = get_port_device_capability(dev);
+ if (!capabilities)
+ return 0;
+
pci_set_master(dev);
/*
* Initialize service irqs. Don't use service devices that
* require interrupts if there is no way to generate them.
+ * However, some drivers may have a polling mode (e.g. pciehp_poll_mode)
+ * that can be used in the absence of irqs. Allow them to determine
+ * if that is to be used.
*/
status = init_service_irqs(dev, irqs, capabilities);
if (status) {
- capabilities &= PCIE_PORT_SERVICE_VC;
+ capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP;
if (!capabilities)
goto error_disable;
}
@@ -439,10 +459,8 @@ int pcie_port_device_resume(struct device *dev)
static int remove_iter(struct device *dev, void *data)
{
- if (dev->bus == &pcie_port_bus_type) {
- put_device(dev);
+ if (dev->bus == &pcie_port_bus_type)
device_unregister(dev);
- }
return 0;
}
@@ -483,12 +501,12 @@ static int pcie_port_probe_service(struct device *dev)
pciedev = to_pcie_device(dev);
status = driver->probe(pciedev);
- if (!status) {
- dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
- driver->name);
- get_device(dev);
- }
- return status;
+ if (status)
+ return status;
+
+ dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name);
+ get_device(dev);
+ return 0;
}
/**
@@ -539,7 +557,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new)
if (pcie_ports_disabled)
return -ENODEV;
- new->driver.name = (char *)new->name;
+ new->driver.name = new->name;
new->driver.bus = &pcie_port_bus_type;
new->driver.probe = pcie_port_probe_service;
new->driver.remove = pcie_port_remove_service;
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index f9033e190fb..80887eaa066 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/init.h>
#include <linux/pcieport_if.h>
#include <linux/aer.h>
@@ -57,6 +58,15 @@ __setup("pcie_ports=", pcie_port_setup);
/* global data */
+/**
+ * pcie_clear_root_pme_status - Clear root port PME interrupt status.
+ * @dev: PCIe root port or event collector.
+ */
+void pcie_clear_root_pme_status(struct pci_dev *dev)
+{
+ pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
+}
+
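
Note why a "set" helper clears the status here: PME Status in the Root Status register is RW1C, so writing 1 to the bit clears it. A standalone illustration of that register semantic:

#include <stdint.h>
#include <stdio.h>

#define RTSTA_PME 0x10000u		/* PCI_EXP_RTSTA_PME */

static uint32_t rtsta = RTSTA_PME;	/* PME status currently latched */

static void rtsta_write(uint32_t v)	/* emulate the RW1C bit */
{
	rtsta &= ~(v & RTSTA_PME);	/* writing 1 to an RW1C bit clears it */
}

int main(void)
{
	rtsta_write(rtsta | RTSTA_PME);	/* "setting" the bit clears the status */
	printf("rtsta=%08x\n", rtsta);	/* prints rtsta=00000000 */
	return 0;
}
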
static int pcie_portdrv_restore_config(struct pci_dev *dev)
{
int retval;
@@ -69,6 +79,91 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
}
#ifdef CONFIG_PM
+static int pcie_port_resume_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ /*
+ * Some BIOSes forget to clear Root PME Status bits after system wakeup,
+ * which breaks ACPI-based runtime wakeup on PCI Express, so clear those
+ * bits now just in case (shouldn't hurt).
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
+ pcie_clear_root_pme_status(pdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+struct d3cold_info {
+ bool no_d3cold;
+ unsigned int d3cold_delay;
+};
+
+static int pci_dev_d3cold_info(struct pci_dev *pdev, void *data)
+{
+ struct d3cold_info *info = data;
+
+ info->d3cold_delay = max_t(unsigned int, pdev->d3cold_delay,
+ info->d3cold_delay);
+ if (pdev->no_d3cold)
+ info->no_d3cold = true;
+ return 0;
+}
+
+static int pcie_port_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct d3cold_info d3cold_info = {
+ .no_d3cold = false,
+ .d3cold_delay = PCI_PM_D3_WAIT,
+ };
+
+ /*
+ * If any subordinate device disables D3cold, we should not put
+ * the port into D3cold either. The port's D3cold delay should be
+ * the maximum of the delays of all subordinate devices.
+ */
+ pci_walk_bus(pdev->subordinate, pci_dev_d3cold_info, &d3cold_info);
+ pdev->no_d3cold = d3cold_info.no_d3cold;
+ pdev->d3cold_delay = d3cold_info.d3cold_delay;
+ return 0;
+}
+
+static int pcie_port_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int pci_dev_pme_poll(struct pci_dev *pdev, void *data)
+{
+ bool *pme_poll = data;
+
+ if (pdev->pme_poll)
+ *pme_poll = true;
+ return 0;
+}
+
+static int pcie_port_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ bool pme_poll = false;
+
+ /*
+ * If any subordinate device needs PME polling, we should keep
+ * the port in D0, because the port must be in D0 to poll it.
+ */
+ pci_walk_bus(pdev->subordinate, pci_dev_pme_poll, &pme_poll);
+ /* Delay for a short while to prevent too frequent suspend/resume */
+ if (!pme_poll)
+ pm_schedule_suspend(dev, 10);
+ return -EBUSY;
+}
+#else
+#define pcie_port_runtime_suspend NULL
+#define pcie_port_runtime_resume NULL
+#define pcie_port_runtime_idle NULL
+#endif
+
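
The two pci_walk_bus() callbacks above are simple folds over the subordinate bus: pci_dev_d3cold_info() takes the maximum d3cold_delay and ORs together no_d3cold, and pci_dev_pme_poll() ORs together pme_poll. The same fold, sketched standalone with made-up device values:

#include <stdbool.h>
#include <stdio.h>

struct dev_info { unsigned int delay; bool no_d3cold; };

int main(void)
{
	struct dev_info sub[] = { {10, false}, {120, true}, {60, false} };
	unsigned int delay = 10;	/* PCI_PM_D3_WAIT-style starting point */
	bool no_d3cold = false;
	size_t i;

	for (i = 0; i < sizeof(sub) / sizeof(*sub); i++) {
		if (sub[i].delay > delay)
			delay = sub[i].delay;
		no_d3cold |= sub[i].no_d3cold;
	}
	printf("port: d3cold_delay=%u no_d3cold=%d\n", delay, no_d3cold);
	return 0;
}
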
static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.suspend = pcie_port_device_suspend,
.resume = pcie_port_device_resume,
@@ -76,6 +171,10 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.thaw = pcie_port_device_resume,
.poweroff = pcie_port_device_suspend,
.restore = pcie_port_device_resume,
+ .resume_noirq = pcie_port_resume_noirq,
+ .runtime_suspend = pcie_port_runtime_suspend,
+ .runtime_resume = pcie_port_runtime_resume,
+ .runtime_idle = pcie_port_runtime_idle,
};
#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
@@ -93,34 +192,37 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
* this port device.
*
*/
-static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
+static int pcie_portdrv_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
int status;
if (!pci_is_pcie(dev) ||
- ((dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
- (dev->pcie_type != PCI_EXP_TYPE_UPSTREAM) &&
- (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)))
+ ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
return -ENODEV;
if (!dev->irq && dev->pin) {
- dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
- "check vendor BIOS\n", dev->vendor, dev->device);
+ dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; check vendor BIOS\n",
+ dev->vendor, dev->device);
}
status = pcie_port_device_register(dev);
if (status)
return status;
pci_save_state(dev);
-
+ /*
+ * D3cold may not work properly on some PCIe ports, so disable
+ * it by default.
+ */
+ dev->d3cold_allowed = false;
return 0;
}
static void pcie_portdrv_remove(struct pci_dev *dev)
{
pcie_port_device_remove(dev);
- pci_disable_device(dev);
}
static int error_detected_iter(struct device *device, void *data)
@@ -156,11 +258,9 @@ static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
enum pci_channel_state error)
{
struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER};
- int ret;
-
- /* can not fail */
- ret = device_for_each_child(&dev->dev, &data, error_detected_iter);
+ /* get true return value from &data */
+ device_for_each_child(&dev->dev, &data, error_detected_iter);
return data.result;
}
@@ -192,10 +292,9 @@ static int mmio_enabled_iter(struct device *device, void *data)
static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
{
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
- int retval;
/* get true return value from &status */
- retval = device_for_each_child(&dev->dev, &status, mmio_enabled_iter);
+ device_for_each_child(&dev->dev, &status, mmio_enabled_iter);
return status;
}
@@ -227,7 +326,6 @@ static int slot_reset_iter(struct device *device, void *data)
static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
{
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
- int retval;
/* If fatal, restore cfg space for possible link reset at upstream */
if (dev->error_state == pci_channel_io_frozen) {
@@ -238,8 +336,7 @@ static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
}
/* get true return value from &status */
- retval = device_for_each_child(&dev->dev, &status, slot_reset_iter);
-
+ device_for_each_child(&dev->dev, &status, slot_reset_iter);
return status;
}
@@ -265,9 +362,7 @@ static int resume_iter(struct device *device, void *data)
static void pcie_portdrv_err_resume(struct pci_dev *dev)
{
- int retval;
- /* nothing to do with error value, if it ever happens */
- retval = device_for_each_child(&dev->dev, NULL, resume_iter);
+ device_for_each_child(&dev->dev, NULL, resume_iter);
}
/*
@@ -280,11 +375,11 @@ static const struct pci_device_id port_pci_ids[] = { {
};
MODULE_DEVICE_TABLE(pci, port_pci_ids);
-static struct pci_error_handlers pcie_portdrv_err_handler = {
- .error_detected = pcie_portdrv_error_detected,
- .mmio_enabled = pcie_portdrv_mmio_enabled,
- .slot_reset = pcie_portdrv_slot_reset,
- .resume = pcie_portdrv_err_resume,
+static const struct pci_error_handlers pcie_portdrv_err_handler = {
+ .error_detected = pcie_portdrv_error_detected,
+ .mmio_enabled = pcie_portdrv_mmio_enabled,
+ .slot_reset = pcie_portdrv_slot_reset,
+ .resume = pcie_portdrv_err_resume,
};
static struct pci_driver pcie_portdriver = {
@@ -294,15 +389,15 @@ static struct pci_driver pcie_portdriver = {
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
- .err_handler = &pcie_portdrv_err_handler,
+ .err_handler = &pcie_portdrv_err_handler,
- .driver.pm = PCIE_PORTDRV_PM_OPS,
+ .driver.pm = PCIE_PORTDRV_PM_OPS,
};
static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
{
pr_notice("%s detected: will not use MSI for PCIe PME signaling\n",
- d->ident);
+ d->ident);
pcie_pme_disable_msi();
return 0;
}
@@ -316,7 +411,7 @@ static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
.ident = "MSI Wind U-100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
- "MICRO-STAR INTERNATIONAL CO., LTD"),
+ "MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "U-100"),
},
},
@@ -327,10 +422,8 @@ static int __init pcie_portdrv_init(void)
{
int retval;
- if (pcie_ports_disabled) {
- pcie_no_aspm();
- return -EACCES;
- }
+ if (pcie_ports_disabled)
+ return pci_register_driver(&pcie_portdriver);
dmi_check_system(pcie_portdrv_dmi_table);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c84900da3c5..e3cf8a2e629 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -10,15 +10,52 @@
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
+#include <asm-generic/pci-bridge.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR 3
+static struct resource busn_resource = {
+ .name = "PCI busn",
+ .start = 0,
+ .end = 255,
+ .flags = IORESOURCE_BUS,
+};
+
/* Ugh. Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);
+static LIST_HEAD(pci_domain_busn_res_list);
+
+struct pci_domain_busn_res {
+ struct list_head list;
+ struct resource res;
+ int domain_nr;
+};
+
+static struct resource *get_pci_domain_busn_res(int domain_nr)
+{
+ struct pci_domain_busn_res *r;
+
+ list_for_each_entry(r, &pci_domain_busn_res_list, list)
+ if (r->domain_nr == domain_nr)
+ return &r->res;
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
+ r->domain_nr = domain_nr;
+ r->res.start = 0;
+ r->res.end = 0xff;
+ r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
+
+ list_add_tail(&r->list, &pci_domain_busn_res_list);
+
+ return &r->res;
+}
static int find_anything(struct device *dev, void *data)
{
@@ -43,43 +80,6 @@ int no_pci_devices(void)
EXPORT_SYMBOL(no_pci_devices);
/*
- * PCI Bus Class Devices
- */
-static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
- int type,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- const struct cpumask *cpumask;
-
- cpumask = cpumask_of_pcibus(to_pci_bus(dev));
- ret = type?
- cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask) :
- cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask);
- buf[ret++] = '\n';
- buf[ret] = '\0';
- return ret;
-}
-
-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
-}
-
-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
-}
-
-DEVICE_ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL);
-DEVICE_ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL);
-
-/*
* PCI Bus Class
*/
static void release_pcibus_dev(struct device *dev)
@@ -89,12 +89,14 @@ static void release_pcibus_dev(struct device *dev)
if (pci_bus->bridge)
put_device(pci_bus->bridge);
pci_bus_remove_resources(pci_bus);
+ pci_release_bus_of_node(pci_bus);
kfree(pci_bus);
}
static struct class pcibus_class = {
.name = "pci_bus",
.dev_release = &release_pcibus_dev,
+ .dev_groups = pcibus_groups,
};
static int __init pcibus_class_init(void)
@@ -103,21 +105,6 @@ static int __init pcibus_class_init(void)
}
postcore_initcall(pcibus_class_init);
-/*
- * Translate the low bits of the PCI base
- * to the resource type
- */
-static inline unsigned int pci_calc_resource_flags(unsigned int flags)
-{
- if (flags & PCI_BASE_ADDRESS_SPACE_IO)
- return IORESOURCE_IO;
-
- if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
- return IORESOURCE_MEM | IORESOURCE_PREFETCH;
-
- return IORESOURCE_MEM;
-}
-
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
u64 size = mask & maxbase; /* Find the significant bits */
@@ -136,20 +123,41 @@ static u64 pci_size(u64 base, u64 maxbase, u64 mask)
return size;
}
-static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar)
+static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
+ u32 mem_type;
+ unsigned long flags;
+
if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
- res->flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
- return pci_bar_io;
+ flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
+ flags |= IORESOURCE_IO;
+ return flags;
}
- res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
+ flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
+ flags |= IORESOURCE_MEM;
+ if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
+ flags |= IORESOURCE_PREFETCH;
- if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
- return pci_bar_mem64;
- return pci_bar_mem32;
+ mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+ switch (mem_type) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ /* 1M mem BAR treated as 32-bit BAR */
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ flags |= IORESOURCE_MEM_64;
+ break;
+ default:
+ /* mem unknown type treated as 32-bit BAR */
+ break;
+ }
+ return flags;
}
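decode_bar() now returns IORESOURCE_* flags straight from the BAR's low bits instead of the old enum pci_bar_type. A self-contained user-space sketch of the same bit tests, with the PCI_BASE_ADDRESS_* masks copied from pci_regs.h so it compiles on its own; the sample BAR value is made up:

#include <stdio.h>

/* Masks as defined in pci_regs.h */
#define PCI_BASE_ADDRESS_SPACE		0x01	/* 0 = memory, 1 = I/O */
#define PCI_BASE_ADDRESS_SPACE_IO	0x01
#define PCI_BASE_ADDRESS_MEM_TYPE_MASK	0x06
#define PCI_BASE_ADDRESS_MEM_TYPE_64	0x04
#define PCI_BASE_ADDRESS_MEM_PREFETCH	0x08

int main(void)
{
	unsigned int bar = 0xf0000004;	/* made-up: 64-bit non-prefetchable mem BAR */

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		printf("I/O BAR\n");
	} else {
		printf("memory BAR%s%s\n",
		       (bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
				PCI_BASE_ADDRESS_MEM_TYPE_64 ? ", 64-bit" : "",
		       bar & PCI_BASE_ADDRESS_MEM_PREFETCH ? ", prefetchable" : "");
	}
	return 0;
}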
+#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
+
/**
* pci_read_base - read a PCI BAR
* @dev: the PCI device
@@ -160,17 +168,23 @@ static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar)
* Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
*/
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int pos)
+ struct resource *res, unsigned int pos)
{
u32 l, sz, mask;
+ u64 l64, sz64, mask64;
u16 orig_cmd;
+ struct pci_bus_region region, inverted_region;
+ bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+ /* No printks while decoding is disabled! */
if (!dev->mmio_always_on) {
pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
- pci_write_config_word(dev, PCI_COMMAND,
- orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
+ if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
+ }
}
res->name = pci_name(dev);
@@ -180,9 +194,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
pci_read_config_dword(dev, pos, &sz);
pci_write_config_dword(dev, pos, l);
- if (!dev->mmio_always_on)
- pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
-
/*
* All bits set in sz means the device isn't working properly.
* If the BAR isn't implemented, all bits must be 0. If it's a
@@ -200,11 +211,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
l = 0;
if (type == pci_bar_unknown) {
- type = decode_bar(res, l);
- res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
- if (type == pci_bar_io) {
+ res->flags = decode_bar(dev, l);
+ res->flags |= IORESOURCE_SIZEALIGN;
+ if (res->flags & IORESOURCE_IO) {
l &= PCI_BASE_ADDRESS_IO_MASK;
- mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
+ mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
} else {
l &= PCI_BASE_ADDRESS_MEM_MASK;
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
@@ -215,10 +226,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
mask = (u32)PCI_ROM_ADDRESS_MASK;
}
- if (type == pci_bar_mem64) {
- u64 l64 = l;
- u64 sz64 = sz;
- u64 mask64 = mask | (u64)~0 << 32;
+ if (res->flags & IORESOURCE_MEM_64) {
+ l64 = l;
+ sz64 = sz;
+ mask64 = mask | (u64)~0 << 32;
pci_read_config_dword(dev, pos + 4, &l);
pci_write_config_dword(dev, pos + 4, ~0);
@@ -233,24 +244,25 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
if (!sz64)
goto fail;
- if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
- dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n",
- pos);
- goto fail;
+ if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) &&
+ sz64 > 0x100000000ULL) {
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+ res->start = 0;
+ res->end = 0;
+ bar_too_big = true;
+ goto out;
}
- res->flags |= IORESOURCE_MEM_64;
- if ((sizeof(resource_size_t) < 8) && l) {
- /* Address above 32-bit boundary; disable the BAR */
- pci_write_config_dword(dev, pos, 0);
- pci_write_config_dword(dev, pos + 4, 0);
+ if ((sizeof(dma_addr_t) < 8) && l) {
+ /* Above 32-bit boundary; try to reallocate */
+ res->flags |= IORESOURCE_UNSET;
res->start = 0;
res->end = sz64;
+ bar_too_high = true;
+ goto out;
} else {
- res->start = l64;
- res->end = l64 + sz64;
- dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
- pos, res);
+ region.start = l64;
+ region.end = l64 + sz64;
}
} else {
sz = pci_size(l, sz, mask);
@@ -258,17 +270,54 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
if (!sz)
goto fail;
- res->start = l;
- res->end = l + sz;
+ region.start = l;
+ region.end = l + sz;
+ }
+
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ pcibios_resource_to_bus(dev->bus, &inverted_region, res);
- dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
+ /*
+ * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
+ * the corresponding resource address (the physical address used by
+ * the CPU). Converting that resource address back to a bus address
+ * should yield the original BAR value:
+ *
+ * resource_to_bus(bus_to_resource(A)) == A
+ *
+ * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
+ * be claimed by the device.
+ */
+ if (inverted_region.start != region.start) {
+ res->flags |= IORESOURCE_UNSET;
+ res->start = 0;
+ res->end = region.end - region.start;
+ bar_invalid = true;
}
- out:
- return (type == pci_bar_mem64) ? 1 : 0;
- fail:
- res->flags = 0;
goto out;
+
+
+fail:
+ res->flags = 0;
+out:
+ if (!dev->mmio_always_on &&
+ (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
+ pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
+
+ if (bar_too_big)
+ dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
+ pos, (unsigned long long) sz64);
+ if (bar_too_high)
+ dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4G (bus address %#010llx)\n",
+ pos, (unsigned long long) l64);
+ if (bar_invalid)
+ dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
+ pos, (unsigned long long) region.start);
+ if (res->flags)
+ dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
+
+ return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
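For reference, the sizing readback that feeds pci_size() works by writing all-ones to the BAR and deriving the decode size from the lowest address bit the device lets stick. A stand-alone sketch of that arithmetic; the readback value is a made-up 1 MB memory BAR:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask = ~0xfULL;		/* PCI_BASE_ADDRESS_MEM_MASK */
	uint64_t readback = 0xfff00000;		/* value read back after writing ~0 */
	uint64_t size = mask & readback;	/* keep the significant bits */

	size = (size & ~(size - 1)) - 1;	/* lowest set bit, minus 1 */
	printf("BAR decodes %llu bytes\n", (unsigned long long)size + 1);
	/* prints: BAR decodes 1048576 bytes */
	return 0;
}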
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
@@ -291,80 +340,85 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
}
}
-static void __devinit pci_read_bridge_io(struct pci_bus *child)
+static void pci_read_bridge_io(struct pci_bus *child)
{
struct pci_dev *dev = child->self;
u8 io_base_lo, io_limit_lo;
- unsigned long base, limit;
+ unsigned long io_mask, io_granularity, base, limit;
+ struct pci_bus_region region;
struct resource *res;
+ io_mask = PCI_IO_RANGE_MASK;
+ io_granularity = 0x1000;
+ if (dev->io_window_1k) {
+ /* Support 1K I/O space granularity */
+ io_mask = PCI_IO_1K_RANGE_MASK;
+ io_granularity = 0x400;
+ }
+
res = child->resource[0];
pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
- base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
- limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;
+ base = (io_base_lo & io_mask) << 8;
+ limit = (io_limit_lo & io_mask) << 8;
if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
u16 io_base_hi, io_limit_hi;
+
pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
- base |= (io_base_hi << 16);
- limit |= (io_limit_hi << 16);
+ base |= ((unsigned long) io_base_hi << 16);
+ limit |= ((unsigned long) io_limit_hi << 16);
}
- if (base && base <= limit) {
+ if (base <= limit) {
res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
- if (!res->start)
- res->start = base;
- if (!res->end)
- res->end = limit + 0xfff;
+ region.start = base;
+ region.end = limit + io_granularity - 1;
+ pcibios_bus_to_resource(dev->bus, res, &region);
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
- } else {
- dev_printk(KERN_DEBUG, &dev->dev,
- " bridge window [io %#06lx-%#06lx] (disabled)\n",
- base, limit);
}
}
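To make the io_window_1k change concrete: with 1K granularity, bits 3:2 of the bridge's I/O base/limit registers carry address bits 11:10 instead of being reserved, so windows can start on 0x400 boundaries. A sketch of both decodes under made-up register values; the two masks are the real pci_regs.h constants:

#include <stdio.h>

#define PCI_IO_RANGE_MASK	0xf0	/* standard: address bits 15:12 */
#define PCI_IO_1K_RANGE_MASK	0xfc	/* 1K decode: address bits 15:10 */

int main(void)
{
	unsigned char io_base_lo = 0x15;	/* made-up register contents */
	unsigned char io_limit_lo = 0x19;
	int io_1k = 1;				/* bridge advertises 1K granularity */
	unsigned long mask = io_1k ? PCI_IO_1K_RANGE_MASK : PCI_IO_RANGE_MASK;
	unsigned long gran = io_1k ? 0x400 : 0x1000;
	unsigned long base = (io_base_lo & mask) << 8;
	unsigned long limit = ((io_limit_lo & mask) << 8) + gran - 1;

	printf("window [io 0x%04lx-0x%04lx]\n", base, limit);
	/* 1K decode: [io 0x1400-0x1bff]; legacy gives [io 0x1000-0x1fff] */
	return 0;
}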
-static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
+static void pci_read_bridge_mmio(struct pci_bus *child)
{
struct pci_dev *dev = child->self;
u16 mem_base_lo, mem_limit_lo;
unsigned long base, limit;
+ struct pci_bus_region region;
struct resource *res;
res = child->resource[1];
pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
- base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
- limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
- if (base && base <= limit) {
+ base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
+ limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
+ if (base <= limit) {
res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
- res->start = base;
- res->end = limit + 0xfffff;
+ region.start = base;
+ region.end = limit + 0xfffff;
+ pcibios_bus_to_resource(dev->bus, res, &region);
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
- } else {
- dev_printk(KERN_DEBUG, &dev->dev,
- " bridge window [mem %#010lx-%#010lx] (disabled)\n",
- base, limit + 0xfffff);
}
}
-static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
+static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
struct pci_dev *dev = child->self;
u16 mem_base_lo, mem_limit_lo;
unsigned long base, limit;
+ struct pci_bus_region region;
struct resource *res;
res = child->resource[2];
pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
- base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
- limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
+ base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
+ limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
u32 mem_base_hi, mem_limit_hi;
+
pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
@@ -375,33 +429,29 @@ static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
*/
if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
- base |= ((long) mem_base_hi) << 32;
- limit |= ((long) mem_limit_hi) << 32;
+ base |= ((unsigned long) mem_base_hi) << 32;
+ limit |= ((unsigned long) mem_limit_hi) << 32;
#else
if (mem_base_hi || mem_limit_hi) {
- dev_err(&dev->dev, "can't handle 64-bit "
- "address space for bridge\n");
+ dev_err(&dev->dev, "can't handle 64-bit address space for bridge\n");
return;
}
#endif
}
}
- if (base && base <= limit) {
+ if (base <= limit) {
res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
IORESOURCE_MEM | IORESOURCE_PREFETCH;
if (res->flags & PCI_PREF_RANGE_TYPE_64)
res->flags |= IORESOURCE_MEM_64;
- res->start = base;
- res->end = limit + 0xfffff;
+ region.start = base;
+ region.end = limit + 0xfffff;
+ pcibios_bus_to_resource(dev->bus, res, &region);
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
- } else {
- dev_printk(KERN_DEBUG, &dev->dev,
- " bridge window [mem %#010lx-%#010lx pref] (disabled)\n",
- base, limit + 0xfffff);
}
}
-void __devinit pci_read_bridge_bases(struct pci_bus *child)
+void pci_read_bridge_bases(struct pci_bus *child)
{
struct pci_dev *dev = child->self;
struct resource *res;
@@ -410,8 +460,8 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
return;
- dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n",
- child->secondary, child->subordinate,
+ dev_info(&dev->dev, "PCI bridge to %pR%s\n",
+ &child->busn_res,
dev->transparent ? " (subtractive decode)" : "");
pci_bus_remove_resources(child);
@@ -424,7 +474,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
if (dev->transparent) {
pci_bus_for_each_resource(child->parent, res, i) {
- if (res) {
+ if (res && res->flags) {
pci_bus_add_resource(child, res,
PCI_SUBTRACTIVE_DECODE);
dev_printk(KERN_DEBUG, &dev->dev,
@@ -435,24 +485,50 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
}
}
-static struct pci_bus * pci_alloc_bus(void)
+static struct pci_bus *pci_alloc_bus(void)
{
struct pci_bus *b;
b = kzalloc(sizeof(*b), GFP_KERNEL);
- if (b) {
- INIT_LIST_HEAD(&b->node);
- INIT_LIST_HEAD(&b->children);
- INIT_LIST_HEAD(&b->devices);
- INIT_LIST_HEAD(&b->slots);
- INIT_LIST_HEAD(&b->resources);
- b->max_bus_speed = PCI_SPEED_UNKNOWN;
- b->cur_bus_speed = PCI_SPEED_UNKNOWN;
- }
+ if (!b)
+ return NULL;
+
+ INIT_LIST_HEAD(&b->node);
+ INIT_LIST_HEAD(&b->children);
+ INIT_LIST_HEAD(&b->devices);
+ INIT_LIST_HEAD(&b->slots);
+ INIT_LIST_HEAD(&b->resources);
+ b->max_bus_speed = PCI_SPEED_UNKNOWN;
+ b->cur_bus_speed = PCI_SPEED_UNKNOWN;
return b;
}
-static unsigned char pcix_bus_speed[] = {
+static void pci_release_host_bridge_dev(struct device *dev)
+{
+ struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
+
+ if (bridge->release_fn)
+ bridge->release_fn(bridge);
+
+ pci_free_resource_list(&bridge->windows);
+
+ kfree(bridge);
+}
+
+static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
+{
+ struct pci_host_bridge *bridge;
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return NULL;
+
+ INIT_LIST_HEAD(&bridge->windows);
+ bridge->bus = b;
+ return bridge;
+}
+
+static const unsigned char pcix_bus_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCI_SPEED_66MHz_PCIX, /* 1 */
PCI_SPEED_100MHz_PCIX, /* 2 */
@@ -471,7 +547,7 @@ static unsigned char pcix_bus_speed[] = {
PCI_SPEED_133MHz_PCIX_533 /* F */
};
-static unsigned char pcie_link_speed[] = {
+const unsigned char pcie_link_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCIE_SPEED_2_5GT, /* 1 */
PCIE_SPEED_5_0GT, /* 2 */
@@ -492,7 +568,7 @@ static unsigned char pcie_link_speed[] = {
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
- bus->cur_bus_speed = pcie_link_speed[linksta & 0xf];
+ bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
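pcie_link_speed[] is indexed by the Current Link Speed field of the Link Status register, which is why masking with PCI_EXP_LNKSTA_CLS (0x000f) replaces the bare 0xf. A small stand-alone sketch of the lookup; the LNKSTA value is invented:

#include <stdio.h>

#define PCI_EXP_LNKSTA_CLS	0x000f	/* Current Link Speed field */

int main(void)
{
	unsigned short linksta = 0x1042;	/* made-up LNKSTA: CLS = 2 */
	static const char *speed[] = {
		"unknown", "2.5 GT/s", "5.0 GT/s", "8.0 GT/s"
	};
	unsigned int cls = linksta & PCI_EXP_LNKSTA_CLS;

	printf("current link speed: %s\n", cls < 4 ? speed[cls] : "unknown");
	return 0;
}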
@@ -516,7 +592,7 @@ static enum pci_bus_speed agp_speed(int agp3, int agpstat)
index = 1;
else
goto out;
-
+
if (agp3) {
index += 2;
if (index == 5)
@@ -527,7 +603,6 @@ static enum pci_bus_speed agp_speed(int agp3, int agpstat)
return agp_speeds[index];
}
-
static void pci_set_bus_speed(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
@@ -550,47 +625,48 @@ static void pci_set_bus_speed(struct pci_bus *bus)
if (pos) {
u16 status;
enum pci_bus_speed max;
- pci_read_config_word(bridge, pos + 2, &status);
- if (status & 0x8000) {
+ pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
+ &status);
+
+ if (status & PCI_X_SSTATUS_533MHZ) {
max = PCI_SPEED_133MHz_PCIX_533;
- } else if (status & 0x4000) {
+ } else if (status & PCI_X_SSTATUS_266MHZ) {
max = PCI_SPEED_133MHz_PCIX_266;
- } else if (status & 0x0002) {
- if (((status >> 12) & 0x3) == 2) {
+ } else if (status & PCI_X_SSTATUS_133MHZ) {
+ if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
max = PCI_SPEED_133MHz_PCIX_ECC;
- } else {
+ else
max = PCI_SPEED_133MHz_PCIX;
- }
} else {
max = PCI_SPEED_66MHz_PCIX;
}
bus->max_bus_speed = max;
- bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf];
+ bus->cur_bus_speed = pcix_bus_speed[
+ (status & PCI_X_SSTATUS_FREQ) >> 6];
return;
}
- pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
- if (pos) {
+ if (pci_is_pcie(bridge)) {
u32 linkcap;
u16 linksta;
- pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap);
- bus->max_bus_speed = pcie_link_speed[linkcap & 0xf];
+ pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
+ bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
- pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta);
+ pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
pcie_update_link_speed(bus, linksta);
}
}
-
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
struct pci_bus *child;
int i;
+ int ret;
/*
* Allocate a new bus, and inherit stuff from the parent..
@@ -601,12 +677,12 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
child->parent = parent;
child->ops = parent->ops;
+ child->msi = parent->msi;
child->sysdata = parent->sysdata;
child->bus_flags = parent->bus_flags;
/* initialize some portions of the bus device, but don't register it
- * now as the parent is not properly set up yet. This device will get
- * registered later in pci_bus_add_devices()
+ * now as the parent is not properly set up yet.
*/
child->dev.class = &pcibus_class;
dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
@@ -615,16 +691,19 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
* Set up the primary, secondary and subordinate
* bus numbers.
*/
- child->number = child->secondary = busnr;
- child->primary = parent->secondary;
- child->subordinate = 0xff;
+ child->number = child->busn_res.start = busnr;
+ child->primary = parent->busn_res.start;
+ child->busn_res.end = 0xff;
- if (!bridge)
- return child;
+ if (!bridge) {
+ child->dev.parent = parent->bridge;
+ goto add_dev;
+ }
child->self = bridge;
child->bridge = get_device(&bridge->dev);
-
+ child->dev.parent = child->bridge;
+ pci_set_bus_of_node(child);
pci_set_bus_speed(child);
/* Set up default resource pointers and names.. */
@@ -634,10 +713,20 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
}
bridge->subordinate = child;
+add_dev:
+ ret = device_register(&child->dev);
+ WARN_ON(ret < 0);
+
+ pcibios_add_bus(child);
+
+ /* Create legacy_io and legacy_mem files for this bus */
+ pci_create_legacy_files(child);
+
return child;
}
-struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
+struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
+ int busnr)
{
struct pci_bus *child;
@@ -649,22 +738,7 @@ struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *de
}
return child;
}
-
-static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
-{
- struct pci_bus *parent = child->parent;
-
- /* Attempts to fix that up are really dangerous unless
- we're going to re-assign all bus numbers. */
- if (!pcibios_assign_all_busses())
- return;
-
- while (parent->parent && parent->subordinate < max) {
- parent->subordinate = max;
- pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
- parent = parent->parent;
- }
-}
+EXPORT_SYMBOL(pci_add_new_bus);
/*
* If it's a bridge, configure it and scan the bus behind it.
@@ -676,7 +750,7 @@ static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
* them, we proceed to assigning numbers to the remaining buses in
* order to avoid overlaps between old and new bus numbers.
*/
-int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
+int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
struct pci_bus *child;
int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
@@ -693,15 +767,22 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
secondary, subordinate, pass);
+ if (!primary && (primary != bus->number) && secondary && subordinate) {
+ dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
+ primary = bus->number;
+ }
+
/* Check if setup is sensible at all */
if (!pass &&
- (primary != bus->number || secondary <= bus->number)) {
- dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
+ (primary != bus->number || secondary <= bus->number ||
+ secondary > subordinate || subordinate > bus->busn_res.end)) {
+ dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
+ secondary, subordinate);
broken = 1;
}
/* Disable MasterAbortMode during probing to avoid reporting
- of bus errors (in some architectures) */
+ of bus errors (in some architectures) */
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
@@ -717,11 +798,10 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
goto out;
/*
- * If we already got to this bus through a different bridge,
- * don't re-add it. This can happen with the i450NX chipset.
- *
- * However, we continue to descend down the hierarchy and
- * scan remaining child buses.
+ * The bus might already exist for two reasons: Either we are
+ * rescanning the bus or the bus is reachable through more than
+ * one bridge. The second case can happen with the i450NX
+ * chipset.
*/
child = pci_find_bus(pci_domain_nr(bus), secondary);
if (!child) {
@@ -729,22 +809,24 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
if (!child)
goto out;
child->primary = primary;
- child->subordinate = subordinate;
+ pci_bus_insert_busn_res(child, secondary, subordinate);
child->bridge_ctl = bctl;
}
cmax = pci_scan_child_bus(child);
- if (cmax > max)
- max = cmax;
- if (child->subordinate > max)
- max = child->subordinate;
+ if (cmax > subordinate)
+ dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
+ subordinate, cmax);
+ /* subordinate should equal child->busn_res.end */
+ if (subordinate > max)
+ max = subordinate;
} else {
/*
* We need to assign a number to this bus which we always
* do in the second pass.
*/
if (!pass) {
- if (pcibios_assign_all_busses() || broken)
+ if (pcibios_assign_all_busses() || broken || is_cardbus)
/* Temporarily disable forwarding of the
configuration cycles on all bridges in
this bus segment to avoid possible
@@ -756,18 +838,29 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
goto out;
}
+ if (max >= bus->busn_res.end) {
+ dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n",
+ max, &bus->busn_res);
+ goto out;
+ }
+
/* Clear errors */
pci_write_config_word(dev, PCI_STATUS, 0xffff);
- /* Prevent assigning a bus number that already exists.
- * This can happen when a bridge is hot-plugged */
- if (pci_find_bus(pci_domain_nr(bus), max+1))
- goto out;
- child = pci_add_new_bus(bus, dev, ++max);
+ /* The bus will already exist if we are rescanning */
+ child = pci_find_bus(pci_domain_nr(bus), max+1);
+ if (!child) {
+ child = pci_add_new_bus(bus, dev, max+1);
+ if (!child)
+ goto out;
+ pci_bus_insert_busn_res(child, max+1,
+ bus->busn_res.end);
+ }
+ max++;
buses = (buses & 0xff000000)
| ((unsigned int)(child->primary) << 0)
- | ((unsigned int)(child->secondary) << 8)
- | ((unsigned int)(child->subordinate) << 16);
+ | ((unsigned int)(child->busn_res.start) << 8)
+ | ((unsigned int)(child->busn_res.end) << 16);
/*
* yenta.c forces a secondary latency timer of 176.
@@ -777,7 +870,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
buses &= ~0xff000000;
buses |= CARDBUS_LATENCY_TIMER << 24;
}
-
+
/*
* We need to blast all three values with a single write.
*/
@@ -785,35 +878,22 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
if (!is_cardbus) {
child->bridge_ctl = bctl;
- /*
- * Adjust subordinate busnr in parent buses.
- * We do this before scanning for children because
- * some devices may not be detected if the bios
- * was lazy.
- */
- pci_fixup_parent_subordinate_busnr(child, max);
- /* Now we can scan all subordinate buses... */
max = pci_scan_child_bus(child);
- /*
- * now fix it up again since we have found
- * the real value of max.
- */
- pci_fixup_parent_subordinate_busnr(child, max);
} else {
/*
* For CardBus bridges, we leave 4 bus numbers
* as cards with a PCI-to-PCI bridge can be
* inserted later.
*/
- for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
+ for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
struct pci_bus *parent = bus;
if (pci_find_bus(pci_domain_nr(bus),
max+i+1))
break;
while (parent->parent) {
if ((!pcibios_assign_all_busses()) &&
- (parent->subordinate > max) &&
- (parent->subordinate <= max+i)) {
+ (parent->busn_res.end > max) &&
+ (parent->busn_res.end <= max+i)) {
j = 1;
}
parent = parent->parent;
@@ -829,12 +909,16 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
}
}
max += i;
- pci_fixup_parent_subordinate_busnr(child, max);
}
/*
* Set the subordinate bus number to its real value.
*/
- child->subordinate = max;
+ if (max > bus->busn_res.end) {
+ dev_warn(&dev->dev, "max busn %02x is outside %pR\n",
+ max, &bus->busn_res);
+ max = bus->busn_res.end;
+ }
+ pci_bus_update_busn_res_end(child, max);
pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
}
@@ -844,19 +928,18 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
/* Has only triggered on CardBus, fixup is in yenta_socket */
while (bus->parent) {
- if ((child->subordinate > bus->subordinate) ||
- (child->number > bus->subordinate) ||
+ if ((child->busn_res.end > bus->busn_res.end) ||
+ (child->number > bus->busn_res.end) ||
(child->number < bus->number) ||
- (child->subordinate < bus->number)) {
- dev_info(&child->dev, "[bus %02x-%02x] %s "
- "hidden behind%s bridge %s [bus %02x-%02x]\n",
- child->number, child->subordinate,
- (bus->number > child->subordinate &&
- bus->subordinate < child->number) ?
+ (child->busn_res.end < bus->number)) {
+ dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
+ &child->busn_res,
+ (bus->number > child->busn_res.end &&
+ bus->busn_res.end < child->number) ?
"wholly" : "partially",
bus->self->transparent ? " transparent" : "",
dev_name(&bus->dev),
- bus->number, bus->subordinate);
+ &bus->busn_res);
}
bus = bus->parent;
}
@@ -866,6 +949,7 @@ out:
return max;
}
+EXPORT_SYMBOL(pci_scan_bridge);
/*
* Read interrupt line and base address registers.
@@ -890,36 +974,119 @@ void set_pcie_port_type(struct pci_dev *pdev)
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!pos)
return;
- pdev->is_pcie = 1;
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
- pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+ pdev->pcie_flags_reg = reg16;
+ pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
+ pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
}
void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
- int pos;
- u16 reg16;
u32 reg32;
- pos = pci_pcie_cap(pdev);
- if (!pos)
- return;
- pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
- if (!(reg16 & PCI_EXP_FLAGS_SLOT))
- return;
- pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &reg32);
+ pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
if (reg32 & PCI_EXP_SLTCAP_HPC)
pdev->is_hotplug_bridge = 1;
}
+/**
+ * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
+ * @dev: PCI device
+ *
+ * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
+ * when forwarding a type1 configuration request the bridge must check that
+ * the extended register address field is zero. The bridge is not permitted
+ * to forward the transaction and must handle it as an Unsupported Request.
+ * Some bridges do not follow this rule and simply drop the extended register
+ * bits, resulting in the standard config space being aliased, every 256
+ * bytes across the entire configuration space. Test for this condition by
+ * comparing the first dword of each potential alias to the vendor/device ID.
+ * Known offenders:
+ * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
+ * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
+ */
+static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_QUIRKS
+ int pos;
+ u32 header, tmp;
+
+ pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
+
+ for (pos = PCI_CFG_SPACE_SIZE;
+ pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
+ if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
+ || header != tmp)
+ return false;
+ }
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+/**
+ * pci_cfg_space_size - get the configuration space size of the PCI device.
+ * @dev: PCI device
+ *
+ * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
+ * have 4096 bytes. Even if the device is capable, that doesn't mean we can
+ * access it. Maybe we don't have a way to generate extended config space
+ * accesses, or the device is behind a reverse Express bridge. So we try
+ * reading the dword at 0x100 which must either be 0 or a valid extended
+ * capability header.
+ */
+static int pci_cfg_space_size_ext(struct pci_dev *dev)
+{
+ u32 status;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
+ goto fail;
+ if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
+ goto fail;
+
+ return PCI_CFG_SPACE_EXP_SIZE;
+
+ fail:
+ return PCI_CFG_SPACE_SIZE;
+}
+
+int pci_cfg_space_size(struct pci_dev *dev)
+{
+ int pos;
+ u32 status;
+ u16 class;
+
+ class = dev->class >> 8;
+ if (class == PCI_CLASS_BRIDGE_HOST)
+ return pci_cfg_space_size_ext(dev);
+
+ if (!pci_is_pcie(dev)) {
+ pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!pos)
+ goto fail;
+
+ pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
+ if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
+ goto fail;
+ }
+
+ return pci_cfg_space_size_ext(dev);
+
+ fail:
+ return PCI_CFG_SPACE_SIZE;
+}
+
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
/**
* pci_setup_device - fill in class and map information of a device
* @dev: the device structure to fill
*
- * Initialize the device structure with information about the device's
+ * Initialize the device structure with information about the device's
* vendor,class,memory and IO-space addresses,IRQ lines etc.
* Called at initialisation of the PCI subsystem and by CardBus services.
* Returns 0 on success and negative if unknown type of device (not normal,
@@ -931,6 +1098,8 @@ int pci_setup_device(struct pci_dev *dev)
u8 hdr_type;
struct pci_slot *slot;
int pos = 0;
+ struct pci_bus_region region;
+ struct resource *res;
if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
return -EIO;
@@ -957,12 +1126,10 @@ int pci_setup_device(struct pci_dev *dev)
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
dev->revision = class & 0xff;
- class >>= 8; /* upper 3 bytes */
- dev->class = class;
- class >>= 8;
+ dev->class = class >> 8; /* upper 3 bytes */
- dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %d class %#08x\n",
- dev->vendor, dev->device, dev->hdr_type, class);
+ dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
+ dev->vendor, dev->device, dev->hdr_type, dev->class);
/* need to have dev->class ready */
dev->cfg_size = pci_cfg_space_size(dev);
@@ -985,29 +1152,45 @@ int pci_setup_device(struct pci_dev *dev)
pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
/*
- * Do the ugly legacy mode stuff here rather than broken chip
- * quirk code. Legacy mode ATA controllers have fixed
- * addresses. These are not always echoed in BAR0-3, and
- * BAR0-3 in a few cases contain junk!
+ * Do the ugly legacy mode stuff here rather than broken chip
+ * quirk code. Legacy mode ATA controllers have fixed
+ * addresses. These are not always echoed in BAR0-3, and
+ * BAR0-3 in a few cases contain junk!
*/
if (class == PCI_CLASS_STORAGE_IDE) {
u8 progif;
pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
if ((progif & 1) == 0) {
- dev->resource[0].start = 0x1F0;
- dev->resource[0].end = 0x1F7;
- dev->resource[0].flags = LEGACY_IO_RESOURCE;
- dev->resource[1].start = 0x3F6;
- dev->resource[1].end = 0x3F6;
- dev->resource[1].flags = LEGACY_IO_RESOURCE;
+ region.start = 0x1F0;
+ region.end = 0x1F7;
+ res = &dev->resource[0];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
+ res);
+ region.start = 0x3F6;
+ region.end = 0x3F6;
+ res = &dev->resource[1];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
+ res);
}
if ((progif & 4) == 0) {
- dev->resource[2].start = 0x170;
- dev->resource[2].end = 0x177;
- dev->resource[2].flags = LEGACY_IO_RESOURCE;
- dev->resource[3].start = 0x376;
- dev->resource[3].end = 0x376;
- dev->resource[3].flags = LEGACY_IO_RESOURCE;
+ region.start = 0x170;
+ region.end = 0x177;
+ res = &dev->resource[2];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
+ res);
+ region.start = 0x376;
+ region.end = 0x376;
+ res = &dev->resource[3];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
+ res);
}
}
break;
@@ -1017,7 +1200,7 @@ int pci_setup_device(struct pci_dev *dev)
goto bad;
/* The PCI-to-PCI bridge spec requires that subtractive
decoding (i.e. transparent) bridge must have programming
- interface code of 0x01. */
+ interface code of 0x01. */
pci_read_irq(dev);
dev->transparent = ((dev->class & 0xff) == 1);
pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
@@ -1039,13 +1222,13 @@ int pci_setup_device(struct pci_dev *dev)
break;
default: /* unknown header */
- dev_err(&dev->dev, "unknown header type %02x, "
- "ignoring device\n", dev->hdr_type);
+ dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
+ dev->hdr_type);
return -EIO;
bad:
- dev_err(&dev->dev, "ignoring class %02x (doesn't match header "
- "type %02x)\n", class, dev->hdr_type);
+ dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
+ dev->class, dev->hdr_type);
dev->class = PCI_CLASS_NOT_DEFINED;
}
@@ -1057,6 +1240,7 @@ static void pci_release_capabilities(struct pci_dev *dev)
{
pci_vpd_release(dev);
pci_iov_release(dev);
+ pci_free_cap_save_buffers(dev);
}
/**
@@ -1072,69 +1256,14 @@ static void pci_release_dev(struct device *dev)
pci_dev = to_pci_dev(dev);
pci_release_capabilities(pci_dev);
+ pci_release_of_node(pci_dev);
+ pcibios_release_device(pci_dev);
+ pci_bus_put(pci_dev->bus);
+ kfree(pci_dev->driver_override);
kfree(pci_dev);
}
-/**
- * pci_cfg_space_size - get the configuration space size of the PCI device.
- * @dev: PCI device
- *
- * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
- * have 4096 bytes. Even if the device is capable, that doesn't mean we can
- * access it. Maybe we don't have a way to generate extended config space
- * accesses, or the device is behind a reverse Express bridge. So we try
- * reading the dword at 0x100 which must either be 0 or a valid extended
- * capability header.
- */
-int pci_cfg_space_size_ext(struct pci_dev *dev)
-{
- u32 status;
- int pos = PCI_CFG_SPACE_SIZE;
-
- if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
- goto fail;
- if (status == 0xffffffff)
- goto fail;
-
- return PCI_CFG_SPACE_EXP_SIZE;
-
- fail:
- return PCI_CFG_SPACE_SIZE;
-}
-
-int pci_cfg_space_size(struct pci_dev *dev)
-{
- int pos;
- u32 status;
- u16 class;
-
- class = dev->class >> 8;
- if (class == PCI_CLASS_BRIDGE_HOST)
- return pci_cfg_space_size_ext(dev);
-
- pos = pci_pcie_cap(dev);
- if (!pos) {
- pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
- if (!pos)
- goto fail;
-
- pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
- if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
- goto fail;
- }
-
- return pci_cfg_space_size_ext(dev);
-
- fail:
- return PCI_CFG_SPACE_SIZE;
-}
-
-static void pci_release_bus_bridge_dev(struct device *dev)
-{
- kfree(dev);
-}
-
-struct pci_dev *alloc_pci_dev(void)
+struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -1143,55 +1272,72 @@ struct pci_dev *alloc_pci_dev(void)
return NULL;
INIT_LIST_HEAD(&dev->bus_list);
+ dev->dev.type = &pci_dev_type;
+ dev->bus = pci_bus_get(bus);
return dev;
}
-EXPORT_SYMBOL(alloc_pci_dev);
+EXPORT_SYMBOL(pci_alloc_dev);
-/*
- * Read the config data for a PCI device, sanity-check it
- * and fill in the dev structure...
- */
-static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
+bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
+ int crs_timeout)
{
- struct pci_dev *dev;
- u32 l;
int delay = 1;
- if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
- return NULL;
+ if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
+ return false;
/* some broken boards return 0 or ~0 if a slot is empty: */
- if (l == 0xffffffff || l == 0x00000000 ||
- l == 0x0000ffff || l == 0xffff0000)
- return NULL;
+ if (*l == 0xffffffff || *l == 0x00000000 ||
+ *l == 0x0000ffff || *l == 0xffff0000)
+ return false;
/* Configuration request Retry Status */
- while (l == 0xffff0001) {
+ while (*l == 0xffff0001) {
+ if (!crs_timeout)
+ return false;
+
msleep(delay);
delay *= 2;
- if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
- return NULL;
+ if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
+ return false;
/* Card hasn't responded in 60 seconds? Must be stuck. */
- if (delay > 60 * 1000) {
- printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
- "responding\n", pci_domain_nr(bus),
- bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn));
- return NULL;
+ if (delay > crs_timeout) {
+ printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
+ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn));
+ return false;
}
}
- dev = alloc_pci_dev();
+ return true;
+}
+EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
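The CRS loop above doubles its delay on each retry, so even the default 60-second budget costs at most about sixteen extra config reads. A hedged sketch of how a caller might use the newly exported helper with a shorter budget; example_probe_fn() is hypothetical, only pci_bus_read_dev_vendor_id() is real:

#include <linux/pci.h>

/* Hypothetical caller: give a slow-starting device roughly 1s, not 60s. */
static int example_probe_fn(struct pci_bus *bus, int devfn)
{
	u32 id;

	if (!pci_bus_read_dev_vendor_id(bus, devfn, &id, 1000))
		return -ENODEV;	/* empty slot, or still returning CRS */

	pr_info("found device %04x:%04x\n", id & 0xffff, id >> 16);
	return 0;
}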
+
+/*
+ * Read the config data for a PCI device, sanity-check it
+ * and fill in the dev structure...
+ */
+static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
+{
+ struct pci_dev *dev;
+ u32 l;
+
+ if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
+ return NULL;
+
+ dev = pci_alloc_dev(bus);
if (!dev)
return NULL;
- dev->bus = bus;
dev->devfn = devfn;
dev->vendor = l & 0xffff;
dev->device = (l >> 16) & 0xffff;
+ pci_set_of_node(dev);
+
if (pci_setup_device(dev)) {
+ pci_bus_put(dev->bus);
kfree(dev);
return NULL;
}
@@ -1209,13 +1355,12 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Power Management */
pci_pm_init(dev);
- platform_pci_wakeup_init(dev);
/* Vital Product Data */
pci_vpd_pci22_init(dev);
/* Alternative Routing-ID Forwarding */
- pci_enable_ari(dev);
+ pci_configure_ari(dev);
/* Single Root I/O Virtualization */
pci_iov_init(dev);
@@ -1226,10 +1371,12 @@ static void pci_init_capabilities(struct pci_dev *dev)
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
+ int ret;
+
device_initialize(&dev->dev);
dev->dev.release = pci_release_dev;
- pci_dev_get(dev);
+ set_dev_node(&dev->dev, pcibus_to_node(bus));
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
@@ -1240,6 +1387,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
/* Fix up broken headers */
pci_fixup_device(pci_fixup_header, dev);
+ /* moved out from quirk header fixup code */
+ pci_reassigndev_resource_alignment(dev);
+
/* Clear the state_saved flag. */
dev->state_saved = false;
@@ -1253,9 +1403,17 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
down_write(&pci_bus_sem);
list_add_tail(&dev->bus_list, &bus->devices);
up_write(&pci_bus_sem);
+
+ ret = pcibios_add_device(dev);
+ WARN_ON(ret < 0);
+
+ /* Notifier could use PCI capabilities */
+ dev->match_driver = false;
+ ret = device_add(&dev->dev);
+ WARN_ON(ret < 0);
}
-struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
+struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
struct pci_dev *dev;
@@ -1275,41 +1433,44 @@ struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
}
EXPORT_SYMBOL(pci_scan_single_device);
-static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn)
+static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
{
- u16 cap;
- unsigned pos, next_fn;
+ int pos;
+ u16 cap = 0;
+ unsigned next_fn;
- if (!dev)
- return 0;
+ if (pci_ari_enabled(bus)) {
+ if (!dev)
+ return 0;
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
+ if (!pos)
+ return 0;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
- if (!pos)
- return 0;
- pci_read_config_word(dev, pos + 4, &cap);
- next_fn = cap >> 8;
- if (next_fn <= fn)
- return 0;
- return next_fn;
-}
+ pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
+ next_fn = PCI_ARI_CAP_NFN(cap);
+ if (next_fn <= fn)
+ return 0; /* protect against malformed list */
-static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
-{
- return (fn + 1) % 8;
-}
+ return next_fn;
+ }
+
+ /* dev may be NULL for non-contiguous multifunction devices */
+ if (!dev || dev->multifunction)
+ return (fn + 1) % 8;
-static unsigned no_next_fn(struct pci_dev *dev, unsigned fn)
-{
return 0;
}
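In the ARI branch, the next function number comes from bits 15:8 of the ARI Capability Register; PCI_ARI_CAP_NFN() is just that shift-and-mask. A stand-alone sketch walking such a linked list of functions; the capability values are invented, the macro matches pci_regs.h:

#include <stdio.h>

#define PCI_ARI_CAP_NFN(x)	(((x) >> 8) & 0xff)	/* Next Function Number */

int main(void)
{
	/* Made-up ARI Capability Register values, indexed by function. */
	unsigned short ari_cap[256] = { [0] = 0x0300, [3] = 0x0700, [7] = 0x0000 };
	unsigned int fn = 0;

	do {
		printf("function %u\n", fn);
		fn = PCI_ARI_CAP_NFN(ari_cap[fn]);
	} while (fn);	/* 0 terminates the list, as in next_fn() */
	/* prints functions 0, 3, 7 */
	return 0;
}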
static int only_one_child(struct pci_bus *bus)
{
struct pci_dev *parent = bus->self;
+
if (!parent || !pci_is_pcie(parent))
return 0;
- if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
- parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
+ if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
+ return 1;
+ if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
+ !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
return 1;
return 0;
}
@@ -1329,7 +1490,6 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
{
unsigned fn, nr = 0;
struct pci_dev *dev;
- unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn;
if (only_one_child(bus) && (devfn > 0))
return 0; /* Already scanned the entire slot */
@@ -1340,12 +1500,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
if (!dev->is_added)
nr++;
- if (pci_ari_enabled(bus))
- next_fn = next_ari_fn;
- else if (dev->multifunction)
- next_fn = next_trad_fn;
-
- for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) {
+ for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
if (!dev->is_added)
@@ -1360,10 +1515,181 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
return nr;
}
+EXPORT_SYMBOL(pci_scan_slot);
+
+static int pcie_find_smpss(struct pci_dev *dev, void *data)
+{
+ u8 *smpss = data;
+
+ if (!pci_is_pcie(dev))
+ return 0;
+
+ /*
+ * We don't have a way to change MPS settings on devices that have
+ * drivers attached. A hot-added device might support only the minimum
+ * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
+ * where devices may be hot-added, we limit the fabric MPS to 128 so
+ * hot-added devices will work correctly.
+ *
+ * However, if we hot-add a device to a slot directly below a Root
+ * Port, it's impossible for there to be other existing devices below
+ * the port. We don't limit the MPS in this case because we can
+ * reconfigure MPS on both the Root Port and the hot-added device,
+ * and there are no other devices involved.
+ *
+ * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
+ */
+ if (dev->is_hotplug_bridge &&
+ pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ *smpss = 0;
+
+ if (*smpss > dev->pcie_mpss)
+ *smpss = dev->pcie_mpss;
+
+ return 0;
+}
+
+static void pcie_write_mps(struct pci_dev *dev, int mps)
+{
+ int rc;
+
+ if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
+ mps = 128 << dev->pcie_mpss;
+
+ if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
+ dev->bus->self)
+ /* For "Performance", the assumption is made that
+ * downstream communication will never be larger than
+ * the MRRS. So, the MPS only needs to be configured
+ * for the upstream communication. This being the case,
+ * walk from the top down and set the MPS of the child
+ * to that of the parent bus.
+ *
+ * Configure the device MPS with the smaller of the
+ * device MPSS or the bridge MPS (which is assumed to be
+ * properly configured at this point to the largest
+ * allowable MPS based on its parent bus).
+ */
+ mps = min(mps, pcie_get_mps(dev->bus->self));
+ }
+
+ rc = pcie_set_mps(dev, mps);
+ if (rc)
+ dev_err(&dev->dev, "Failed attempting to set the MPS\n");
+}
+
+static void pcie_write_mrrs(struct pci_dev *dev)
+{
+ int rc, mrrs;
+
+ /* In the "safe" case, do not configure the MRRS. There appear to be
+ * issues with setting MRRS to 0 on a number of devices.
+ */
+ if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
+ return;
+
+ /* For Max performance, the MRRS must be set to the largest supported
+ * value. However, it cannot be configured larger than the MPS the
+ * device or the bus can support. This should already be properly
+ * configured by a prior call to pcie_write_mps.
+ */
+ mrrs = pcie_get_mps(dev);
+
+	/* MRRS is an R/W register. Invalid values can be written, but a
+ * subsequent read will verify if the value is acceptable or not.
+ * If the MRRS value provided is not acceptable (e.g., too large),
+ * shrink the value until it is acceptable to the HW.
+ */
+ while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
+ rc = pcie_set_readrq(dev, mrrs);
+ if (!rc)
+ break;
+
+ dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
+ mrrs /= 2;
+ }
+
+ if (mrrs < 128)
+ dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
+}
+
+static void pcie_bus_detect_mps(struct pci_dev *dev)
+{
+ struct pci_dev *bridge = dev->bus->self;
+ int mps, p_mps;
+
+ if (!bridge)
+ return;
+
+ mps = pcie_get_mps(dev);
+ p_mps = pcie_get_mps(bridge);
+
+ if (mps != p_mps)
+ dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ mps, pci_name(bridge), p_mps);
+}
+
+static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
+{
+ int mps, orig_mps;
+
+ if (!pci_is_pcie(dev))
+ return 0;
+
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
+ pcie_bus_detect_mps(dev);
+ return 0;
+ }
+
+ mps = 128 << *(u8 *)data;
+ orig_mps = pcie_get_mps(dev);
+
+ pcie_write_mps(dev, mps);
+ pcie_write_mrrs(dev);
+
+ dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
+ pcie_get_mps(dev), 128 << dev->pcie_mpss,
+ orig_mps, pcie_get_readrq(dev));
+
+ return 0;
+}
+
+/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
+ * parents then children fashion. If this changes, then this code will not
+ * work as designed.
+ */
+void pcie_bus_configure_settings(struct pci_bus *bus)
+{
+ u8 smpss = 0;
+
+ if (!bus->self)
+ return;
+
+ if (!pci_is_pcie(bus->self))
+ return;
+
+ /* FIXME - Peer to peer DMA is possible, though the endpoint would need
+ * to be aware of the MPS of the destination. To work around this,
+ * simply force the MPS of the entire system to the smallest possible.
+ */
+ if (pcie_bus_config == PCIE_BUS_PEER2PEER)
+ smpss = 0;
+
+ if (pcie_bus_config == PCIE_BUS_SAFE) {
+ smpss = bus->self->pcie_mpss;
+
+ pcie_find_smpss(bus->self, &smpss);
+ pci_walk_bus(bus, pcie_find_smpss, &smpss);
+ }
+
+ pcie_bus_configure_set(bus->self, &smpss);
+ pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
+}
+EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
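Both MPS and MPSS are stored as a power-of-two exponent, payload bytes = 128 << field, which is why pcie_bus_configure_set() computes 128 << *(u8 *)data. A stand-alone sketch of the encoding together with the min-reduction that the PCIE_BUS_SAFE walk performs; the per-device MPSS values are invented:

#include <stdio.h>

int main(void)
{
	/* Made-up per-device MPSS exponents found while walking a fabric. */
	unsigned char mpss[] = { 2, 1, 2 };	/* 512, 256, 512 bytes */
	unsigned char smpss = 5;		/* start at the spec max, 4096 */
	unsigned int i;

	for (i = 0; i < sizeof(mpss); i++)	/* pcie_find_smpss()-style min */
		if (mpss[i] < smpss)
			smpss = mpss[i];

	printf("fabric MPS = %d bytes\n", 128 << smpss);	/* prints 256 */
	return 0;
}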
-unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
+unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
- unsigned int devfn, pass, max = bus->secondary;
+ unsigned int devfn, pass, max = bus->busn_res.start;
struct pci_dev *dev;
dev_dbg(&bus->dev, "scanning bus\n");
@@ -1382,14 +1708,12 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
if (!bus->is_added) {
dev_dbg(&bus->dev, "fixups for bus\n");
pcibios_fixup_bus(bus);
- if (pci_is_root_bus(bus))
- bus->is_added = 1;
+ bus->is_added = 1;
}
- for (pass=0; pass < 2; pass++)
+ for (pass = 0; pass < 2; pass++)
list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ if (pci_is_bridge(dev))
max = pci_scan_bridge(bus, dev, max, pass);
}
@@ -1403,27 +1727,47 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
return max;
}
+EXPORT_SYMBOL_GPL(pci_scan_child_bus);
-struct pci_bus * pci_create_bus(struct device *parent,
- int bus, struct pci_ops *ops, void *sysdata)
+/**
+ * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
+ * @bridge: Host bridge to set up.
+ *
+ * Default empty implementation. Replace with an architecture-specific setup
+ * routine, if necessary.
+ */
+int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ return 0;
+}
+
+void __weak pcibios_add_bus(struct pci_bus *bus)
+{
+}
+
+void __weak pcibios_remove_bus(struct pci_bus *bus)
+{
+}
+
+struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
int error;
+ struct pci_host_bridge *bridge;
struct pci_bus *b, *b2;
- struct device *dev;
+ struct pci_host_bridge_window *window, *n;
+ struct resource *res;
+ resource_size_t offset;
+ char bus_addr[64];
+ char *fmt;
b = pci_alloc_bus();
if (!b)
return NULL;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev){
- kfree(b);
- return NULL;
- }
-
b->sysdata = sysdata;
b->ops = ops;
-
+ b->number = b->busn_res.start = bus;
b2 = pci_find_bus(pci_domain_nr(b), bus);
if (b2) {
/* If we already got to this bus through a different bridge, ignore it */
@@ -1431,18 +1775,27 @@ struct pci_bus * pci_create_bus(struct device *parent,
goto err_out;
}
- down_write(&pci_bus_sem);
- list_add_tail(&b->node, &pci_root_buses);
- up_write(&pci_bus_sem);
+ bridge = pci_alloc_host_bridge(b);
+ if (!bridge)
+ goto err_out;
- dev->parent = parent;
- dev->release = pci_release_bus_bridge_dev;
- dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus);
- error = device_register(dev);
- if (error)
- goto dev_reg_err;
- b->bridge = get_device(dev);
+ bridge->dev.parent = parent;
+ bridge->dev.release = pci_release_host_bridge_dev;
+ dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
+ error = pcibios_root_bridge_prepare(bridge);
+ if (error) {
+ kfree(bridge);
+ goto err_out;
+ }
+
+ error = device_register(&bridge->dev);
+ if (error) {
+ put_device(&bridge->dev);
+ goto err_out;
+ }
+ b->bridge = get_device(&bridge->dev);
device_enable_async_suspend(b->bridge);
+ pci_set_bus_of_node(b);
if (!parent)
set_dev_node(b->bridge, pcibus_to_node(b));
@@ -1453,46 +1806,215 @@ struct pci_bus * pci_create_bus(struct device *parent,
error = device_register(&b->dev);
if (error)
goto class_dev_reg_err;
- error = device_create_file(&b->dev, &dev_attr_cpuaffinity);
- if (error)
- goto dev_create_file_err;
+
+ pcibios_add_bus(b);
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files(b);
- b->number = b->secondary = bus;
- b->resource[0] = &ioport_resource;
- b->resource[1] = &iomem_resource;
+ if (parent)
+ dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
+ else
+ printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
+
+ /* Add initial resources to the bus */
+ list_for_each_entry_safe(window, n, resources, list) {
+ list_move_tail(&window->list, &bridge->windows);
+ res = window->res;
+ offset = window->offset;
+ if (res->flags & IORESOURCE_BUS)
+ pci_bus_insert_busn_res(b, bus, res->end);
+ else
+ pci_bus_add_resource(b, res, 0);
+ if (offset) {
+ if (resource_type(res) == IORESOURCE_IO)
+ fmt = " (bus address [%#06llx-%#06llx])";
+ else
+ fmt = " (bus address [%#010llx-%#010llx])";
+ snprintf(bus_addr, sizeof(bus_addr), fmt,
+ (unsigned long long) (res->start - offset),
+ (unsigned long long) (res->end - offset));
+ } else
+ bus_addr[0] = '\0';
+ dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
+ }
+
+ down_write(&pci_bus_sem);
+ list_add_tail(&b->node, &pci_root_buses);
+ up_write(&pci_bus_sem);
return b;
-dev_create_file_err:
- device_unregister(&b->dev);
class_dev_reg_err:
- device_unregister(dev);
-dev_reg_err:
- down_write(&pci_bus_sem);
- list_del(&b->node);
- up_write(&pci_bus_sem);
+ put_device(&bridge->dev);
+ device_unregister(&bridge->dev);
err_out:
- kfree(dev);
kfree(b);
return NULL;
}
-struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
+int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
+{
+ struct resource *res = &b->busn_res;
+ struct resource *parent_res, *conflict;
+
+ res->start = bus;
+ res->end = bus_max;
+ res->flags = IORESOURCE_BUS;
+
+ if (!pci_is_root_bus(b))
+ parent_res = &b->parent->busn_res;
+ else {
+ parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
+ res->flags |= IORESOURCE_PCI_FIXED;
+ }
+
+ conflict = request_resource_conflict(parent_res, res);
+
+ if (conflict)
+ dev_printk(KERN_DEBUG, &b->dev,
+ "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
+ res, pci_is_root_bus(b) ? "domain " : "",
+ parent_res, conflict->name, conflict);
+
+ return conflict == NULL;
+}
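pci_bus_insert_busn_res() relies on request_resource_conflict() returning NULL on success and a pointer to the conflicting resource otherwise. A hedged kernel-style sketch of that nesting, inserting a child [bus 02-05] under an invented domain-wide [bus 00-ff]; only request_resource_conflict() and the flag names are real:

#include <linux/ioport.h>
#include <linux/errno.h>

/* Invented resources mirroring the busn_res layout built above. */
static struct resource example_domain_busn = {
	.name	= "PCI busn",
	.start	= 0x00,
	.end	= 0xff,
	.flags	= IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
};

static struct resource example_child_busn = {
	.name	= "PCI busn",
	.start	= 0x02,
	.end	= 0x05,
	.flags	= IORESOURCE_BUS,
};

static int example_insert(void)
{
	struct resource *conflict;

	conflict = request_resource_conflict(&example_domain_busn,
					     &example_child_busn);
	return conflict ? -EBUSY : 0;	/* NULL conflict means inserted */
}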
+
+int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
+{
+ struct resource *res = &b->busn_res;
+ struct resource old_res = *res;
+ resource_size_t size;
+ int ret;
+
+ if (res->start > bus_max)
+ return -EINVAL;
+
+ size = bus_max - res->start + 1;
+ ret = adjust_resource(res, res->start, size);
+ dev_printk(KERN_DEBUG, &b->dev,
+ "busn_res: %pR end %s updated to %02x\n",
+ &old_res, ret ? "can not be" : "is", bus_max);
+
+ if (!ret && !res->parent)
+ pci_bus_insert_busn_res(b, res->start, res->end);
+
+ return ret;
+}
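
pci_bus_update_busn_res_end() relies on adjust_resource(), which resizes an already-inserted resource in place and fails if the new span would clip a child or escape the parent. A small sketch, again with illustrative names:

	#include <linux/ioport.h>

	/* Illustrative only: extend a claimed bus range's end up to 0x3f. */
	static int demo_grow_busn(struct resource *res)
	{
		resource_size_t new_size = 0x3f - res->start + 1;

		/* Fails if the new span would overlap a sibling or its parent's end. */
		return adjust_resource(res, res->start, new_size);
	}
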
+
+void pci_bus_release_busn_res(struct pci_bus *b)
+{
+ struct resource *res = &b->busn_res;
+ int ret;
+
+ if (!res->flags || !res->parent)
+ return;
+
+ ret = release_resource(res);
+ dev_printk(KERN_DEBUG, &b->dev,
+ "busn_res: %pR %s released\n",
+ res, ret ? "can not be" : "is");
+}
+
+struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
+{
+ struct pci_host_bridge_window *window;
+ bool found = false;
+ struct pci_bus *b;
+ int max;
+
+ list_for_each_entry(window, resources, list)
+ if (window->res->flags & IORESOURCE_BUS) {
+ found = true;
+ break;
+ }
+
+ b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
+ if (!b)
+ return NULL;
+
+ if (!found) {
+ dev_info(&b->dev,
+ "No busn resource found for root bus, will use [bus %02x-ff]\n",
+ bus);
+ pci_bus_insert_busn_res(b, bus, 255);
+ }
+
+ max = pci_scan_child_bus(b);
+
+ if (!found)
+ pci_bus_update_busn_res_end(b, max);
+
+ pci_bus_add_devices(b);
+ return b;
+}
+EXPORT_SYMBOL(pci_scan_root_bus);
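
A host bridge driver is expected to hand pci_scan_root_bus() a resource list that includes an IORESOURCE_BUS window; otherwise it falls back to [bus xx-ff] as above. A hedged sketch of such a caller — demo_scan and its arguments are assumptions, not part of this patch:

	#include <linux/pci.h>

	/* Sketch of a host driver scan path; ops/sysdata come from the driver. */
	static struct pci_bus *demo_scan(struct device *parent, struct pci_ops *ops,
					 void *sysdata, struct resource *busn,
					 struct resource *mem)
	{
		LIST_HEAD(resources);
		struct pci_bus *bus;

		pci_add_resource(&resources, busn);	/* IORESOURCE_BUS window */
		pci_add_resource(&resources, mem);	/* MMIO aperture */

		/* Creates the root bus, scans children and adds the devices. */
		bus = pci_scan_root_bus(parent, busn->start, ops, sysdata,
					&resources);
		if (!bus)
			pci_free_resource_list(&resources);
		return bus;
	}
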
+
+/* Deprecated; use pci_scan_root_bus() instead */
+struct pci_bus *pci_scan_bus_parented(struct device *parent,
int bus, struct pci_ops *ops, void *sysdata)
{
+ LIST_HEAD(resources);
struct pci_bus *b;
- b = pci_create_bus(parent, bus, ops, sysdata);
+ pci_add_resource(&resources, &ioport_resource);
+ pci_add_resource(&resources, &iomem_resource);
+ pci_add_resource(&resources, &busn_resource);
+ b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
if (b)
- b->subordinate = pci_scan_child_bus(b);
+ pci_scan_child_bus(b);
+ else
+ pci_free_resource_list(&resources);
return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);
-#ifdef CONFIG_HOTPLUG
+struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
+ void *sysdata)
+{
+ LIST_HEAD(resources);
+ struct pci_bus *b;
+
+ pci_add_resource(&resources, &ioport_resource);
+ pci_add_resource(&resources, &iomem_resource);
+ pci_add_resource(&resources, &busn_resource);
+ b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
+ if (b) {
+ pci_scan_child_bus(b);
+ pci_bus_add_devices(b);
+ } else {
+ pci_free_resource_list(&resources);
+ }
+ return b;
+}
+EXPORT_SYMBOL(pci_scan_bus);
+
+/**
+ * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
+ * @bridge: PCI bridge for the bus to scan
+ *
+ * Scan a PCI bus and child buses for new devices, add them,
+ * and enable them, resizing bridge MMIO/IO resources if necessary
+ * and possible. The caller must ensure the child devices are already
+ * removed for resizing to occur.
+ *
+ * Returns the highest subordinate bus number discovered.
+ */
+unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
+{
+ unsigned int max;
+ struct pci_bus *bus = bridge->subordinate;
+
+ max = pci_scan_child_bus(bus);
+
+ pci_assign_unassigned_bridge_resources(bridge);
+
+ pci_bus_add_devices(bus);
+
+ return max;
+}
+
/**
* pci_rescan_bus - scan a PCI bus for devices.
* @bus: PCI bus to scan
@@ -1502,36 +2024,38 @@ EXPORT_SYMBOL(pci_scan_bus_parented);
*
 * Returns the highest subordinate bus number discovered.
*/
-unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
+unsigned int pci_rescan_bus(struct pci_bus *bus)
{
unsigned int max;
- struct pci_dev *dev;
max = pci_scan_child_bus(bus);
-
- down_read(&pci_bus_sem);
- list_for_each_entry(dev, &bus->devices, bus_list)
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
- if (dev->subordinate)
- pci_bus_size_bridges(dev->subordinate);
- up_read(&pci_bus_sem);
-
- pci_bus_assign_resources(bus);
- pci_enable_bridges(bus);
+ pci_assign_unassigned_bus_resources(bus);
pci_bus_add_devices(bus);
return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);
-EXPORT_SYMBOL(pci_add_new_bus);
-EXPORT_SYMBOL(pci_scan_slot);
-EXPORT_SYMBOL(pci_scan_bridge);
-EXPORT_SYMBOL_GPL(pci_scan_child_bus);
-#endif
+/*
+ * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
+ * routines should always be executed under this mutex.
+ */
+static DEFINE_MUTEX(pci_rescan_remove_lock);
+
+void pci_lock_rescan_remove(void)
+{
+ mutex_lock(&pci_rescan_remove_lock);
+}
+EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
+
+void pci_unlock_rescan_remove(void)
+{
+ mutex_unlock(&pci_rescan_remove_lock);
+}
+EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
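
Callers such as sysfs rescan handlers are expected to bracket the scan with this lock. A minimal sketch:

	#include <linux/pci.h>

	/* Sketch: any manual rescan path should hold the rescan/remove lock. */
	static void demo_rescan(struct pci_bus *bus)
	{
		pci_lock_rescan_remove();
		pci_rescan_bus(bus);
		pci_unlock_rescan_remove();
	}
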
-static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
+static int __init pci_sort_bf_cmp(const struct device *d_a,
+ const struct device *d_b)
{
const struct pci_dev *a = to_pci_dev(d_a);
const struct pci_dev *b = to_pci_dev(d_b);
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 297b72c880a..3f155e78513 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/smp_lock.h>
#include <linux/capability.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -18,38 +17,16 @@
static int proc_initialized; /* = 0 */
-static loff_t
-proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
+static loff_t proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
{
- loff_t new = -1;
- struct inode *inode = file->f_path.dentry->d_inode;
-
- mutex_lock(&inode->i_mutex);
- switch (whence) {
- case 0:
- new = off;
- break;
- case 1:
- new = file->f_pos + off;
- break;
- case 2:
- new = inode->i_size + off;
- break;
- }
- if (new < 0 || new > inode->i_size)
- new = -EINVAL;
- else
- file->f_pos = new;
- mutex_unlock(&inode->i_mutex);
- return new;
+ struct pci_dev *dev = PDE_DATA(file_inode(file));
+ return fixed_size_llseek(file, off, whence, dev->cfg_size);
}
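
fixed_size_llseek() clamps the offset into [0, size] and replaces the hand-rolled switch the old code used. A sketch of the same delegation for any fixed-size file — the 256-byte size here is illustrative:

	#include <linux/fs.h>

	/* Illustrative: a fixed-size file can delegate its llseek this way. */
	static loff_t demo_llseek(struct file *file, loff_t off, int whence)
	{
		return fixed_size_llseek(file, off, whence, 256 /* file size */);
	}
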
-static ssize_t
-proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t proc_bus_pci_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
- const struct inode *ino = file->f_path.dentry->d_inode;
- const struct proc_dir_entry *dp = PDE(ino);
- struct pci_dev *dev = dp->data;
+ struct pci_dev *dev = PDE_DATA(file_inode(file));
unsigned int pos = *ppos;
unsigned int cnt, size;
@@ -60,7 +37,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
*/
if (capable(CAP_SYS_ADMIN))
- size = dp->size;
+ size = dev->cfg_size;
else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
size = 128;
else
@@ -77,6 +54,8 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
if (!access_ok(VERIFY_WRITE, buf, cnt))
return -EINVAL;
+ pci_config_pm_runtime_get(dev);
+
if ((pos & 1) && cnt) {
unsigned char val;
pci_user_read_config_byte(dev, pos, &val);
@@ -122,18 +101,19 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
cnt--;
}
+ pci_config_pm_runtime_put(dev);
+
*ppos = pos;
return nbytes;
}
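
pci_config_pm_runtime_get()/put() — PCI-core-internal helpers declared in drivers/pci/pci.h — wake a runtime-suspended device before user-initiated config space access and drop the reference afterwards. A sketch of the pairing; demo_read_vendor is hypothetical:

	#include <linux/pci.h>

	/* Sketch: wake a possibly runtime-suspended device around config access. */
	static u16 demo_read_vendor(struct pci_dev *dev)
	{
		u16 vendor;

		pci_config_pm_runtime_get(dev);
		pci_user_read_config_word(dev, PCI_VENDOR_ID, &vendor);
		pci_config_pm_runtime_put(dev);

		return vendor;
	}
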
-static ssize_t
-proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
- struct inode *ino = file->f_path.dentry->d_inode;
- const struct proc_dir_entry *dp = PDE(ino);
- struct pci_dev *dev = dp->data;
+ struct inode *ino = file_inode(file);
+ struct pci_dev *dev = PDE_DATA(ino);
int pos = *ppos;
- int size = dp->size;
+ int size = dev->cfg_size;
int cnt;
if (pos >= size)
@@ -147,6 +127,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
if (!access_ok(VERIFY_READ, buf, cnt))
return -EINVAL;
+ pci_config_pm_runtime_get(dev);
+
if ((pos & 1) && cnt) {
unsigned char val;
__get_user(val, buf);
@@ -192,8 +174,10 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
cnt--;
}
+ pci_config_pm_runtime_put(dev);
+
*ppos = pos;
- i_size_write(ino, dp->size);
+ i_size_write(ino, dev->cfg_size);
return nbytes;
}
@@ -205,8 +189,7 @@ struct pci_filp_private {
static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- const struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
- struct pci_dev *dev = dp->data;
+ struct pci_dev *dev = PDE_DATA(file_inode(file));
#ifdef HAVE_PCI_MMAP
struct pci_filp_private *fpriv = file->private_data;
#endif /* HAVE_PCI_MMAP */
@@ -238,7 +221,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
default:
ret = -EINVAL;
break;
- };
+ }
return ret;
}
@@ -246,9 +229,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
#ifdef HAVE_PCI_MMAP
static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct inode *inode = file->f_path.dentry->d_inode;
- const struct proc_dir_entry *dp = PDE(inode);
- struct pci_dev *dev = dp->data;
+ struct pci_dev *dev = PDE_DATA(file_inode(file));
struct pci_filp_private *fpriv = file->private_data;
int i, ret;
@@ -257,7 +238,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
/* Make sure the caller is mapping a real resource for this device */
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
- if (pci_mmap_fits(dev, i, vma))
+ if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
break;
}
@@ -418,7 +399,7 @@ int pci_proc_attach_device(struct pci_dev *dev)
&proc_bus_pci_operations, dev);
if (!e)
return -ENOMEM;
- e->size = dev->cfg_size;
+ proc_set_size(e, dev->cfg_size);
dev->procent = e;
return 0;
@@ -426,39 +407,14 @@ int pci_proc_attach_device(struct pci_dev *dev)
int pci_proc_detach_device(struct pci_dev *dev)
{
- struct proc_dir_entry *e;
-
- if ((e = dev->procent)) {
- remove_proc_entry(e->name, dev->bus->procdir);
- dev->procent = NULL;
- }
+ proc_remove(dev->procent);
+ dev->procent = NULL;
return 0;
}
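
proc_remove() is NULL-safe and tears down the entry's whole subtree, which is why both the name-based remove_proc_entry() dance and the NULL check disappear. A small sketch of the idiom:

	#include <linux/proc_fs.h>

	/* Sketch: proc_remove() is a no-op on NULL and removes the subtree. */
	static void demo_teardown(struct proc_dir_entry **pde)
	{
		proc_remove(*pde);
		*pde = NULL;
	}
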
-#if 0
-int pci_proc_attach_bus(struct pci_bus* bus)
+int pci_proc_detach_bus(struct pci_bus *bus)
{
- struct proc_dir_entry *de = bus->procdir;
-
- if (!proc_initialized)
- return -EACCES;
-
- if (!de) {
- char name[16];
- sprintf(name, "%02x", bus->number);
- de = bus->procdir = proc_mkdir(name, proc_bus_pci_dir);
- if (!de)
- return -ENOMEM;
- }
- return 0;
-}
-#endif /* 0 */
-
-int pci_proc_detach_bus(struct pci_bus* bus)
-{
- struct proc_dir_entry *de = bus->procdir;
- if (de)
- remove_proc_entry(de->name, proc_bus_pci_dir);
+ proc_remove(bus->procdir);
return 0;
}
@@ -466,6 +422,7 @@ static int proc_bus_pci_dev_open(struct inode *inode, struct file *file)
{
return seq_open(file, &proc_bus_pci_devices_op);
}
+
static const struct file_operations proc_bus_pci_dev_operations = {
.owner = THIS_MODULE,
.open = proc_bus_pci_dev_open,
@@ -486,6 +443,4 @@ static int __init pci_proc_init(void)
return 0;
}
-
device_initcall(pci_proc_init);
-
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f5c63fe9db5..d0f69269eb6 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -9,14 +9,11 @@
*
* Init/reset quirks for USB host controllers should be in the
 * USB quirks file, where their drivers can access and reuse it.
- *
- * The bridge optimization stuff has been removed. If you really
- * have a silly BIOS which is unable to set your host bridge right,
- * use the PowerTweak utility (see http://powertweak.sourceforge.net).
*/
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -25,97 +22,36 @@
#include <linux/dmi.h>
#include <linux/pci-aspm.h>
#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/ktime.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
/*
- * This quirk function disables memory decoding and releases memory resources
- * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
- * It also rounds up size to specified alignment.
- * Later on, the kernel will assign page-aligned memory resource back
- * to the device.
- */
-static void __devinit quirk_resource_alignment(struct pci_dev *dev)
-{
- int i;
- struct resource *r;
- resource_size_t align, size;
- u16 command;
-
- if (!pci_is_reassigndev(dev))
- return;
-
- if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
- (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
- dev_warn(&dev->dev,
- "Can't reassign resources to host bridge.\n");
- return;
- }
-
- dev_info(&dev->dev,
- "Disabling memory decoding and releasing memory resources.\n");
- pci_read_config_word(dev, PCI_COMMAND, &command);
- command &= ~PCI_COMMAND_MEMORY;
- pci_write_config_word(dev, PCI_COMMAND, command);
-
- align = pci_specified_resource_alignment(dev);
- for (i=0; i < PCI_BRIDGE_RESOURCES; i++) {
- r = &dev->resource[i];
- if (!(r->flags & IORESOURCE_MEM))
- continue;
- size = resource_size(r);
- if (size < align) {
- size = align;
- dev_info(&dev->dev,
- "Rounding up size of resource #%d to %#llx.\n",
- i, (unsigned long long)size);
- }
- r->end = size - 1;
- r->start = 0;
- }
- /* Need to disable bridge's resource window,
- * to enable the kernel to reassign new resource
- * window later on.
- */
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
- (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
- for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
- r = &dev->resource[i];
- if (!(r->flags & IORESOURCE_MEM))
- continue;
- r->end = resource_size(r) - 1;
- r->start = 0;
- }
- pci_disable_bridge_window(dev);
- }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_resource_alignment);
-
-/*
* Decoding should be disabled for a PCI device during BAR sizing to avoid
 * conflict. But doing so may cause problems on host bridges and perhaps other
* key system devices. For devices that need to have mmio decoding always-on,
* we need to set the dev->mmio_always_on bit.
*/
-static void __devinit quirk_mmio_always_on(struct pci_dev *dev)
+static void quirk_mmio_always_on(struct pci_dev *dev)
{
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
- dev->mmio_always_on = 1;
+ dev->mmio_always_on = 1;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_mmio_always_on);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
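
The class-matched fixup macros test (dev->class >> class_shift) == class, so the shift of 8 above masks off the programming-interface byte and replaces the old open-coded class check. A hypothetical declaration using a full 24-bit class match (shift 0):

	#include <linux/pci.h>

	/* Hypothetical quirk: runs early for every AHCI-class SATA controller. */
	static void demo_sata_quirk(struct pci_dev *dev)
	{
		dev_info(&dev->dev, "demo quirk for class 0x%06x\n", dev->class);
	}
	DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
				      PCI_CLASS_STORAGE_SATA_AHCI, 0,
				      demo_sata_quirk);
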
/* The Mellanox Tavor device gives false positive parity errors
* Mark this device with a broken_parity_status, to allow
* PCI scanning code to "skip" this now blacklisted device.
*/
-static void __devinit quirk_mellanox_tavor(struct pci_dev *dev)
+static void quirk_mellanox_tavor(struct pci_dev *dev)
{
dev->broken_parity_status = 1; /* This device gives false positives */
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
-/* Deal with broken BIOS'es that neglect to enable passive release,
+/* Deal with broken BIOSes that neglect to enable passive release,
which can cause problems in combination with the 82441FX/PPro MTRRs */
static void quirk_passive_release(struct pci_dev *dev)
{
@@ -138,15 +74,15 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p
/* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround
but VIA don't answer queries. If you happen to have good contacts at VIA
- ask them for me please -- Alan
-
- This appears to be BIOS not version dependent. So presumably there is a
+ ask them for me please -- Alan
+
+ This appears to be BIOS not version dependent. So presumably there is a
chipset level fix */
-
-static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev)
+
+static void quirk_isa_dma_hangs(struct pci_dev *dev)
{
if (!isa_dma_bridge_buggy) {
- isa_dma_bridge_buggy=1;
+ isa_dma_bridge_buggy = 1;
dev_info(&dev->dev, "Activating ISA DMA hang workarounds\n");
}
}
@@ -157,7 +93,7 @@ static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
@@ -166,7 +102,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_d
* Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
* for some HT machines to use C4 w/o hanging.
*/
-static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
+static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
{
u32 pmbase;
u16 pm1a;
@@ -185,9 +121,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk
/*
* Chipsets where PCI->PCI transfers vanish or hang
*/
-static void __devinit quirk_nopcipci(struct pci_dev *dev)
+static void quirk_nopcipci(struct pci_dev *dev)
{
- if ((pci_pci_problems & PCIPCI_FAIL)==0) {
+ if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
dev_info(&dev->dev, "Disabling direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_FAIL;
}
@@ -195,7 +131,7 @@ static void __devinit quirk_nopcipci(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci);
-static void __devinit quirk_nopciamd(struct pci_dev *dev)
+static void quirk_nopciamd(struct pci_dev *dev)
{
u8 rev;
pci_read_config_byte(dev, 0x08, &rev);
@@ -210,28 +146,28 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopci
/*
* Triton requires workarounds to be used by the drivers
*/
-static void __devinit quirk_triton(struct pci_dev *dev)
+static void quirk_triton(struct pci_dev *dev)
{
- if ((pci_pci_problems&PCIPCI_TRITON)==0) {
+ if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_TRITON;
}
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
/*
* VIA Apollo KT133 needs PCI latency patch
* Made according to a windows driver based patch by George E. Breese
* see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
* and http://www.georgebreese.com/net/software/#PCI
- * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
- * the info on which Mr Breese based his work.
+ * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
+ * the info on which Mr Breese based his work.
*
* Updated based on further information from the site and also on
- * information provided by VIA
+ * information provided by VIA
*/
static void quirk_vialatency(struct pci_dev *dev)
{
@@ -239,37 +175,37 @@ static void quirk_vialatency(struct pci_dev *dev)
u8 busarb;
/* Ok we have a potential problem chipset here. Now see if we have
a buggy southbridge */
-
+
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
- if (p!=NULL) {
+ if (p != NULL) {
/* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
/* Check for buggy part revisions */
if (p->revision < 0x40 || p->revision > 0x42)
goto exit;
} else {
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
- if (p==NULL) /* No problem parts */
+ if (p == NULL) /* No problem parts */
goto exit;
/* Check for buggy part revisions */
if (p->revision < 0x10 || p->revision > 0x12)
goto exit;
}
-
+
/*
- * Ok we have the problem. Now set the PCI master grant to
+ * Ok we have the problem. Now set the PCI master grant to
* occur every master grant. The apparent bug is that under high
* PCI load (quite common in Linux of course) you can get data
* loss when the CPU is held off the bus for 3 bus master requests
* This happens to include the IDE controllers....
*
* VIA only apply this fix when an SB Live! is present but under
- * both Linux and Windows this isnt enough, and we have seen
+ * both Linux and Windows this isn't enough, and we have seen
* corruption without SB Live! but with things like 3 UDMA IDE
* controllers. So we ignore that bit of the VIA recommendation..
*/
pci_read_config_byte(dev, 0x76, &busarb);
- /* Set bit 4 and bi 5 of byte 76 to 0x01
+	/* Set bits 4 and 5 of byte 76 to 0x01
	   "Master priority rotation on every PCI master grant" */
busarb &= ~(1<<5);
busarb |= (1<<4);
@@ -289,18 +225,18 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_viala
/*
* VIA Apollo VP3 needs ETBF on BT848/878
*/
-static void __devinit quirk_viaetbf(struct pci_dev *dev)
+static void quirk_viaetbf(struct pci_dev *dev)
{
- if ((pci_pci_problems&PCIPCI_VIAETBF)==0) {
+ if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_VIAETBF;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf);
-static void __devinit quirk_vsfx(struct pci_dev *dev)
+static void quirk_vsfx(struct pci_dev *dev)
{
- if ((pci_pci_problems&PCIPCI_VSFX)==0) {
+ if ((pci_pci_problems&PCIPCI_VSFX) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_VSFX;
}
@@ -312,40 +248,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx)
* that DMA to AGP space. Latency must be set to 0xA and triton
* workaround applied too
* [Info kindly provided by ALi]
- */
-static void __init quirk_alimagik(struct pci_dev *dev)
+ */
+static void quirk_alimagik(struct pci_dev *dev)
{
- if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) {
+ if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
}
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
/*
 * Natoma has some interesting boundary conditions, at least with
 * Zoran devices
*/
-static void __devinit quirk_natoma(struct pci_dev *dev)
+static void quirk_natoma(struct pci_dev *dev)
{
- if ((pci_pci_problems&PCIPCI_NATOMA)==0) {
+ if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_NATOMA;
}
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
/*
* This chip can cause PCI parity errors if config register 0xA0 is read
* while DMAs are occurring.
*/
-static void __devinit quirk_citrine(struct pci_dev *dev)
+static void quirk_citrine(struct pci_dev *dev)
{
dev->cfg_size = 0xA0;
}
@@ -355,11 +291,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_cit
* S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
* If it's needed, re-allocate the region.
*/
-static void __devinit quirk_s3_64M(struct pci_dev *dev)
+static void quirk_s3_64M(struct pci_dev *dev)
{
struct resource *r = &dev->resource[0];
if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
+ r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0x3ffffff;
}
@@ -373,46 +310,46 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
* BAR0 should be 8 bytes; instead, it may be set to something like 8k
* (which conflicts w/ BAR1's memory range).
*/
-static void __devinit quirk_cs5536_vsa(struct pci_dev *dev)
+static void quirk_cs5536_vsa(struct pci_dev *dev)
{
if (pci_resource_len(dev, 0) != 8) {
struct resource *res = &dev->resource[0];
res->end = res->start + 8 - 1;
- dev_info(&dev->dev, "CS5536 ISA bridge bug detected "
- "(incorrect header); workaround applied.\n");
+ dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
-static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
- unsigned size, int nr, const char *name)
+static void quirk_io_region(struct pci_dev *dev, int port,
+ unsigned size, int nr, const char *name)
{
- region &= ~(size-1);
- if (region) {
- struct pci_bus_region bus_region;
- struct resource *res = dev->resource + nr;
+ u16 region;
+ struct pci_bus_region bus_region;
+ struct resource *res = dev->resource + nr;
- res->name = pci_name(dev);
- res->start = region;
- res->end = region + size - 1;
- res->flags = IORESOURCE_IO;
+ pci_read_config_word(dev, port, &region);
+ region &= ~(size - 1);
- /* Convert from PCI bus to resource space. */
- bus_region.start = res->start;
- bus_region.end = res->end;
- pcibios_bus_to_resource(dev, res, &bus_region);
+ if (!region)
+ return;
- if (pci_claim_resource(dev, nr) == 0)
- dev_info(&dev->dev, "quirk: %pR claimed by %s\n",
- res, name);
- }
-}
+ res->name = pci_name(dev);
+ res->flags = IORESOURCE_IO;
+
+ /* Convert from PCI bus to resource space */
+ bus_region.start = region;
+ bus_region.end = region + size - 1;
+ pcibios_bus_to_resource(dev->bus, res, &bus_region);
+
+ if (!pci_claim_resource(dev, nr))
+ dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name);
+}
/*
 * The ATI northbridge raises an MCE on the processor if you even
 * read anywhere between 0x3b0 and 0x3bb, or read 0x3d3
*/
-static void __devinit quirk_ati_exploding_mce(struct pci_dev *dev)
+static void quirk_ati_exploding_mce(struct pci_dev *dev)
{
dev_info(&dev->dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
 /* We must not look at these I/O locations */
@@ -432,14 +369,10 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
* 0xE0 (64 bytes of ACPI registers)
* 0xE2 (32 bytes of SMB registers)
*/
-static void __devinit quirk_ali7101_acpi(struct pci_dev *dev)
+static void quirk_ali7101_acpi(struct pci_dev *dev)
{
- u16 region;
-
- pci_read_config_word(dev, 0xE0, &region);
- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
- pci_read_config_word(dev, 0xE2, &region);
- quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
+ quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
+ quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi);
@@ -463,10 +396,11 @@ static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int p
/*
* For now we only print it out. Eventually we'll want to
* reserve it (at least if it's in the 0x1000+ range), but
- * let's get enough confirmation reports first.
+ * let's get enough confirmation reports first.
*/
base &= -size;
- dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
+ dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base,
+ base + size - 1);
}
static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
@@ -488,10 +422,11 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
}
/*
* For now we only print it out. Eventually we'll want to
- * reserve it, but let's get enough confirmation reports first.
+ * reserve it, but let's get enough confirmation reports first.
*/
base &= -size;
- dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
+ dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base,
+ base + size - 1);
}
/*
@@ -500,14 +435,12 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
* 0x90 (16 bytes of SMB registers)
* and a few strange programmable PIIX4 device resources.
*/
-static void __devinit quirk_piix4_acpi(struct pci_dev *dev)
+static void quirk_piix4_acpi(struct pci_dev *dev)
{
- u32 region, res_a;
+ u32 res_a;
- pci_read_config_dword(dev, 0x40, &region);
- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
- pci_read_config_dword(dev, 0x90, &region);
- quirk_io_region(dev, region, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
+ quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
+ quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
/* Device resource A has enables for some of the other ones */
pci_read_config_dword(dev, 0x5c, &res_a);
@@ -533,20 +466,43 @@ static void __devinit quirk_piix4_acpi(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
+#define ICH_PMBASE 0x40
+#define ICH_ACPI_CNTL 0x44
+#define ICH4_ACPI_EN 0x10
+#define ICH6_ACPI_EN 0x80
+#define ICH4_GPIOBASE 0x58
+#define ICH4_GPIO_CNTL 0x5c
+#define ICH4_GPIO_EN 0x10
+#define ICH6_GPIOBASE 0x48
+#define ICH6_GPIO_CNTL 0x4c
+#define ICH6_GPIO_EN 0x10
+
/*
* ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at
* 0x40 (128 bytes of ACPI, GPIO & TCO registers)
* 0x58 (64 bytes of GPIO I/O space)
*/
-static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev)
+static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
{
- u32 region;
+ u8 enable;
- pci_read_config_dword(dev, 0x40, &region);
- quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO");
-
- pci_read_config_dword(dev, 0x58, &region);
- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO");
+ /*
+ * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict
+ * with low legacy (and fixed) ports. We don't know the decoding
+ * priority and can't tell whether the legacy device or the one created
+ * here is really at that address. This happens on boards with broken
+ * BIOSes.
+ */
+
+ pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
+ if (enable & ICH4_ACPI_EN)
+ quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
+ "ICH4 ACPI/GPIO/TCO");
+
+ pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
+ if (enable & ICH4_GPIO_EN)
+ quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
+ "ICH4 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
@@ -559,18 +515,22 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi);
-static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev)
+static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
{
- u32 region;
+ u8 enable;
- pci_read_config_dword(dev, 0x40, &region);
- quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO");
+ pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
+ if (enable & ICH6_ACPI_EN)
+ quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
+ "ICH6 ACPI/GPIO/TCO");
- pci_read_config_dword(dev, 0x48, &region);
- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO");
+ pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
+ if (enable & ICH6_GPIO_EN)
+ quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
+ "ICH6 GPIO");
}
-static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
+static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
{
u32 val;
u32 size, base;
@@ -598,7 +558,7 @@ static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
}
-static void __devinit quirk_ich6_lpc(struct pci_dev *dev)
+static void quirk_ich6_lpc(struct pci_dev *dev)
{
/* Shared ACPI/GPIO decode with all ICH6+ */
ich6_lpc_acpi_gpio(dev);
@@ -610,7 +570,7 @@ static void __devinit quirk_ich6_lpc(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
-static void __devinit ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name)
+static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name)
{
u32 val;
u32 mask, base;
@@ -634,9 +594,9 @@ static void __devinit ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
}
/* ICH7-10 has the same common LPC generic IO decode registers */
-static void __devinit quirk_ich7_lpc(struct pci_dev *dev)
+static void quirk_ich7_lpc(struct pci_dev *dev)
{
- /* We share the common ACPI/DPIO decode with ICH6 */
+ /* We share the common ACPI/GPIO decode with ICH6 */
ich6_lpc_acpi_gpio(dev);
/* And have 4 ICH7+ generic decodes */
@@ -663,15 +623,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, qui
* VIA ACPI: One IO region pointed to by longword at
* 0x48 or 0x20 (256 bytes of ACPI registers)
*/
-static void __devinit quirk_vt82c586_acpi(struct pci_dev *dev)
+static void quirk_vt82c586_acpi(struct pci_dev *dev)
{
- u32 region;
-
- if (dev->revision & 0x10) {
- pci_read_config_dword(dev, 0x48, &region);
- region &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI");
- }
+ if (dev->revision & 0x10)
+ quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
+ "vt82c586 ACPI");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi);
@@ -681,20 +637,14 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt
* 0x70 (128 bytes of hardware monitoring register)
* 0x90 (16 bytes of SMB registers)
*/
-static void __devinit quirk_vt82c686_acpi(struct pci_dev *dev)
+static void quirk_vt82c686_acpi(struct pci_dev *dev)
{
- u16 hm;
- u32 smb;
-
quirk_vt82c586_acpi(dev);
- pci_read_config_word(dev, 0x70, &hm);
- hm &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1, "vt82c686 HW-mon");
+ quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
+ "vt82c686 HW-mon");
- pci_read_config_dword(dev, 0x90, &smb);
- smb &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2, "vt82c686 SMB");
+ quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi);
@@ -703,17 +653,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt
* 0x88 (128 bytes of power management registers)
* 0xd0 (16 bytes of SMB registers)
*/
-static void __devinit quirk_vt8235_acpi(struct pci_dev *dev)
+static void quirk_vt8235_acpi(struct pci_dev *dev)
{
- u16 pm, smb;
-
- pci_read_config_word(dev, 0x88, &pm);
- pm &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, pm, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
-
- pci_read_config_word(dev, 0xd0, &smb);
- smb &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 1, "vt8235 SMB");
+ quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
+ quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
@@ -721,13 +664,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235
* TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back:
* Disable fast back-to-back on the secondary bus segment
*/
-static void __devinit quirk_xio2000a(struct pci_dev *dev)
+static void quirk_xio2000a(struct pci_dev *dev)
{
struct pci_dev *pdev;
u16 command;
- dev_warn(&dev->dev, "TI XIO2000a quirk detected; "
- "secondary bus fast back-to-back transfers disabled\n");
+ dev_warn(&dev->dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
pci_read_config_word(pdev, PCI_COMMAND, &command);
if (command & PCI_COMMAND_FAST_BACK)
@@ -737,7 +679,7 @@ static void __devinit quirk_xio2000a(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
quirk_xio2000a);
-#ifdef CONFIG_X86_IO_APIC
+#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
@@ -751,23 +693,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
static void quirk_via_ioapic(struct pci_dev *dev)
{
u8 tmp;
-
+
if (nr_ioapics < 1)
tmp = 0; /* nothing routed to external APIC */
else
tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
-
+
dev_info(&dev->dev, "%sbling VIA external APIC routing\n",
tmp == 0 ? "Disa" : "Ena");
/* Offset 0x58: External APIC IRQ output control */
- pci_write_config_byte (dev, 0x58, tmp);
+ pci_write_config_byte(dev, 0x58, tmp);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
/*
- * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit.
+ * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' Bit.
* This leads to doubled level interrupt rates.
* Set this bit to get rid of cycle wastage.
* Otherwise uncritical.
@@ -795,7 +737,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk
* noapic specified. For the moment we assume it's the erratum. We may be wrong
 * of course. However, the advice is demonstrably good even if so.
*/
-static void __devinit quirk_amd_ioapic(struct pci_dev *dev)
+static void quirk_amd_ioapic(struct pci_dev *dev)
{
if (dev->revision >= 0x02) {
dev_warn(&dev->dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
@@ -804,7 +746,7 @@ static void __devinit quirk_amd_ioapic(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
-static void __init quirk_ioapic_rmw(struct pci_dev *dev)
+static void quirk_ioapic_rmw(struct pci_dev *dev)
{
if (dev->devfn == 0 && dev->bus->number == 0)
sis_apic_bug = 1;
@@ -816,11 +758,11 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw);
* Some settings of MMRBC can lead to data corruption so block changes.
* See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide
*/
-static void __init quirk_amd_8131_mmrbc(struct pci_dev *dev)
+static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
{
if (dev->subordinate && dev->revision <= 0x12) {
- dev_info(&dev->dev, "AMD8131 rev %x detected; "
- "disabling PCI-X MMRBC\n", dev->revision);
+ dev_info(&dev->dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
+ dev->revision);
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
}
}
@@ -834,7 +776,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_
* value of the ACPI SCI interrupt is only done for convenience.
* -jgarzik
*/
-static void __devinit quirk_via_acpi(struct pci_dev *d)
+static void quirk_via_acpi(struct pci_dev *d)
{
/*
* VIA ACPI device: SCI IRQ line in PCI config byte 0x42
@@ -941,7 +883,7 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
* We need to switch it off to be able to recognize the real
* type of the chip.
*/
-static void __devinit quirk_vt82c598_id(struct pci_dev *dev)
+static void quirk_vt82c598_id(struct pci_dev *dev)
{
pci_write_config_byte(dev, 0xfc, 0);
pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
@@ -956,12 +898,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt
*/
static void quirk_cardbus_legacy(struct pci_dev *dev)
{
- if ((PCI_CLASS_BRIDGE_CARDBUS << 8) ^ dev->class)
- return;
pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
+DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
/*
* Following the PCI ordering rules is optional on the AMD762. I'm not
@@ -974,12 +916,12 @@ static void quirk_amd_ordering(struct pci_dev *dev)
{
u32 pcic;
pci_read_config_dword(dev, 0x4C, &pcic);
- if ((pcic&6)!=6) {
+ if ((pcic & 6) != 6) {
pcic |= 6;
dev_warn(&dev->dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
pci_write_config_dword(dev, 0x4C, pcic);
pci_read_config_dword(dev, 0x84, &pcic);
- pcic |= (1<<23); /* Required in this mode */
+ pcic |= (1 << 23); /* Required in this mode */
pci_write_config_dword(dev, 0x84, pcic);
}
}
@@ -993,9 +935,11 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C
* assigned to it. We force a larger allocation to ensure that
* nothing gets put too close to it.
*/
-static void __devinit quirk_dunord ( struct pci_dev * dev )
+static void quirk_dunord(struct pci_dev *dev)
{
- struct resource *r = &dev->resource [1];
+ struct resource *r = &dev->resource[1];
+
+ r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0xffffff;
}
@@ -1007,7 +951,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk
* in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80
* instead of 0x01.
*/
-static void __devinit quirk_transparent_bridge(struct pci_dev *dev)
+static void quirk_transparent_bridge(struct pci_dev *dev)
{
dev->transparent = 1;
}
@@ -1023,11 +967,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge)
static void quirk_mediagx_master(struct pci_dev *dev)
{
u8 reg;
+
pci_read_config_byte(dev, 0x41, &reg);
if (reg & 2) {
reg &= ~2;
- dev_info(&dev->dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n", reg);
- pci_write_config_byte(dev, 0x41, reg);
+ dev_info(&dev->dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
+ reg);
+ pci_write_config_byte(dev, 0x41, reg);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
@@ -1041,7 +987,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, qu
static void quirk_disable_pxb(struct pci_dev *pdev)
{
u16 config;
-
+
if (pdev->revision != 0x04) /* Only C0 requires this */
return;
pci_read_config_word(pdev, 0x40, &config);
@@ -1054,7 +1000,7 @@ static void quirk_disable_pxb(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
-static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
+static void quirk_amd_ide_mode(struct pci_dev *pdev)
{
/* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */
u8 tmp;
@@ -1077,11 +1023,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
/*
* Serverworks CSB5 IDE does not fully support native mode
*/
-static void __devinit quirk_svwks_csb5ide(struct pci_dev *pdev)
+static void quirk_svwks_csb5ide(struct pci_dev *pdev)
{
u8 prog;
pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
@@ -1097,7 +1045,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB
/*
* Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
*/
-static void __init quirk_ide_samemode(struct pci_dev *pdev)
+static void quirk_ide_samemode(struct pci_dev *pdev)
{
u8 prog;
@@ -1116,24 +1064,27 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, qui
* Some ATA devices break if put into D3
*/
-static void __devinit quirk_no_ata_d3(struct pci_dev *pdev)
+static void quirk_no_ata_d3(struct pci_dev *pdev)
{
- /* Quirk the legacy ATA devices only. The AHCI ones are ok */
- if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE)
- pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
+ pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3);
+/* Quirk the legacy ATA devices only. The AHCI ones are ok */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
/* ALi loses some register settings that we cannot then restore */
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, quirk_no_ata_d3);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
/* VIA comes back fine but we need to keep it alive or ACPI GTM failures
occur when mode detecting */
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_no_ata_d3);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
/* This was originally an Alpha specific thing, but it really fits here.
* The i82375 PCI/EISA bridge appears as non-classified. Fix that.
*/
-static void __init quirk_eisa_bridge(struct pci_dev *dev)
+static void quirk_eisa_bridge(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_EISA << 8;
}
@@ -1144,11 +1095,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e
* On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
* is not activated. The myth is that Asus said that they do not want the
* users to be irritated by just another PCI Device in the Win98 device
- * manager. (see the file prog/hotplug/README.p4b in the lm_sensors
+ * manager. (see the file prog/hotplug/README.p4b in the lm_sensors
* package 2.7.0 for details)
*
- * The SMBus PCI Device can be activated by setting a bit in the ICH LPC
- * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
+ * The SMBus PCI Device can be activated by setting a bit in the ICH LPC
+ * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
* becomes necessary to do this tweak in two steps -- the chosen trigger
* is either the Host bridge (preferred) or on-board VGA controller.
*
@@ -1167,11 +1118,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e
*/
static int asus_hides_smbus;
-static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
+static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0x8025: /* P4B-LX */
case 0x8070: /* P4B */
case 0x8088: /* P4B533 */
@@ -1179,14 +1130,14 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0x80b1: /* P4GE-V */
case 0x80b2: /* P4PE */
case 0x8093: /* P4B533-V */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0x8030: /* P4T533 */
asus_hides_smbus = 1;
}
@@ -1226,7 +1177,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
}
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0x088C: /* HP Compaq nc8000 */
case 0x0890: /* HP Compaq nc6000 */
asus_hides_smbus = 1;
@@ -1243,20 +1194,20 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
case 0x12bf: /* HP xw4100 */
asus_hides_smbus = 1;
}
- } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
- if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
- switch(dev->subsystem_device) {
- case 0xC00C: /* Samsung P35 notebook */
- asus_hides_smbus = 1;
- }
+ } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
+ if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
+ switch (dev->subsystem_device) {
+ case 0xC00C: /* Samsung P35 notebook */
+ asus_hides_smbus = 1;
+ }
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0x0058: /* Compaq Evo N620c */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0xB16C: /* Compaq Deskpro EP 401963-001 (PCA# 010174) */
/* Motherboard doesn't have Host bridge
* subvendor/subdevice IDs, therefore checking
@@ -1264,7 +1215,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0x00b8: /* Compaq Evo D510 CMT */
case 0x00b9: /* Compaq Evo D510 SFF */
case 0x00ba: /* Compaq Evo D510 USDT */
@@ -1303,7 +1254,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asu
static void asus_hides_smbus_lpc(struct pci_dev *dev)
{
u16 val;
-
+
if (likely(!asus_hides_smbus))
return;
@@ -1312,7 +1263,8 @@ static void asus_hides_smbus_lpc(struct pci_dev *dev)
pci_write_config_word(dev, 0xF2, val & (~0x8));
pci_read_config_word(dev, 0xF2, &val);
if (val & 0x8)
- dev_info(&dev->dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n", val);
+ dev_info(&dev->dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
+ val);
else
dev_info(&dev->dev, "Enabled i801 SMBus device\n");
}
@@ -1460,7 +1412,8 @@ static void asus_hides_ac97_lpc(struct pci_dev *dev)
pci_write_config_byte(dev, 0x50, val & (~0xc0));
pci_read_config_byte(dev, 0x50, &val);
if (val & 0xc0)
- dev_info(&dev->dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n", val);
+ dev_info(&dev->dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
+ val);
else
dev_info(&dev->dev, "Enabled onboard AC97/MC97 devices\n");
}
@@ -1550,7 +1503,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB3
#endif
#ifdef CONFIG_X86_IO_APIC
-static void __init quirk_alder_ioapic(struct pci_dev *pdev)
+static void quirk_alder_ioapic(struct pci_dev *pdev)
{
int i;
@@ -1565,15 +1518,13 @@ static void __init quirk_alder_ioapic(struct pci_dev *pdev)
/* The next five BARs all seem to be rubbish, so just clean
* them out */
- for (i=1; i < 6; i++) {
+ for (i = 1; i < 6; i++)
memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
- }
-
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
#endif
-static void __devinit quirk_pcie_mch(struct pci_dev *pdev)
+static void quirk_pcie_mch(struct pci_dev *pdev)
{
pci_msi_off(pdev);
pdev->no_msi = 1;
@@ -1587,7 +1538,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir
* It's possible for the MSI to get corrupted if shpc and acpi
* are used together on certain PXH-based systems.
*/
-static void __devinit quirk_pcie_pxh(struct pci_dev *dev)
+static void quirk_pcie_pxh(struct pci_dev *dev)
{
pci_msi_off(dev);
dev->no_msi = 1;
@@ -1603,7 +1554,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pci
* Some Intel PCI Express chipsets have trouble with downstream
* device power management.
*/
-static void quirk_intel_pcie_pm(struct pci_dev * dev)
+static void quirk_intel_pcie_pm(struct pci_dev *dev)
{
pci_pm_d3_delay = 120;
dev->no_d1d2 = 1;
@@ -1690,8 +1641,8 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
/*
* disable boot interrupts on HT-1000
@@ -1723,8 +1674,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
/*
* disable boot interrupts on AMD and ATI chipsets
@@ -1772,16 +1723,16 @@ static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
if (!pci_config_word) {
- dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] "
- "already disabled\n", dev->vendor, dev->device);
+ dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] already disabled\n",
+ dev->vendor, dev->device);
return;
}
pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
#endif /* CONFIG_X86_IO_APIC */
/*
@@ -1789,11 +1740,12 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, qui
 * but the PIO transfers won't work if BAR0 falls at an odd 8-byte boundary.
* Re-allocate the region if needed...
*/
-static void __init quirk_tc86c001_ide(struct pci_dev *dev)
+static void quirk_tc86c001_ide(struct pci_dev *dev)
{
struct resource *r = &dev->resource[0];
if (r->start & 0x8) {
+ r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0xf;
}
@@ -1802,7 +1754,46 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
quirk_tc86c001_ide);
-static void __devinit quirk_netmos(struct pci_dev *dev)
+/*
+ * PLX PCI 9050 PCI Target bridge controller has an errata that prevents the
+ * local configuration registers accessible via BAR0 (memory) or BAR1 (i/o)
+ * being read correctly if bit 7 of the base address is set.
+ * The BAR0 or BAR1 region may be disabled (size 0) or enabled (size 128).
+ * Re-allocate the regions to a 256-byte boundary if necessary.
+ */
+static void quirk_plx_pci9050(struct pci_dev *dev)
+{
+ unsigned int bar;
+
+ /* Fixed in revision 2 (PCI 9052). */
+ if (dev->revision >= 2)
+ return;
+ for (bar = 0; bar <= 1; bar++)
+ if (pci_resource_len(dev, bar) == 0x80 &&
+ (pci_resource_start(dev, bar) & 0x80)) {
+ struct resource *r = &dev->resource[bar];
+ dev_info(&dev->dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
+ bar);
+ r->flags |= IORESOURCE_UNSET;
+ r->start = 0;
+ r->end = 0xff;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ quirk_plx_pci9050);
+/*
+ * The following Meilhaus (vendor ID 0x1402) device IDs (amongst others)
+ * may be using the PLX PCI 9050: 0x0630, 0x0940, 0x0950, 0x0960, 0x100b,
+ * 0x1400, 0x140a, 0x140b, 0x14e0, 0x14ea, 0x14eb, 0x1604, 0x1608, 0x160c,
+ * 0x168f, 0x2000, 0x2600, 0x3000, 0x810a, 0x810b.
+ *
+ * Currently, device IDs 0x2000 and 0x2600 are used by the Comedi "me_daq"
+ * driver.
+ */
+DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
+DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
+
+static void quirk_netmos(struct pci_dev *dev)
{
unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
unsigned int num_serial = dev->subsystem_device & 0xf;
@@ -1827,25 +1818,22 @@ static void __devinit quirk_netmos(struct pci_dev *dev)
case PCI_DEVICE_ID_NETMOS_9745:
case PCI_DEVICE_ID_NETMOS_9845:
case PCI_DEVICE_ID_NETMOS_9855:
- if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL &&
- num_parallel) {
- dev_info(&dev->dev, "Netmos %04x (%u parallel, "
- "%u serial); changing class SERIAL to OTHER "
- "(use parport_serial)\n",
+ if (num_parallel) {
+ dev_info(&dev->dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
dev->device, num_parallel, num_serial);
dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
(dev->class & 0xff);
}
}
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
-static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
+static void quirk_e100_interrupt(struct pci_dev *dev)
{
u16 command, pmcsr;
u8 __iomem *csr;
u8 cmd_hi;
- int pm;
switch (dev->device) {
/* PCI IDs taken from drivers/net/e100.c */
@@ -1883,9 +1871,8 @@ static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
* Check that the device is in the D0 power state. If it's not,
* there is no point to look any further.
*/
- pm = pci_find_capability(dev, PCI_CAP_ID_PM);
- if (pm) {
- pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
+ if (dev->pm_cap) {
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
return;
}
@@ -1899,20 +1886,20 @@ static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
cmd_hi = readb(csr + 3);
if (cmd_hi == 0) {
- dev_warn(&dev->dev, "Firmware left e100 interrupts enabled; "
- "disabling\n");
+ dev_warn(&dev->dev, "Firmware left e100 interrupts enabled; disabling\n");
writeb(1, csr + 3);
}
iounmap(csr);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt);
/*
* The 82575 and 82598 may experience data corruption issues when transitioning
* out of L0S. To prevent this we need to disable L0S on the pci-e link
*/
-static void __devinit quirk_disable_aspm_l0s(struct pci_dev *dev)
+static void quirk_disable_aspm_l0s(struct pci_dev *dev)
{
dev_info(&dev->dev, "Disabling L0s\n");
pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
@@ -1932,7 +1919,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
-static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
+static void fixup_rev1_53c810(struct pci_dev *dev)
{
	/* rev 1 ncr53c810 chips don't set the class at all, which means
* they don't get their resources remapped. Fix that here.
@@ -1946,56 +1933,19 @@ static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
/* Enable 1k I/O space granularity on the Intel P64H2 */
-static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
+static void quirk_p64h2_1k_io(struct pci_dev *dev)
{
u16 en1k;
- u8 io_base_lo, io_limit_lo;
- unsigned long base, limit;
- struct resource *res = dev->resource + PCI_BRIDGE_RESOURCES;
pci_read_config_word(dev, 0x40, &en1k);
if (en1k & 0x200) {
dev_info(&dev->dev, "Enable I/O Space to 1KB granularity\n");
-
- pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
- pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
- base = (io_base_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8;
- limit = (io_limit_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8;
-
- if (base <= limit) {
- res->start = base;
- res->end = limit + 0x3ff;
- }
+ dev->io_window_1k = 1;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
-/* Fix the IOBL_ADR for 1k I/O space granularity on the Intel P64H2
- * The IOBL_ADR gets re-written to 4k boundaries in pci_setup_bridge()
- * in drivers/pci/setup-bus.c
- */
-static void __devinit quirk_p64h2_1k_io_fix_iobl(struct pci_dev *dev)
-{
- u16 en1k, iobl_adr, iobl_adr_1k;
- struct resource *res = dev->resource + PCI_BRIDGE_RESOURCES;
-
- pci_read_config_word(dev, 0x40, &en1k);
-
- if (en1k & 0x200) {
- pci_read_config_word(dev, PCI_IO_BASE, &iobl_adr);
-
- iobl_adr_1k = iobl_adr | (res->start >> 8) | (res->end & 0xfc00);
-
- if (iobl_adr != iobl_adr_1k) {
- dev_info(&dev->dev, "Fixing P64H2 IOBL_ADR from 0x%x to 0x%x for 1KB granularity\n",
- iobl_adr,iobl_adr_1k);
- pci_write_config_word(dev, PCI_IO_BASE, iobl_adr_1k);
- }
- }
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io_fix_iobl);
-
/* Under some circumstances, AER is not linked with extended capabilities.
* Force it to be linked by setting the corresponding control bit in the
* config space.
@@ -2006,8 +1956,7 @@ static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
if (!(b & 0x20)) {
pci_write_config_byte(dev, 0xf41, b | 0x20);
- dev_info(&dev->dev,
- "Linking AER extended capability\n");
+ dev_info(&dev->dev, "Linking AER extended capability\n");
}
}
}
@@ -2016,7 +1965,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_pcie_aer_ext_cap);
-static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
+static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
{
/*
* Disable PCI Bus Parking and PCI Master read caching on CX700
@@ -2045,8 +1994,7 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
/* Turn off PCI Bus Parking */
pci_write_config_byte(dev, 0x76, b ^ 0x40);
- dev_info(&dev->dev,
- "Disabling VIA CX700 PCI parking\n");
+ dev_info(&dev->dev, "Disabling VIA CX700 PCI parking\n");
}
}
@@ -2061,8 +2009,7 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
/* Disable "Read FIFO Timer" */
pci_write_config_byte(dev, 0x77, 0x0);
- dev_info(&dev->dev,
- "Disabling VIA CX700 PCI caching\n");
+ dev_info(&dev->dev, "Disabling VIA CX700 PCI caching\n");
}
}
}
@@ -2079,7 +2026,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_c
* We believe that it is legal to read beyond the end tag and
* therefore the solution is to limit the read/write length.
*/
-static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
+static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
{
/*
* Only disable the VPD capability for 5706, 5706S, 5708,
@@ -2115,13 +2062,31 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5709S,
quirk_brcm_570x_limit_vpd);
+static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
+{
+ u32 rev;
+
+ pci_read_config_dword(dev, 0xf4, &rev);
+
+	/* Only cap the MRRS if the device is a 5719 A0 */
+ if (rev == 0x05719000) {
+ int readrq = pcie_get_readrq(dev);
+ if (readrq > 2048)
+ pcie_set_readrq(dev, 2048);
+ }
+}
+
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_TIGON3_5719,
+ quirk_brcm_5719_limit_mrrs);
+
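The pcie_get_readrq()/pcie_set_readrq() accessors used above operate in bytes; valid PCIe MRRS values are powers of two from 128 to 4096. As an illustrative generalization of the same pattern (a sketch, not part of this patch):

	static void clamp_readrq(struct pci_dev *dev, int max_bytes)
	{
		/* Lower the Maximum Read Request Size; never raise it */
		if (pcie_get_readrq(dev) > max_bytes)
			pcie_set_readrq(dev, max_bytes);
	}
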
/* Originally in EDAC sources for i82875P:
* Intel tells BIOS developers to hide device 6 which
* configures the overflow device access containing
* the DRBs - this is where we expose device 6.
* http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
*/
-static void __devinit quirk_unhide_mch_dev6(struct pci_dev *dev)
+static void quirk_unhide_mch_dev6(struct pci_dev *dev)
{
u8 reg;
@@ -2136,15 +2101,33 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
quirk_unhide_mch_dev6);
+#ifdef CONFIG_TILEPRO
+/*
+ * The Tilera TILEmpower tilepro platform needs to set the link speed
+ * to 2.5GT/s (gigatransfers per second, Gen 1). The default link
+ * speed setting is 5GT/s (Gen 2). 0x98 is the Link Control 2 PCIe
+ * capability register of the PEX8624 PCIe switch. The switch
+ * supports link speed auto-negotiation, but falsely sets
+ * the link speed to 5GT/s.
+ */
+static void quirk_tile_plx_gen1(struct pci_dev *dev)
+{
+ if (tile_plx_gen1) {
+ pci_write_config_dword(dev, 0x98, 0x1);
+ mdelay(50);
+ }
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
+#endif /* CONFIG_TILEPRO */
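For reference, a more portable variant of this fixup could use the PCIe capability accessors rather than the hard-coded 0x98 offset. The sketch below assumes the PCI_EXP_LNKCTL2_TLS* constants are available, which may not be true on older kernels:

	static void quirk_tile_plx_gen1_portable(struct pci_dev *dev)
	{
		if (!tile_plx_gen1)
			return;
		/* Set the Target Link Speed field of Link Control 2 to 2.5GT/s */
		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL2,
						   PCI_EXP_LNKCTL2_TLS,
						   PCI_EXP_LNKCTL2_TLS_2_5GT);
		mdelay(50);
	}
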
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
* PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
- * some other busses controlled by the chipset even if Linux is not
- * aware of it. Instead of setting the flag on all busses in the
+ * some other buses controlled by the chipset even if Linux is not
+ * aware of it. Instead of setting the flag on all buses in the
* machine, simply disable MSI globally.
*/
-static void __init quirk_disable_all_msi(struct pci_dev *dev)
+static void quirk_disable_all_msi(struct pci_dev *dev)
{
pci_no_msi();
dev_warn(&dev->dev, "MSI quirk detected; MSI disabled\n");
@@ -2158,11 +2141,10 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disab
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
-static void __devinit quirk_disable_msi(struct pci_dev *dev)
+static void quirk_disable_msi(struct pci_dev *dev)
{
if (dev->subordinate) {
- dev_warn(&dev->dev, "MSI quirk detected; "
- "subordinate MSI disabled\n");
+ dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
}
@@ -2176,7 +2158,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
* we use the possible vendor/device IDs of the host bridge for the
* declared quirk, and search for the APC bridge by slot number.
*/
-static void __devinit quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
+static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
{
struct pci_dev *apc_bridge;
@@ -2192,7 +2174,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
/* Go through the list of Hypertransport capabilities and
* return 1 if a HT MSI capability is found and enabled */
-static int __devinit msi_ht_cap_enabled(struct pci_dev *dev)
+static int msi_ht_cap_enabled(struct pci_dev *dev)
{
int pos, ttl = 48;
@@ -2201,8 +2183,7 @@ static int __devinit msi_ht_cap_enabled(struct pci_dev *dev)
u8 flags;
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
- &flags) == 0)
- {
+ &flags) == 0) {
dev_info(&dev->dev, "Found %s HT MSI Mapping\n",
flags & HT_MSI_FLAGS_ENABLE ?
"enabled" : "disabled");
@@ -2216,11 +2197,10 @@ static int __devinit msi_ht_cap_enabled(struct pci_dev *dev)
}
/* Check the hypertransport MSI mapping to know whether MSI is enabled or not */
-static void __devinit quirk_msi_ht_cap(struct pci_dev *dev)
+static void quirk_msi_ht_cap(struct pci_dev *dev)
{
if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
- dev_warn(&dev->dev, "MSI quirk detected; "
- "subordinate MSI disabled\n");
+ dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
}
@@ -2230,7 +2210,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2
/* The nVidia CK804 chipset may have 2 HT MSI mappings.
 * MSI is supported if the MSI capability is set in any of these mappings.
*/
-static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
+static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
struct pci_dev *pdev;
@@ -2244,8 +2224,7 @@ static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
if (!pdev)
return;
if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
- dev_warn(&dev->dev, "MSI quirk detected; "
- "subordinate MSI disabled\n");
+ dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
pci_dev_put(pdev);
@@ -2254,7 +2233,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_msi_ht_cap);
/* Force enable MSI mapping capability on HT bridges */
-static void __devinit ht_enable_msi_mapping(struct pci_dev *dev)
+static void ht_enable_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = 48;
@@ -2284,12 +2263,14 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
 * for the MCP55 NIC. It is not yet determined whether the MSI problem
 * also affects other devices. For now, turn off MSI for this device.
*/
-static void __devinit nvenet_msi_disable(struct pci_dev *dev)
+static void nvenet_msi_disable(struct pci_dev *dev)
{
- if (dmi_name_in_vendors("P5N32-SLI PREMIUM") ||
- dmi_name_in_vendors("P5N32-E SLI")) {
- dev_info(&dev->dev,
- "Disabling msi for MCP55 NIC on P5N32-SLI\n");
+ const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
+
+ if (board_name &&
+ (strstr(board_name, "P5N32-SLI PREMIUM") ||
+ strstr(board_name, "P5N32-E SLI"))) {
+ dev_info(&dev->dev, "Disabling msi for MCP55 NIC on P5N32-SLI\n");
dev->no_msi = 1;
}
}
@@ -2298,19 +2279,22 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
nvenet_msi_disable);
/*
- * Some versions of the MCP55 bridge from nvidia have a legacy irq routing
- * config register. This register controls the routing of legacy interrupts
- * from devices that route through the MCP55. If this register is misprogramed
- * interrupts are only sent to the bsp, unlike conventional systems where the
- * irq is broadxast to all online cpus. Not having this register set
- * properly prevents kdump from booting up properly, so lets make sure that
- * we have it set correctly.
- * Note this is an undocumented register.
+ * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing
+ * config register. This register controls the routing of legacy
+ * interrupts from devices that route through the MCP55. If this register
+ * is misprogrammed, interrupts are only sent to the BSP, unlike
+ * conventional systems where the IRQ is broadcast to all online CPUs. Not
+ * having this register set properly prevents kdump from booting up
+ * properly, so let's make sure that we have it set correctly.
+ * Note that this is an undocumented register.
*/
-static void __devinit nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
+static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
{
u32 cfg;
+ if (!pci_find_capability(dev, PCI_CAP_ID_HT))
+ return;
+
pci_read_config_dword(dev, 0x74, &cfg);
if (cfg & ((1 << 2) | (1 << 15))) {
@@ -2328,7 +2312,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
nvbridge_check_legacy_irq_routing);
-static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
+static int ht_check_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = 48;
int found = 0;
@@ -2356,7 +2340,7 @@ static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
return found;
}
-static int __devinit host_bridge_with_leaf(struct pci_dev *host_bridge)
+static int host_bridge_with_leaf(struct pci_dev *host_bridge)
{
struct pci_dev *dev;
int pos;
@@ -2390,7 +2374,7 @@ static int __devinit host_bridge_with_leaf(struct pci_dev *host_bridge)
#define PCI_HT_CAP_SLAVE_CTRL0 4 /* link control */
#define PCI_HT_CAP_SLAVE_CTRL1 8 /* link control to */
-static int __devinit is_end_of_ht_chain(struct pci_dev *dev)
+static int is_end_of_ht_chain(struct pci_dev *dev)
{
int pos, ctrl_off;
int end = 0;
@@ -2414,7 +2398,7 @@ out:
return end;
}
-static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
+static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
{
struct pci_dev *host_bridge;
int pos;
@@ -2453,7 +2437,7 @@ out:
pci_dev_put(host_bridge);
}
-static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
+static void ht_disable_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = 48;
@@ -2473,7 +2457,7 @@ static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
}
}
-static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
+static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
{
struct pci_dev *host_bridge;
int pos;
@@ -2495,8 +2479,7 @@ static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
*/
host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
if (host_bridge == NULL) {
- dev_warn(&dev->dev,
- "nv_msi_ht_cap_quirk didn't locate host bridge\n");
+ dev_warn(&dev->dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
return;
}
@@ -2510,23 +2493,26 @@ static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
else
nv_ht_enable_msi_mapping(dev);
}
- return;
+ goto out;
}
/* HT MSI is not enabled */
if (found == 1)
- return;
+ goto out;
/* Host bridge is not to HT, disable HT MSI mapping on this device */
ht_disable_msi_mapping(dev);
+
+out:
+ pci_dev_put(host_bridge);
}
-static void __devinit nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
+static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
{
return __nv_msi_ht_cap_quirk(dev, 1);
}
-static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
+static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
{
return __nv_msi_ht_cap_quirk(dev, 0);
}
@@ -2537,11 +2523,11 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_q
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
-static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
+static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
}
-static void __devinit quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
+static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
{
struct pci_dev *p;
@@ -2558,6 +2544,14 @@ static void __devinit quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
pci_dev_put(p);
}
+static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
+{
+ /* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */
+ if (dev->revision < 0x18) {
+ dev_info(&dev->dev, "set MSI_INTX_DISABLE_BUG flag\n");
+ dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
+ }
+}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5780,
quirk_msi_intx_disable_bug);
@@ -2595,67 +2589,37 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
+ quirk_msi_intx_disable_qca_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
+ quirk_msi_intx_disable_qca_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
+ quirk_msi_intx_disable_qca_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
+ quirk_msi_intx_disable_qca_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
+ quirk_msi_intx_disable_qca_bug);
#endif /* CONFIG_PCI_MSI */
-#ifdef CONFIG_PCI_IOV
-
-/*
- * For Intel 82576 SR-IOV NIC, if BIOS doesn't allocate resources for the
- * SR-IOV BARs, zero the Flash BAR and program the SR-IOV BARs to use the
- * old Flash Memory Space.
- */
-static void __devinit quirk_i82576_sriov(struct pci_dev *dev)
-{
- int pos, flags;
- u32 bar, start, size;
-
- if (PAGE_SIZE > 0x10000)
- return;
-
- flags = pci_resource_flags(dev, 0);
- if ((flags & PCI_BASE_ADDRESS_SPACE) !=
- PCI_BASE_ADDRESS_SPACE_MEMORY ||
- (flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) !=
- PCI_BASE_ADDRESS_MEM_TYPE_32)
- return;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
- if (!pos)
- return;
-
- pci_read_config_dword(dev, pos + PCI_SRIOV_BAR, &bar);
- if (bar & PCI_BASE_ADDRESS_MEM_MASK)
- return;
-
- start = pci_resource_start(dev, 1);
- size = pci_resource_len(dev, 1);
- if (!start || size != 0x400000 || start & (size - 1))
- return;
-
- pci_resource_flags(dev, 1) = 0;
- pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0);
- pci_write_config_dword(dev, pos + PCI_SRIOV_BAR, start);
- pci_write_config_dword(dev, pos + PCI_SRIOV_BAR + 12, start + size / 2);
-
- dev_info(&dev->dev, "use Flash Memory Space for SR-IOV BARs\n");
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
-
-#endif /* CONFIG_PCI_IOV */
-
/* Allow manual resource allocation for PCI hotplug bridges
* via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
* some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6),
- * kernel fails to allocate resources when hotplug device is
+ * the kernel fails to allocate resources when a hotplug device is
 * inserted and the PCI bus is rescanned.
*/
-static void __devinit quirk_hotplug_bridge(struct pci_dev *dev)
+static void quirk_hotplug_bridge(struct pci_dev *dev)
{
dev->is_hotplug_bridge = 1;
}
@@ -2666,7 +2630,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
* This is a quirk for the Ricoh MMC controller found as a part of
 * some multifunction chips.
- * This is very similiar and based on the ricoh_mmc driver written by
+ * This is very similar and based on the ricoh_mmc driver written by
* Philip Langdale. Thank you for these magic sequences.
*
* These chips implement the four main memory card controllers (SD, MMC, MS, xD)
@@ -2728,6 +2692,29 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
/* disable must be done via function #0 */
if (PCI_FUNC(dev->devfn))
return;
+ /*
+ * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
+ * certain types of SD/MMC cards. Lowering the SD base
+	 * clock frequency from 200MHz to 50MHz fixes this issue.
+	 *
+	 * 0x150 - SD2.0 mode enable for changing base clock
+	 *	   frequency to 50MHz
+	 * 0xe1 - Base clock frequency
+	 * 0x32 - 50MHz new clock frequency
+	 * 0xf9 - Key register for 0x150
+	 * 0xfc - Key register for 0xe1
+ */
+ if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
+ dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
+ pci_write_config_byte(dev, 0xf9, 0xfc);
+ pci_write_config_byte(dev, 0x150, 0x10);
+ pci_write_config_byte(dev, 0xf9, 0x00);
+ pci_write_config_byte(dev, 0xfc, 0x01);
+ pci_write_config_byte(dev, 0xe1, 0x32);
+ pci_write_config_byte(dev, 0xfc, 0x00);
+
+		dev_notice(&dev->dev, "MMC controller base frequency changed to 50MHz.\n");
+ }
pci_read_config_byte(dev, 0xCB, &disable);
@@ -2741,23 +2728,280 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
+
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
#endif /*CONFIG_MMC_RICOH_MMC*/
+#ifdef CONFIG_DMAR_TABLE
+#define VTUNCERRMSK_REG 0x1ac
+#define VTD_MSK_SPEC_ERRORS (1 << 31)
+/*
+ * This is a quirk for masking VT-d spec-defined errors to the platform
+ * error handling logic. Without this, platforms using the Intel 7500 and
+ * 5500 chipsets (and derivative chipsets like the X58) seem to generate
+ * NMIs/SMIs (based on the platform's RAS configuration) when a VT-d fault
+ * happens. The resulting SMI can cause the system to hang.
+ *
+ * VT-d spec-related errors are already handled by the VT-d OS code, so
+ * there is no need to report the same error through other channels.
+ */
+static void vtd_mask_spec_errors(struct pci_dev *dev)
+{
+ u32 word;
+
+ pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
+ pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
+#endif
+
+static void fixup_ti816x_class(struct pci_dev *dev)
+{
+ /* TI 816x devices do not have class code set when in PCIe boot mode */
+ dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
+ dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
+ PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
+
+/* Some PCIe devices do not work reliably with the maximum payload
+ * size they claim to support.
+ */
+static void fixup_mpss_256(struct pci_dev *dev)
+{
+ dev->pcie_mpss = 1; /* 256 bytes */
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+
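dev->pcie_mpss stores the Device Capabilities encoding of the maximum payload size, so the value 1 above means 256 bytes. A minimal sketch of the encoding (illustrative only):

	static inline unsigned int pcie_mpss_to_bytes(u8 mpss)
	{
		/* 0 -> 128B, 1 -> 256B, 2 -> 512B, ... 5 -> 4096B */
		return 128U << mpss;
	}
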
+/* Intel 5000 and 5100 memory controllers have an erratum with read completion
+ * coalescing (which is enabled by default on some BIOSes) and an MPS of 256B.
+ * Since there is no way of knowing what the PCIe MPS on each fabric will be
+ * until all of the devices are discovered and the buses walked, read completion
+ * coalescing must be disabled. Unfortunately, it cannot be re-enabled because
+ * it is possible to hotplug a device with an MPS of 256B.
+ */
+static void quirk_intel_mc_errata(struct pci_dev *dev)
+{
+ int err;
+ u16 rcc;
+
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
+ return;
+
+	/* The Intel errata specify bits to change but do not say what they are.
+ * Keeping them magical until such time as the registers and values can
+ * be explained.
+ */
+ err = pci_read_config_word(dev, 0x48, &rcc);
+ if (err) {
+ dev_err(&dev->dev, "Error attempting to read the read completion coalescing register\n");
+ return;
+ }
+
+ if (!(rcc & (1 << 10)))
+ return;
+
+ rcc &= ~(1 << 10);
+
+ err = pci_write_config_word(dev, 0x48, rcc);
+ if (err) {
+ dev_err(&dev->dev, "Error attempting to write the read completion coalescing register\n");
+ return;
+ }
+
+ pr_info_once("Read completion coalescing disabled due to hardware errata relating to 256B MPS\n");
+}
+/* Intel 5000 series memory controllers and ports 2-7 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);
+/* Intel 5100 series memory controllers and ports 2-7 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
+
+
+/*
+ * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum.
+ * To work around this, query the device for the size the BAR should be
+ * configured to, and modify the resource end to correspond to that size.
+ */
+static void quirk_intel_ntb(struct pci_dev *dev)
+{
+ int rc;
+ u8 val;
+
+ rc = pci_read_config_byte(dev, 0x00D0, &val);
+ if (rc)
+ return;
+
+ dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;
+
+ rc = pci_read_config_byte(dev, 0x00D1, &val);
+ if (rc)
+ return;
+
+ dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
+
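The registers read at 0xD0/0xD1 hold the log2 of the intended BAR size, hence the shift above. A worked sketch of the computation (illustrative only):

	static inline resource_size_t ntb_bar_end(resource_size_t start, u8 log2_size)
	{
		/* e.g. log2_size == 17 yields a 128 KB aperture: end = start + 0x1ffff */
		return start + ((resource_size_t)1 << log2_size) - 1;
	}
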
+static ktime_t fixup_debug_start(struct pci_dev *dev,
+ void (*fn)(struct pci_dev *dev))
+{
+ ktime_t calltime = ktime_set(0, 0);
+
+ dev_dbg(&dev->dev, "calling %pF\n", fn);
+ if (initcall_debug) {
+ pr_debug("calling %pF @ %i for %s\n",
+ fn, task_pid_nr(current), dev_name(&dev->dev));
+ calltime = ktime_get();
+ }
+
+ return calltime;
+}
+
+static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
+ void (*fn)(struct pci_dev *dev))
+{
+ ktime_t delta, rettime;
+ unsigned long long duration;
+
+ if (initcall_debug) {
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+ pr_debug("pci fixup %pF returned after %lld usecs for %s\n",
+ fn, duration, dev_name(&dev->dev));
+ }
+}
+
+/*
+ * Some BIOS implementations leave the Intel GPU interrupts enabled,
+ * even though no one is handling them (e.g., the i915 driver is never loaded).
+ * Additionally, the interrupt destination is not set up properly
+ * and the interrupt ends up -somewhere-.
+ *
+ * These spurious interrupts are "sticky" and the kernel disables
+ * the (shared) interrupt line after 100,000+ generated interrupts.
+ *
+ * Fix it by disabling the still-enabled interrupts.
+ * This resolves crashes often seen on monitor unplug.
+ */
+#define I915_DEIER_REG 0x4400c
+static void disable_igfx_irq(struct pci_dev *dev)
+{
+ void __iomem *regs = pci_iomap(dev, 0, 0);
+ if (regs == NULL) {
+ dev_warn(&dev->dev, "igfx quirk: Can't iomap PCI device\n");
+ return;
+ }
+
+ /* Check if any interrupt line is still enabled */
+ if (readl(regs + I915_DEIER_REG) != 0) {
+ dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; disabling\n");
+
+ writel(0, regs + I915_DEIER_REG);
+ }
+
+ pci_iounmap(dev, regs);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
+
+/*
+ * PCI devices which are on Intel chips can skip the 10ms delay
+ * before entering D3 mode.
+ */
+static void quirk_remove_d3_delay(struct pci_dev *dev)
+{
+ dev->d3_delay = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
+
+/*
+ * Some devices may pass our check in pci_intx_mask_supported() because
+ * PCI_COMMAND_INTX_DISABLE appears to work, even though they do not
+ * properly support this feature.
+ */
+static void quirk_broken_intx_masking(struct pci_dev *dev)
+{
+ dev->broken_intx_masking = 1;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
+ quirk_broken_intx_masking);
+/*
+ * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
+ * Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC
+ *
+ * RTL8110SC - Fails under PCI device assignment using DisINTx masking.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
+ quirk_broken_intx_masking);
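Conceptually, the check that broken_intx_masking short-circuits toggles the DisINTx bit and verifies that it sticks. A rough sketch under that assumption (not the kernel's exact implementation):

	static bool intx_masking_seems_supported(struct pci_dev *dev)
	{
		u16 orig, toggled;

		if (dev->broken_intx_masking)
			return false;

		pci_read_config_word(dev, PCI_COMMAND, &orig);
		pci_write_config_word(dev, PCI_COMMAND,
				      orig ^ PCI_COMMAND_INTX_DISABLE);
		pci_read_config_word(dev, PCI_COMMAND, &toggled);
		pci_write_config_word(dev, PCI_COMMAND, orig);	/* restore */

		return (orig ^ toggled) & PCI_COMMAND_INTX_DISABLE;
	}
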
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
{
- while (f < end) {
- if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
- (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
- dev_dbg(&dev->dev, "calling %pF\n", f->hook);
+ ktime_t calltime;
+
+ for (; f < end; f++)
+ if ((f->class == (u32) (dev->class >> f->class_shift) ||
+ f->class == (u32) PCI_ANY_ID) &&
+ (f->vendor == dev->vendor ||
+ f->vendor == (u16) PCI_ANY_ID) &&
+ (f->device == dev->device ||
+ f->device == (u16) PCI_ANY_ID)) {
+ calltime = fixup_debug_start(dev, f->hook);
f->hook(dev);
+ fixup_debug_report(dev, calltime, f->hook);
}
- f++;
- }
}
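Restating the match test from pci_do_fixups() as a standalone helper makes the class_shift convention explicit; with class_shift == 8 the programming-interface byte is ignored, which is how the Netmos and e100 class fixups above are declared (sketch only):

	static bool fixup_class_matches(const struct pci_fixup *f,
					const struct pci_dev *dev)
	{
		return f->class == (u32)(dev->class >> f->class_shift) ||
		       f->class == (u32)PCI_ANY_ID;
	}
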
extern struct pci_fixup __start_pci_fixups_early[];
@@ -2775,12 +3019,13 @@ extern struct pci_fixup __end_pci_fixups_resume_early[];
extern struct pci_fixup __start_pci_fixups_suspend[];
extern struct pci_fixup __end_pci_fixups_suspend[];
+static bool pci_apply_fixup_final_quirks;
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
struct pci_fixup *start, *end;
- switch(pass) {
+ switch (pass) {
case pci_fixup_early:
start = __start_pci_fixups_early;
end = __end_pci_fixups_early;
@@ -2792,6 +3037,8 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
break;
case pci_fixup_final:
+ if (!pci_apply_fixup_final_quirks)
+ return;
start = __start_pci_fixups_final;
end = __end_pci_fixups_final;
break;
@@ -2824,6 +3071,7 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_fixup_device);
+
static int __init pci_apply_final_quirks(void)
{
struct pci_dev *dev = NULL;
@@ -2834,6 +3082,7 @@ static int __init pci_apply_final_quirks(void)
printk(KERN_DEBUG "PCI: CLS %u bytes\n",
pci_cache_line_size << 2);
+ pci_apply_fixup_final_quirks = true;
for_each_pci_dev(dev) {
pci_fixup_device(pci_fixup_final, dev);
/*
@@ -2848,12 +3097,13 @@ static int __init pci_apply_final_quirks(void)
if (!tmp || cls == tmp)
continue;
- printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), "
- "using %u bytes\n", cls << 2, tmp << 2,
+ printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
+ cls << 2, tmp << 2,
pci_dfl_cache_line_size << 2);
pci_cache_line_size = pci_dfl_cache_line_size;
}
}
+
if (!pci_cache_line_size) {
printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
cls << 2, pci_dfl_cache_line_size << 2);
@@ -2894,32 +3144,174 @@ static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
{
- int pos;
+ /*
+ * http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
+ *
+ * The 82599 supports FLR on VFs, but FLR support is reported only
+ * in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5).
+ * Therefore, we can't use pcie_flr(), which checks the VF DEVCAP.
+ */
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (!pos)
+ if (probe)
+ return 0;
+
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
+
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+
+ msleep(100);
+
+ return 0;
+}
+
+#include "../gpu/drm/i915/i915_reg.h"
+#define MSG_CTL 0x45010
+#define NSDE_PWR_STATE 0xd0100
+#define IGD_OPERATION_TIMEOUT 10000 /* set timeout 10 seconds */
+
+static int reset_ivb_igd(struct pci_dev *dev, int probe)
+{
+ void __iomem *mmio_base;
+ unsigned long timeout;
+ u32 val;
+
+ if (probe)
+ return 0;
+
+ mmio_base = pci_iomap(dev, 0, 0);
+ if (!mmio_base)
+ return -ENOMEM;
+
+ iowrite32(0x00000002, mmio_base + MSG_CTL);
+
+ /*
+	 * Clobbering the SOUTH_CHICKEN2 register is fine only if the next
+	 * driver loaded sets the right bits. However, this is a reset, and
+	 * the bits were previously set by i915, so we clobber the
+	 * SOUTH_CHICKEN2 register directly here.
+ */
+ iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);
+
+ val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
+ iowrite32(val, mmio_base + PCH_PP_CONTROL);
+
+ timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
+ do {
+ val = ioread32(mmio_base + PCH_PP_STATUS);
+ if ((val & 0xb0000000) == 0)
+ goto reset_complete;
+ msleep(10);
+ } while (time_before(jiffies, timeout));
+ dev_warn(&dev->dev, "timeout during reset\n");
+
+reset_complete:
+ iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);
+
+ pci_iounmap(dev, mmio_base);
+ return 0;
+}
+
+/*
+ * Device-specific reset method for Chelsio T4-based adapters.
+ */
+static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
+{
+ u16 old_command;
+ u16 msix_flags;
+
+ /*
+ * If this isn't a Chelsio T4-based device, return -ENOTTY indicating
+ * that we have no device-specific reset method.
+ */
+ if ((dev->device & 0xf000) != 0x4000)
return -ENOTTY;
+ /*
+ * If this is the "probe" phase, return 0 indicating that we can
+ * reset this device.
+ */
if (probe)
return 0;
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR);
+ /*
+ * T4 can wedge if there are DMAs in flight within the chip and Bus
+	 * Master has been disabled. We need to have it on until the Function
+	 * Level Reset completes. (Bus Master is disabled in
+	 * pci_reset_function().)
+ */
+ pci_read_config_word(dev, PCI_COMMAND, &old_command);
+ pci_write_config_word(dev, PCI_COMMAND,
+ old_command | PCI_COMMAND_MASTER);
+
+ /*
+ * Perform the actual device function reset, saving and restoring
+ * configuration information around the reset.
+ */
+ pci_save_state(dev);
+
+ /*
+ * T4 also suffers a Head-Of-Line blocking problem if MSI-X interrupts
+ * are disabled when an MSI-X interrupt message needs to be delivered.
+ * So we briefly re-enable MSI-X interrupts for the duration of the
+ * FLR. The pci_restore_state() below will restore the original
+ * MSI-X state.
+ */
+	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &msix_flags);
+	if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
+		pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS,
+ msix_flags |
+ PCI_MSIX_FLAGS_ENABLE |
+ PCI_MSIX_FLAGS_MASKALL);
+
+ /*
+ * Start of pcie_flr() code sequence. This reset code is a copy of
+ * the guts of pcie_flr() because that's not an exported function.
+ */
+
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
+
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
msleep(100);
+ /*
+ * End of pcie_flr() code sequence.
+ */
+
+ /*
+ * Restore the configuration information (BAR values, etc.) including
+ * the original PCI Configuration Space Command word, and return
+ * success.
+ */
+ pci_restore_state(dev);
+ pci_write_config_word(dev, PCI_COMMAND, old_command);
return 0;
}
#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
+#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
+#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
reset_intel_82599_sfp_virtfn },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
+ reset_ivb_igd },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
+ reset_ivb_igd },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
reset_intel_generic_dev },
+ { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ reset_chelsio_generic_dev },
{ 0 }
};
+/*
+ * These device-specific reset methods are here rather than in a driver
+ * because when a host assigns a device to a guest VM, the host may need
+ * to reset the device but probably doesn't have a driver for it.
+ */
int pci_dev_specific_reset(struct pci_dev *dev, int probe)
{
const struct pci_dev_reset_methods *i;
@@ -2934,3 +3326,403 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
return -ENOTTY;
}
+
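The probe argument follows the usual reset-method convention: non-zero only asks whether a method exists, zero performs the reset. A usage sketch (illustrative only):

	static int try_device_specific_reset(struct pci_dev *dev)
	{
		if (pci_dev_specific_reset(dev, 1) != 0)
			return -ENOTTY;			/* no method for this device */

		return pci_dev_specific_reset(dev, 0);	/* perform the reset */
	}
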
+static void quirk_dma_func0_alias(struct pci_dev *dev)
+{
+ if (PCI_FUNC(dev->devfn) != 0) {
+ dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+ }
+}
+
+/*
+ * https://bugzilla.redhat.com/show_bug.cgi?id=605888
+ *
+ * Some Ricoh devices use function 0 as the PCIe requester ID for DMA.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
+
+static void quirk_dma_func1_alias(struct pci_dev *dev)
+{
+ if (PCI_FUNC(dev->devfn) != 1) {
+ dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1);
+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+ }
+}
+
+/*
+ * The Marvell 88SE9123 uses function 1 as the requester ID for DMA. In some
+ * SKUs function 1 is present and is a legacy IDE controller; in other
+ * SKUs this function is not present, making it a ghost requester.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=42679
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c59 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
+ quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
+ quirk_dma_func1_alias);
+/* https://bugs.gentoo.org/show_bug.cgi?id=497630 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
+ PCI_DEVICE_ID_JMICRON_JMB388_ESD,
+ quirk_dma_func1_alias);
+
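A sketch of how an IOMMU driver might consume the alias set above, assuming the PCI_DEVID() helper, which composes a bus number and devfn into a 16-bit requester ID:

	static u16 dma_requester_id(struct pci_dev *dev)
	{
		u8 devfn = dev->devfn;

		/* DMA from this device carries the aliased function's ID */
		if (dev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)
			devfn = dev->dma_alias_devfn;

		return PCI_DEVID(dev->bus->number, devfn);
	}
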
+/*
+ * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in
+ * using the wrong DMA alias for the device. Some of these devices can be
+ * used as either forward or reverse bridges, so we need to test whether the
+ * device is operating in the correct mode. We could probably apply this
+ * quirk to PCI_ANY_ID, but for now we'll just use known offenders. The test:
+ * if @pdev is a non-root, non-PCIe bridge whose upstream device is PCIe but
+ * is not itself a PCIe-to-PCI bridge, then @pdev is actually a PCIe-to-PCI
+ * bridge.
+ */
+static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev)
+{
+ if (!pci_is_root_bus(pdev->bus) &&
+ pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
+ !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) &&
+ pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE)
+ pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS;
+}
+/* ASM1083/1085, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c46 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
+ quirk_use_pcie_bridge_dma_alias);
+/* Tundra 8113, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c43 */
+DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
+/* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */
+DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
+
+static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
+{
+ if (!PCI_FUNC(dev->devfn))
+ return pci_dev_get(dev);
+
+ return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+}
+
+static const struct pci_dev_dma_source {
+ u16 vendor;
+ u16 device;
+ struct pci_dev *(*dma_source)(struct pci_dev *dev);
+} pci_dev_dma_source[] = {
+ /*
+ * https://bugzilla.redhat.com/show_bug.cgi?id=605888
+ *
+ * Some Ricoh devices use the function 0 source ID for DMA on
+	 * other functions of a multifunction device. The DMA source
+	 * is therefore function 0, which has implications for the
+	 * IOMMU grouping of these devices.
+ */
+ { PCI_VENDOR_ID_RICOH, 0xe822, pci_func_0_dma_source },
+ { PCI_VENDOR_ID_RICOH, 0xe230, pci_func_0_dma_source },
+ { PCI_VENDOR_ID_RICOH, 0xe832, pci_func_0_dma_source },
+ { PCI_VENDOR_ID_RICOH, 0xe476, pci_func_0_dma_source },
+ { 0 }
+};
+
+/*
+ * IOMMUs with isolation capabilities need to be programmed with the
+ * correct source ID of a device. In most cases, the source ID matches
+ * the device doing the DMA, but sometimes hardware is broken and will
+ * tag the DMA as being sourced from a different device. This function
+ * allows that translation. Note that the reference count of the
+ * returned device is incremented on all paths.
+ */
+struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
+{
+ const struct pci_dev_dma_source *i;
+
+ for (i = pci_dev_dma_source; i->dma_source; i++) {
+ if ((i->vendor == dev->vendor ||
+ i->vendor == (u16)PCI_ANY_ID) &&
+ (i->device == dev->device ||
+ i->device == (u16)PCI_ANY_ID))
+ return i->dma_source(dev);
+ }
+
+ return pci_dev_get(dev);
+}
+
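Because a reference is taken on every return path, callers must balance it with pci_dev_put(). A usage sketch (illustrative only):

	static void program_iommu_context(struct pci_dev *pdev)
	{
		struct pci_dev *src = pci_get_dma_source(pdev);

		/* ... set up the IOMMU translation for src's requester ID ... */

		pci_dev_put(src);	/* reference taken on every path */
	}
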
+/*
+ * AMD has indicated that the devices below do not support peer-to-peer
+ * in any system where they are found in the southbridge and an AMD
+ * IOMMU is present. Multifunction devices that do not support
+ * peer-to-peer between functions can claim to support a subset of ACS.
+ * Such devices effectively enable request redirect (RR) and completion
+ * redirect (CR) since all transactions are redirected to the upstream
+ * root complex.
+ *
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94086
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94102
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/99402
+ *
+ * 1002:4385 SBx00 SMBus Controller
+ * 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller
+ * 1002:4383 SBx00 Azalia (Intel HDA)
+ * 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller
+ * 1002:4384 SBx00 PCI to PCI Bridge
+ * 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller
+ */
+static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
+{
+#ifdef CONFIG_ACPI
+ struct acpi_table_header *header = NULL;
+ acpi_status status;
+
+ /* Targeting multifunction devices on the SB (appears on root bus) */
+ if (!dev->multifunction || !pci_is_root_bus(dev->bus))
+ return -ENODEV;
+
+ /* The IVRS table describes the AMD IOMMU */
+ status = acpi_get_table("IVRS", 0, &header);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ /* Filter out flags not applicable to multifunction */
+ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
+
+ return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
+#else
+ return -ENODEV;
+#endif
+}
+
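A worked example of the return convention (illustrative): flags that do not apply to multifunction devices are filtered out first, and of the remainder only the redirect-equivalent subset is claimed:

	/*
	 * pci_quirk_amd_sb_acs(dev, PCI_ACS_RR | PCI_ACS_CR) -> 1 (claimed)
	 * pci_quirk_amd_sb_acs(dev, PCI_ACS_SV)              -> 1 (filtered out
	 *                                                         as not applicable)
	 * pci_quirk_amd_sb_acs(dev, PCI_ACS_EC | PCI_ACS_DT) -> 0 (not claimed)
	 */
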
+/*
+ * Many Intel PCH root ports do provide ACS-like features to disable peer
+ * transactions and validate bus numbers in requests, but do not provide an
+ * actual PCIe ACS capability. This is the list of device IDs known to fall
+ * into that category as provided by Intel in Red Hat bugzilla 1037684.
+ */
+static const u16 pci_quirk_intel_pch_acs_ids[] = {
+ /* Ibexpeak PCH */
+ 0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
+ 0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,
+ /* Cougarpoint PCH */
+ 0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
+ 0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,
+ /* Pantherpoint PCH */
+ 0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
+ 0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,
+ /* Lynxpoint-H PCH */
+ 0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
+ 0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,
+ /* Lynxpoint-LP PCH */
+ 0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
+ 0x9c18, 0x9c19, 0x9c1a, 0x9c1b,
+ /* Wildcat PCH */
+ 0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
+ 0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
+ /* Patsburg (X79) PCH */
+ 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
+};
+
+static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
+{
+ int i;
+
+ /* Filter out a few obvious non-matches first */
+ if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
+ if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
+ return true;
+
+ return false;
+}
+
+#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
+
+static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
+ INTEL_PCH_ACS_FLAGS : 0;
+
+ if (!pci_quirk_intel_pch_acs_match(dev))
+ return -ENOTTY;
+
+ return acs_flags & ~flags ? 0 : 1;
+}
+
+static const struct pci_dev_acs_enabled {
+ u16 vendor;
+ u16 device;
+ int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
+} pci_dev_acs_enabled[] = {
+ { PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
+ { 0 }
+};
+
+int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
+{
+ const struct pci_dev_acs_enabled *i;
+ int ret;
+
+ /*
+ * Allow devices that do not expose standard PCIe ACS capabilities
+	 * or control to indicate their support here. Multifunction Express
+	 * devices that do not allow internal peer-to-peer between functions,
+	 * but do not implement PCIe ACS, may wish to return true here.
+ */
+ for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
+ if ((i->vendor == dev->vendor ||
+ i->vendor == (u16)PCI_ANY_ID) &&
+ (i->device == dev->device ||
+ i->device == (u16)PCI_ANY_ID)) {
+ ret = i->acs_enabled(dev, acs_flags);
+ if (ret >= 0)
+ return ret;
+ }
+ }
+
+ return -ENOTTY;
+}
+
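The return convention is tri-state: -ENOTTY means no quirk matched, 0 means the requested flags are not covered, and a positive value means they are. A hypothetical caller might layer this over the standard capability check; standard_acs_enabled() below is an assumed helper, not a real API:

	static bool device_acs_covered(struct pci_dev *dev, u16 acs_flags)
	{
		int ret = pci_dev_specific_acs_enabled(dev, acs_flags);

		if (ret >= 0)
			return ret > 0;

		/* No quirk matched; fall back to the standard ACS capability. */
		return standard_acs_enabled(dev, acs_flags);	/* hypothetical */
	}
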
+/* Config space offset of Root Complex Base Address register */
+#define INTEL_LPC_RCBA_REG 0xf0
+/* 31:14 RCBA address */
+#define INTEL_LPC_RCBA_MASK 0xffffc000
+/* RCBA Enable */
+#define INTEL_LPC_RCBA_ENABLE (1 << 0)
+
+/* Backbone Scratch Pad Register */
+#define INTEL_BSPR_REG 0x1104
+/* Backbone Peer Non-Posted Disable */
+#define INTEL_BSPR_REG_BPNPD (1 << 8)
+/* Backbone Peer Posted Disable */
+#define INTEL_BSPR_REG_BPPD (1 << 9)
+
+/* Upstream Peer Decode Configuration Register */
+#define INTEL_UPDCR_REG 0x1114
+/* 5:0 Peer Decode Enable bits */
+#define INTEL_UPDCR_REG_MASK 0x3f
+
+static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
+{
+ u32 rcba, bspr, updcr;
+ void __iomem *rcba_mem;
+
+ /*
+ * Read the RCBA register from the LPC (D31:F0). PCH root ports
+ * are D28:F* and therefore get probed before LPC, thus we can't
+ * use pci_get_slot/pci_read_config_dword here.
+ */
+ pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
+ INTEL_LPC_RCBA_REG, &rcba);
+ if (!(rcba & INTEL_LPC_RCBA_ENABLE))
+ return -EINVAL;
+
+ rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK,
+ PAGE_ALIGN(INTEL_UPDCR_REG));
+ if (!rcba_mem)
+ return -ENOMEM;
+
+ /*
+ * The BSPR can disallow peer cycles, but it's set by soft strap and
+ * therefore read-only. If both posted and non-posted peer cycles are
+	 * disallowed, we're ok. If either is allowed, then we need to use
+	 * the UPDCR to disable peer decodes for each port. This provides the
+	 * PCIe ACS equivalent of PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF.
+ */
+ bspr = readl(rcba_mem + INTEL_BSPR_REG);
+ bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
+ if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
+ updcr = readl(rcba_mem + INTEL_UPDCR_REG);
+ if (updcr & INTEL_UPDCR_REG_MASK) {
+ dev_info(&dev->dev, "Disabling UPDCR peer decodes\n");
+ updcr &= ~INTEL_UPDCR_REG_MASK;
+ writel(updcr, rcba_mem + INTEL_UPDCR_REG);
+ }
+ }
+
+ iounmap(rcba_mem);
+ return 0;
+}
+
+/* Miscellaneous Port Configuration register */
+#define INTEL_MPC_REG 0xd8
+/* MPC: Invalid Receive Bus Number Check Enable */
+#define INTEL_MPC_REG_IRBNCE (1 << 26)
+
+static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
+{
+ u32 mpc;
+
+ /*
+ * When enabled, the IRBNCE bit of the MPC register enables the
+ * equivalent of PCI ACS Source Validation (PCI_ACS_SV), which
+ * ensures that requester IDs fall within the bus number range
+ * of the bridge. Enable if not already.
+ */
+ pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
+ if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
+ dev_info(&dev->dev, "Enabling MPC IRBNCE\n");
+ mpc |= INTEL_MPC_REG_IRBNCE;
+ pci_write_config_dword(dev, INTEL_MPC_REG, mpc);
+ }
+}
+
+static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
+{
+ if (!pci_quirk_intel_pch_acs_match(dev))
+ return -ENOTTY;
+
+ if (pci_quirk_enable_intel_lpc_acs(dev)) {
+ dev_warn(&dev->dev, "Failed to enable Intel PCH ACS quirk\n");
+ return 0;
+ }
+
+ pci_quirk_enable_intel_rp_mpc_acs(dev);
+
+ dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;
+
+ dev_info(&dev->dev, "Intel PCH root port ACS workaround enabled\n");
+
+ return 0;
+}
+
+static const struct pci_dev_enable_acs {
+ u16 vendor;
+ u16 device;
+ int (*enable_acs)(struct pci_dev *dev);
+} pci_dev_enable_acs[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
+ { 0 }
+};
+
+void pci_dev_specific_enable_acs(struct pci_dev *dev)
+{
+ const struct pci_dev_enable_acs *i;
+ int ret;
+
+ for (i = pci_dev_enable_acs; i->enable_acs; i++) {
+ if ((i->vendor == dev->vendor ||
+ i->vendor == (u16)PCI_ANY_ID) &&
+ (i->device == dev->device ||
+ i->device == (u16)PCI_ANY_ID)) {
+ ret = i->enable_acs(dev);
+ if (ret >= 0)
+ return;
+ }
+ }
+}
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 176615e7231..8bd76c9ba21 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -7,8 +7,6 @@ static void pci_free_resources(struct pci_dev *dev)
{
int i;
- msi_remove_pci_irq_vectors(dev);
-
pci_cleanup_rom(dev);
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *res = dev->resource + i;
@@ -19,10 +17,12 @@ static void pci_free_resources(struct pci_dev *dev)
static void pci_stop_dev(struct pci_dev *dev)
{
+ pci_pme_active(dev, false);
+
if (dev->is_added) {
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
- device_unregister(&dev->dev);
+ device_release_driver(&dev->dev);
dev->is_added = 0;
}
@@ -32,55 +32,72 @@ static void pci_stop_dev(struct pci_dev *dev)
static void pci_destroy_dev(struct pci_dev *dev)
{
- /* Remove the device from the device lists, and prevent any further
- * list accesses from this device */
+ if (!dev->dev.kobj.parent)
+ return;
+
+ device_del(&dev->dev);
+
down_write(&pci_bus_sem);
list_del(&dev->bus_list);
- dev->bus_list.next = dev->bus_list.prev = NULL;
up_write(&pci_bus_sem);
pci_free_resources(dev);
- pci_dev_put(dev);
+ put_device(&dev->dev);
}
-/**
- * pci_remove_device_safe - remove an unused hotplug device
- * @dev: the device to remove
- *
- * Delete the device structure from the device lists and
- * notify userspace (/sbin/hotplug), but only if the device
- * in question is not being used by a driver.
- * Returns 0 on success.
- */
-#if 0
-int pci_remove_device_safe(struct pci_dev *dev)
+void pci_remove_bus(struct pci_bus *bus)
{
- if (pci_dev_driver(dev))
- return -EBUSY;
- pci_destroy_dev(dev);
- return 0;
+ pci_proc_detach_bus(bus);
+
+ down_write(&pci_bus_sem);
+ list_del(&bus->node);
+ pci_bus_release_busn_res(bus);
+ up_write(&pci_bus_sem);
+ pci_remove_legacy_files(bus);
+ pcibios_remove_bus(bus);
+ device_unregister(&bus->dev);
}
-#endif /* 0 */
+EXPORT_SYMBOL(pci_remove_bus);
-void pci_remove_bus(struct pci_bus *pci_bus)
+static void pci_stop_bus_device(struct pci_dev *dev)
{
- pci_proc_detach_bus(pci_bus);
+ struct pci_bus *bus = dev->subordinate;
+ struct pci_dev *child, *tmp;
+
+ /*
+ * Stopping an SR-IOV PF device removes all the associated VFs,
+ * which will update the bus->devices list and confuse the
+ * iterator. Therefore, iterate in reverse so we remove the VFs
+ * first, then the PF.
+ */
+ if (bus) {
+ list_for_each_entry_safe_reverse(child, tmp,
+ &bus->devices, bus_list)
+ pci_stop_bus_device(child);
+ }
- down_write(&pci_bus_sem);
- list_del(&pci_bus->node);
- up_write(&pci_bus_sem);
- if (!pci_bus->is_added)
- return;
+ pci_stop_dev(dev);
+}
+
+static void pci_remove_bus_device(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->subordinate;
+ struct pci_dev *child, *tmp;
- pci_remove_legacy_files(pci_bus);
- device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity);
- device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity);
- device_unregister(&pci_bus->dev);
+ if (bus) {
+ list_for_each_entry_safe(child, tmp,
+ &bus->devices, bus_list)
+ pci_remove_bus_device(child);
+
+ pci_remove_bus(bus);
+ dev->subordinate = NULL;
+ }
+
+ pci_destroy_dev(dev);
}
-EXPORT_SYMBOL(pci_remove_bus);
/**
- * pci_remove_bus_device - remove a PCI device and any children
+ * pci_stop_and_remove_bus_device - remove a PCI device and any children
* @dev: the device to remove
*
* Remove a PCI device from the device lists, informing the drivers
@@ -91,63 +108,53 @@ EXPORT_SYMBOL(pci_remove_bus);
* device lists, remove the /proc entry, and notify userspace
* (/sbin/hotplug).
*/
-void pci_remove_bus_device(struct pci_dev *dev)
+void pci_stop_and_remove_bus_device(struct pci_dev *dev)
{
pci_stop_bus_device(dev);
- if (dev->subordinate) {
- struct pci_bus *b = dev->subordinate;
-
- pci_remove_behind_bridge(dev);
- pci_remove_bus(b);
- dev->subordinate = NULL;
- }
-
- pci_destroy_dev(dev);
+ pci_remove_bus_device(dev);
}
+EXPORT_SYMBOL(pci_stop_and_remove_bus_device);
-/**
- * pci_remove_behind_bridge - remove all devices behind a PCI bridge
- * @dev: PCI bridge device
- *
- * Remove all devices on the bus, except for the parent bridge.
- * This also removes any child buses, and any devices they may
- * contain in a depth-first manner.
- */
-void pci_remove_behind_bridge(struct pci_dev *dev)
+void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev)
{
- struct list_head *l, *n;
-
- if (dev->subordinate)
- list_for_each_safe(l, n, &dev->subordinate->devices)
- pci_remove_bus_device(pci_dev_b(l));
+ pci_lock_rescan_remove();
+ pci_stop_and_remove_bus_device(dev);
+ pci_unlock_rescan_remove();
}
+EXPORT_SYMBOL_GPL(pci_stop_and_remove_bus_device_locked);
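A usage sketch (the caller is hypothetical, not from this patch): code outside the core rescan/remove paths should use the _locked variant so a concurrent rescan cannot observe a half-removed device.

#include <linux/pci.h>

static void example_surprise_remove(struct pci_dev *pdev)
{
	pci_dev_get(pdev);		/* keep pdev valid across removal */
	pci_stop_and_remove_bus_device_locked(pdev);
	pci_dev_put(pdev);
}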
-static void pci_stop_bus_devices(struct pci_bus *bus)
+void pci_stop_root_bus(struct pci_bus *bus)
{
- struct list_head *l, *n;
+ struct pci_dev *child, *tmp;
+ struct pci_host_bridge *host_bridge;
- list_for_each_safe(l, n, &bus->devices) {
- struct pci_dev *dev = pci_dev_b(l);
- pci_stop_bus_device(dev);
- }
+ if (!pci_is_root_bus(bus))
+ return;
+
+ host_bridge = to_pci_host_bridge(bus->bridge);
+ list_for_each_entry_safe_reverse(child, tmp,
+ &bus->devices, bus_list)
+ pci_stop_bus_device(child);
+
+ /* stop the host bridge */
+ device_release_driver(&host_bridge->dev);
}
-/**
- * pci_stop_bus_device - stop a PCI device and any children
- * @dev: the device to stop
- *
- * Stop a PCI device (detach the driver, remove from the global list
- * and so on). This also stop any subordinate buses and children in a
- * depth-first manner.
- */
-void pci_stop_bus_device(struct pci_dev *dev)
+void pci_remove_root_bus(struct pci_bus *bus)
{
- if (dev->subordinate)
- pci_stop_bus_devices(dev->subordinate);
+ struct pci_dev *child, *tmp;
+ struct pci_host_bridge *host_bridge;
- pci_stop_dev(dev);
-}
+ if (!pci_is_root_bus(bus))
+ return;
+
+ host_bridge = to_pci_host_bridge(bus->bridge);
+ list_for_each_entry_safe(child, tmp,
+ &bus->devices, bus_list)
+ pci_remove_bus_device(child);
+ pci_remove_bus(bus);
+ host_bridge->bus = NULL;
-EXPORT_SYMBOL(pci_remove_bus_device);
-EXPORT_SYMBOL(pci_remove_behind_bridge);
-EXPORT_SYMBOL_GPL(pci_stop_bus_device);
+ /* remove the host bridge */
+ device_unregister(&host_bridge->dev);
+}
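A host-controller remove path would pair these two helpers in exactly this order: stop first (drivers unbound, nothing deleted), then remove. The surrounding function is a hypothetical sketch.

#include <linux/pci.h>

static void example_host_remove(struct pci_bus *root_bus)
{
	pci_stop_root_bus(root_bus);	/* unbind drivers, depth-first */
	pci_remove_root_bus(root_bus);	/* delete devices, bus, bridge */
}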
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 36864a935d6..f955edb9bea 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -7,6 +7,7 @@
* PCI ROM access routines
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -30,13 +31,14 @@ int pci_enable_rom(struct pci_dev *pdev)
if (!res->flags)
return -1;
- pcibios_resource_to_bus(pdev, &region, res);
+ pcibios_resource_to_bus(pdev->bus, &region, res);
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_MASK;
rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
return 0;
}
+EXPORT_SYMBOL_GPL(pci_enable_rom);
/**
* pci_disable_rom - disable ROM decoding for a PCI device
@@ -52,6 +54,7 @@ void pci_disable_rom(struct pci_dev *pdev)
rom_addr &= ~PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
}
+EXPORT_SYMBOL_GPL(pci_disable_rom);
/**
* pci_get_rom_size - obtain the actual size of the ROM image
@@ -134,7 +137,7 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
} else {
/* assign the ROM an address if it doesn't have one */
if (res->parent == NULL &&
- pci_assign_resource(pdev,PCI_ROM_RESOURCE))
+ pci_assign_resource(pdev, PCI_ROM_RESOURCE))
return NULL;
start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
@@ -165,44 +168,7 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
*size = pci_get_rom_size(pdev, rom, *size);
return rom;
}
-
-#if 0
-/**
- * pci_map_rom_copy - map a PCI ROM to kernel space, create a copy
- * @pdev: pointer to pci device struct
- * @size: pointer to receive size of pci window over ROM
- *
- * Return: kernel virtual pointer to image of ROM
- *
- * Map a PCI ROM into kernel space. If ROM is boot video ROM,
- * the shadow BIOS copy will be returned instead of the
- * actual ROM.
- */
-void __iomem *pci_map_rom_copy(struct pci_dev *pdev, size_t *size)
-{
- struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
- void __iomem *rom;
-
- rom = pci_map_rom(pdev, size);
- if (!rom)
- return NULL;
-
- if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_SHADOW |
- IORESOURCE_ROM_BIOS_COPY))
- return rom;
-
- res->start = (unsigned long)kmalloc(*size, GFP_KERNEL);
- if (!res->start)
- return rom;
-
- res->end = res->start + *size;
- memcpy_fromio((void*)(unsigned long)res->start, rom, *size);
- pci_unmap_rom(pdev, rom);
- res->flags |= IORESOURCE_ROM_COPY;
-
- return (void __iomem *)(unsigned long)res->start;
-}
-#endif /* 0 */
+EXPORT_SYMBOL(pci_map_rom);
/**
* pci_unmap_rom - unmap the ROM from kernel space
@@ -224,27 +190,7 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW)))
pci_disable_rom(pdev);
}
-
-#if 0
-/**
- * pci_remove_rom - disable the ROM and remove its sysfs attribute
- * @pdev: pointer to pci device struct
- *
- * Remove the rom file in sysfs and disable ROM decoding.
- */
-void pci_remove_rom(struct pci_dev *pdev)
-{
- struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
-
- if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
- sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
- if (!(res->flags & (IORESOURCE_ROM_ENABLE |
- IORESOURCE_ROM_SHADOW |
- IORESOURCE_ROM_BIOS_COPY |
- IORESOURCE_ROM_COPY)))
- pci_disable_rom(pdev);
-}
-#endif /* 0 */
+EXPORT_SYMBOL(pci_unmap_rom);
/**
* pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy
@@ -255,15 +201,29 @@ void pci_remove_rom(struct pci_dev *pdev)
void pci_cleanup_rom(struct pci_dev *pdev)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
+
if (res->flags & IORESOURCE_ROM_COPY) {
- kfree((void*)(unsigned long)res->start);
+ kfree((void *)(unsigned long)res->start);
+ res->flags |= IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_ROM_COPY;
res->start = 0;
res->end = 0;
}
}
-EXPORT_SYMBOL(pci_map_rom);
-EXPORT_SYMBOL(pci_unmap_rom);
-EXPORT_SYMBOL_GPL(pci_enable_rom);
-EXPORT_SYMBOL_GPL(pci_disable_rom);
+/**
+ * pci_platform_rom - provides a pointer to any ROM image provided by the
+ * platform
+ * @pdev: pointer to pci device struct
+ * @size: pointer to receive size of pci window over ROM
+ */
+void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size)
+{
+ if (pdev->rom && pdev->romlen) {
+ *size = pdev->romlen;
+ return phys_to_virt((phys_addr_t)pdev->rom);
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(pci_platform_rom);
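A sketch of the intended caller pattern, with error handling elided and the helper name hypothetical: prefer a platform-provided image, then fall back to mapping the ROM BAR.

#include <linux/pci.h>

static void __iomem *example_get_rom(struct pci_dev *pdev, size_t *size)
{
	void __iomem *rom = pci_platform_rom(pdev, size);

	if (!rom)
		rom = pci_map_rom(pdev, size);	/* pci_unmap_rom() later */
	return rom;
}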
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 9d75dc8ca60..827ad831f1d 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -1,5 +1,5 @@
/*
- * PCI searching functions.
+ * PCI searching functions.
*
* Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter,
* David Mosberger-Tang
@@ -7,7 +7,6 @@
* Copyright (C) 2003 -- 2004 Greg Kroah-Hartman <greg@kroah.com>
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -15,6 +14,95 @@
#include "pci.h"
DECLARE_RWSEM(pci_bus_sem);
+EXPORT_SYMBOL_GPL(pci_bus_sem);
+
+/**
+ * pci_for_each_dma_alias - Iterate over DMA aliases for a device
+ * @pdev: starting downstream device
+ * @fn: function to call for each alias
+ * @data: opaque data to pass to @fn
+ *
+ * Starting @pdev, walk up the bus calling @fn for each possible alias
+ * of @pdev at the root bus.
+ */
+int pci_for_each_dma_alias(struct pci_dev *pdev,
+ int (*fn)(struct pci_dev *pdev,
+ u16 alias, void *data), void *data)
+{
+ struct pci_bus *bus;
+ int ret;
+
+ ret = fn(pdev, PCI_DEVID(pdev->bus->number, pdev->devfn), data);
+ if (ret)
+ return ret;
+
+ /*
+ * If the device is broken and uses an alias requester ID for
+ * DMA, iterate over that too.
+ */
+ if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) {
+ ret = fn(pdev, PCI_DEVID(pdev->bus->number,
+ pdev->dma_alias_devfn), data);
+ if (ret)
+ return ret;
+ }
+
+ for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
+ struct pci_dev *tmp;
+
+ /* Skip virtual buses */
+ if (!bus->self)
+ continue;
+
+ tmp = bus->self;
+
+ /*
+ * PCIe-to-PCI/X bridges alias transactions from downstream
+ * devices using the subordinate bus number (PCI Express to
+ * PCI/PCI-X Bridge Spec, rev 1.0, sec 2.3). For all cases
+ * where the upstream bus is PCI/X we alias to the bridge
+ * (there are various conditions in the previous reference
+ * where the bridge may take ownership of transactions, even
+ * when the secondary interface is PCI-X).
+ */
+ if (pci_is_pcie(tmp)) {
+ switch (pci_pcie_type(tmp)) {
+ case PCI_EXP_TYPE_ROOT_PORT:
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ continue;
+ case PCI_EXP_TYPE_PCI_BRIDGE:
+ ret = fn(tmp,
+ PCI_DEVID(tmp->subordinate->number,
+ PCI_DEVFN(0, 0)), data);
+ if (ret)
+ return ret;
+ continue;
+ case PCI_EXP_TYPE_PCIE_BRIDGE:
+ ret = fn(tmp,
+ PCI_DEVID(tmp->bus->number,
+ tmp->devfn), data);
+ if (ret)
+ return ret;
+ continue;
+ }
+ } else {
+ if (tmp->dev_flags & PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS)
+ ret = fn(tmp,
+ PCI_DEVID(tmp->subordinate->number,
+ PCI_DEVFN(0, 0)), data);
+ else
+ ret = fn(tmp,
+ PCI_DEVID(tmp->bus->number,
+ tmp->devfn), data);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
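A sketch of the kind of callback an IOMMU driver might pass in (the function itself is hypothetical): returning nonzero stops the walk early, returning 0 continues toward the root.

#include <linux/pci.h>

static int example_collect_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	dev_info(&pdev->dev, "may DMA as %02x:%02x.%d\n",
		 alias >> 8, PCI_SLOT(alias & 0xff), PCI_FUNC(alias & 0xff));
	return 0;	/* keep walking */
}

/* usage: pci_for_each_dma_alias(pdev, example_collect_alias, NULL); */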
+
/*
* find the upstream PCIe-to-PCI bridge of a PCI device
* if the device is PCIE, return NULL
@@ -22,8 +110,7 @@ DECLARE_RWSEM(pci_bus_sem);
* legacy PCI bridge and the bridge is directly connected to bus 0), return its
* parent
*/
-struct pci_dev *
-pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
+struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
{
struct pci_dev *tmp = NULL;
@@ -39,7 +126,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
continue;
}
/* PCI device should connect to a PCIe bridge */
- if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) {
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_PCI_BRIDGE) {
/* Busted hardware? */
WARN_ON_ONCE(1);
return NULL;
@@ -52,15 +139,15 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
{
- struct pci_bus* child;
- struct list_head *tmp;
+ struct pci_bus *child;
+ struct pci_bus *tmp;
- if(bus->number == busnr)
+ if (bus->number == busnr)
return bus;
- list_for_each(tmp, &bus->children) {
- child = pci_do_find_bus(pci_bus_b(tmp), busnr);
- if(child)
+ list_for_each_entry(tmp, &bus->children, node) {
+ child = pci_do_find_bus(tmp, busnr);
+ if (child)
return child;
}
return NULL;
@@ -75,7 +162,7 @@ static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
* in the global list of PCI buses. If the bus is found, a pointer to its
* data structure is returned. If no bus is found, %NULL is returned.
*/
-struct pci_bus * pci_find_bus(int domain, int busnr)
+struct pci_bus *pci_find_bus(int domain, int busnr)
{
struct pci_bus *bus = NULL;
struct pci_bus *tmp_bus;
@@ -89,18 +176,18 @@ struct pci_bus * pci_find_bus(int domain, int busnr)
}
return NULL;
}
+EXPORT_SYMBOL(pci_find_bus);
/**
* pci_find_next_bus - begin or continue searching for a PCI bus
* @from: Previous PCI bus found, or %NULL for new search.
*
- * Iterates through the list of known PCI busses. A new search is
+ * Iterates through the list of known PCI buses. A new search is
* initiated by passing %NULL as the @from argument. Otherwise if
* @from is not %NULL, searches continue from next device on the
* global list.
*/
-struct pci_bus *
-pci_find_next_bus(const struct pci_bus *from)
+struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
{
struct list_head *n;
struct pci_bus *b = NULL;
@@ -109,35 +196,34 @@ pci_find_next_bus(const struct pci_bus *from)
down_read(&pci_bus_sem);
n = from ? from->node.next : pci_root_buses.next;
if (n != &pci_root_buses)
- b = pci_bus_b(n);
+ b = list_entry(n, struct pci_bus, node);
up_read(&pci_bus_sem);
return b;
}
+EXPORT_SYMBOL(pci_find_next_bus);
/**
* pci_get_slot - locate PCI device for a given PCI slot
* @bus: PCI bus on which desired PCI device resides
- * @devfn: encodes number of PCI slot in which the desired PCI
- * device resides and the logical device number within that slot
+ * @devfn: encodes number of PCI slot in which the desired PCI
+ * device resides and the logical device number within that slot
* in case of multi-function devices.
*
- * Given a PCI bus and slot/function number, the desired PCI device
+ * Given a PCI bus and slot/function number, the desired PCI device
* is located in the list of PCI devices.
* If the device is found, its reference count is increased and this
* function returns a pointer to its data structure. The caller must
* decrement the reference count by calling pci_dev_put().
* If no device is found, %NULL is returned.
*/
-struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn)
+struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn)
{
- struct list_head *tmp;
struct pci_dev *dev;
WARN_ON(in_interrupt());
down_read(&pci_bus_sem);
- list_for_each(tmp, &bus->devices) {
- dev = pci_dev_b(tmp);
+ list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->devfn == devfn)
goto out;
}
@@ -148,6 +234,7 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn)
up_read(&pci_bus_sem);
return dev;
}
+EXPORT_SYMBOL(pci_get_slot);
/**
* pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot
@@ -243,31 +330,16 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
unsigned int ss_vendor, unsigned int ss_device,
struct pci_dev *from)
{
- struct pci_dev *pdev;
- struct pci_device_id *id;
-
- /*
- * pci_find_subsys() can be called on the ide_setup() path,
- * super-early in boot. But the down_read() will enable local
- * interrupts, which can cause some machines to crash. So here we
- * detect and flag that situation and bail out early.
- */
- if (unlikely(no_pci_devices()))
- return NULL;
-
- id = kzalloc(sizeof(*id), GFP_KERNEL);
- if (!id)
- return NULL;
- id->vendor = vendor;
- id->device = device;
- id->subvendor = ss_vendor;
- id->subdevice = ss_device;
-
- pdev = pci_get_dev_by_id(id, from);
- kfree(id);
-
- return pdev;
+ struct pci_device_id id = {
+ .vendor = vendor,
+ .device = device,
+ .subvendor = ss_vendor,
+ .subdevice = ss_device,
+ };
+
+ return pci_get_dev_by_id(&id, from);
}
+EXPORT_SYMBOL(pci_get_subsys);
/**
* pci_get_device - begin or continue searching for a PCI device by vendor/device id
@@ -283,11 +355,12 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
* from next device on the global list. The reference count for @from is
* always decremented if it is not %NULL.
*/
-struct pci_dev *
-pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from)
+struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
+ struct pci_dev *from)
{
return pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
}
+EXPORT_SYMBOL(pci_get_device);
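For reference, the usual iteration idiom: pci_get_device() drops the reference on @from and takes one on the device it returns, so a loop that runs to completion needs no explicit pci_dev_put(); only an early break does. This sketch is illustrative, not from the patch.

#include <linux/pci.h>

static void example_list_intel_devices(void)
{
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
				      pdev)) != NULL)
		dev_info(&pdev->dev, "found %04x:%04x\n",
			 pdev->vendor, pdev->device);
}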
/**
* pci_get_class - begin or continue searching for a PCI device by class
@@ -305,20 +378,18 @@ pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from)
*/
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
{
- struct pci_dev *dev;
- struct pci_device_id *id;
-
- id = kzalloc(sizeof(*id), GFP_KERNEL);
- if (!id)
- return NULL;
- id->vendor = id->device = id->subvendor = id->subdevice = PCI_ANY_ID;
- id->class_mask = PCI_ANY_ID;
- id->class = class;
-
- dev = pci_get_dev_by_id(id, from);
- kfree(id);
- return dev;
+ struct pci_device_id id = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class_mask = PCI_ANY_ID,
+ .class = class,
+ };
+
+ return pci_get_dev_by_id(&id, from);
}
+EXPORT_SYMBOL(pci_get_class);
/**
* pci_dev_present - Returns 1 if device matching the device list is present, 0 if not.
@@ -338,22 +409,13 @@ int pci_dev_present(const struct pci_device_id *ids)
WARN_ON(in_interrupt());
while (ids->vendor || ids->subvendor || ids->class_mask) {
found = pci_get_dev_by_id(ids, NULL);
- if (found)
- goto exit;
+ if (found) {
+ pci_dev_put(found);
+ return 1;
+ }
ids++;
}
-exit:
- if (found)
- return 1;
+
return 0;
}
EXPORT_SYMBOL(pci_dev_present);
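Since pci_dev_present() now drops the reference itself, callers only test the return value. A sketch with a hypothetical device ID:

#include <linux/pci.h>

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },	/* hypothetical */
	{ 0, }						/* terminator */
};

static bool example_platform_has_device(void)
{
	return pci_dev_present(example_ids);
}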
-
-/* For boot time work */
-EXPORT_SYMBOL(pci_find_bus);
-EXPORT_SYMBOL(pci_find_next_bus);
-/* For everyone */
-EXPORT_SYMBOL(pci_get_device);
-EXPORT_SYMBOL(pci_get_subsys);
-EXPORT_SYMBOL(pci_get_slot);
-EXPORT_SYMBOL(pci_get_class);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 66cb8f4cc5f..a5a63ecfb62 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -25,54 +25,153 @@
#include <linux/ioport.h>
#include <linux/cache.h>
#include <linux/slab.h>
+#include <asm-generic/pci-bridge.h>
#include "pci.h"
-struct resource_list_x {
- struct resource_list_x *next;
+unsigned int pci_flags;
+
+struct pci_dev_resource {
+ struct list_head list;
struct resource *res;
struct pci_dev *dev;
resource_size_t start;
resource_size_t end;
+ resource_size_t add_size;
+ resource_size_t min_align;
unsigned long flags;
};
-static void add_to_failed_list(struct resource_list_x *head,
- struct pci_dev *dev, struct resource *res)
+static void free_list(struct list_head *head)
{
- struct resource_list_x *list = head;
- struct resource_list_x *ln = list->next;
- struct resource_list_x *tmp;
+ struct pci_dev_resource *dev_res, *tmp;
+
+ list_for_each_entry_safe(dev_res, tmp, head, list) {
+ list_del(&dev_res->list);
+ kfree(dev_res);
+ }
+}
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+/**
+ * add_to_list() - add a new resource tracker to the list
+ * @head: Head of the list
+ * @dev: device to which the resource belongs
+ * @res: The resource to be tracked
+ * @add_size: additional size to be optionally added to the resource
+ * @min_align: minimum alignment for the additional size
+ */
+static int add_to_list(struct list_head *head,
+ struct pci_dev *dev, struct resource *res,
+ resource_size_t add_size, resource_size_t min_align)
+{
+ struct pci_dev_resource *tmp;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp) {
- pr_warning("add_to_failed_list: kmalloc() failed!\n");
- return;
+ pr_warn("add_to_list: kmalloc() failed!\n");
+ return -ENOMEM;
}
- tmp->next = ln;
tmp->res = res;
tmp->dev = dev;
tmp->start = res->start;
tmp->end = res->end;
tmp->flags = res->flags;
- list->next = tmp;
+ tmp->add_size = add_size;
+ tmp->min_align = min_align;
+
+ list_add(&tmp->list, head);
+
+ return 0;
+}
+
+static void remove_from_list(struct list_head *head,
+ struct resource *res)
+{
+ struct pci_dev_resource *dev_res, *tmp;
+
+ list_for_each_entry_safe(dev_res, tmp, head, list) {
+ if (dev_res->res == res) {
+ list_del(&dev_res->list);
+ kfree(dev_res);
+ break;
+ }
+ }
}
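These helpers are file-local; a sketch of how a caller inside this file is meant to use them (everything other than the helpers themselves is hypothetical): build a list head on the stack, queue optional sizes with add_to_list(), and always dispose of the entries with free_list().

#include <linux/list.h>
#include <linux/sizes.h>

static void example_track_optional_size(struct pci_dev *dev,
					struct resource *res)
{
	LIST_HEAD(realloc_head);

	/* record SZ_1M of optional room for res, 4K-aligned */
	if (add_to_list(&realloc_head, dev, res, SZ_1M, SZ_4K))
		return;			/* -ENOMEM, nothing queued */

	/* ... sizing/assignment code would consume the list here ... */

	free_list(&realloc_head);	/* always dispose of entries */
}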
-static void free_failed_list(struct resource_list_x *head)
+static resource_size_t get_res_add_size(struct list_head *head,
+ struct resource *res)
{
- struct resource_list_x *list, *tmp;
+ struct pci_dev_resource *dev_res;
- for (list = head->next; list;) {
- tmp = list;
- list = list->next;
- kfree(tmp);
+ list_for_each_entry(dev_res, head, list) {
+ if (dev_res->res == res) {
+ int idx = res - &dev_res->dev->resource[0];
+
+ dev_printk(KERN_DEBUG, &dev_res->dev->dev,
+ "res[%d]=%pR get_res_add_size add_size %llx\n",
+ idx, dev_res->res,
+ (unsigned long long)dev_res->add_size);
+
+ return dev_res->add_size;
+ }
}
- head->next = NULL;
+ return 0;
+}
+
+/* Sort resources by alignment */
+static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
+{
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *r;
+ struct pci_dev_resource *dev_res, *tmp;
+ resource_size_t r_align;
+ struct list_head *n;
+
+ r = &dev->resource[i];
+
+ if (r->flags & IORESOURCE_PCI_FIXED)
+ continue;
+
+ if (!(r->flags) || r->parent)
+ continue;
+
+ r_align = pci_resource_alignment(dev, r);
+ if (!r_align) {
+ dev_warn(&dev->dev, "BAR %d: %pR has bogus alignment\n",
+ i, r);
+ continue;
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ panic("pdev_sort_resources(): kmalloc() failed!\n");
+ tmp->res = r;
+ tmp->dev = dev;
+
+ /* fallback is smallest one or list is empty */
+ n = head;
+ list_for_each_entry(dev_res, head, list) {
+ resource_size_t align;
+
+ align = pci_resource_alignment(dev_res->dev,
+ dev_res->res);
+
+ if (r_align > align) {
+ n = &dev_res->list;
+ break;
+ }
+ }
+ /* Insert it just before n */
+ list_add_tail(&tmp->list, n);
+ }
}
static void __dev_sort_resources(struct pci_dev *dev,
- struct resource_list *head)
+ struct list_head *head)
{
u16 class = dev->class >> 8;
@@ -91,59 +190,278 @@ static void __dev_sort_resources(struct pci_dev *dev,
pdev_sort_resources(dev, head);
}
-static void __assign_resources_sorted(struct resource_list *head,
- struct resource_list_x *fail_head)
+static inline void reset_resource(struct resource *res)
+{
+ res->start = 0;
+ res->end = 0;
+ res->flags = 0;
+}
+
+/**
+ * reassign_resources_sorted() - satisfy any additional resource requests
+ *
+ * @realloc_head : head of the list tracking requests requiring additional
+ * resources
+ * @head : head of the list tracking requests with allocated
+ * resources
+ *
+ * Walk through each element of the realloc_head and try to procure
+ * additional resources for the element, provided the element
+ * is in the head list.
+ */
+static void reassign_resources_sorted(struct list_head *realloc_head,
+ struct list_head *head)
{
struct resource *res;
- struct resource_list *list, *tmp;
+ struct pci_dev_resource *add_res, *tmp;
+ struct pci_dev_resource *dev_res;
+ resource_size_t add_size;
int idx;
- for (list = head->next; list;) {
- res = list->res;
- idx = res - &list->dev->resource[0];
+ list_for_each_entry_safe(add_res, tmp, realloc_head, list) {
+ bool found_match = false;
+
+ res = add_res->res;
+ /* skip resource that has been reset */
+ if (!res->flags)
+ goto out;
+
+ /* skip this resource if not found in head list */
+ list_for_each_entry(dev_res, head, list) {
+ if (dev_res->res == res) {
+ found_match = true;
+ break;
+ }
+ }
+ if (!found_match) /* just skip */
+ continue;
+
+ idx = res - &add_res->dev->resource[0];
+ add_size = add_res->add_size;
+ if (!resource_size(res)) {
+ res->start = add_res->start;
+ res->end = res->start + add_size - 1;
+ if (pci_assign_resource(add_res->dev, idx))
+ reset_resource(res);
+ } else {
+ resource_size_t align = add_res->min_align;
+ res->flags |= add_res->flags &
+ (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
+ if (pci_reassign_resource(add_res->dev, idx,
+ add_size, align))
+ dev_printk(KERN_DEBUG, &add_res->dev->dev,
+ "failed to add %llx res[%d]=%pR\n",
+ (unsigned long long)add_size,
+ idx, res);
+ }
+out:
+ list_del(&add_res->list);
+ kfree(add_res);
+ }
+}
+
+/**
+ * assign_requested_resources_sorted() - satisfy resource requests
+ *
+ * @head : head of the list tracking requests for resources
+ * @fail_head : head of the list tracking requests that could
+ * not be allocated
+ *
+ * Satisfy the resource request of each element in the list. Add
+ * requests that could not be satisfied to the fail_head list.
+ */
+static void assign_requested_resources_sorted(struct list_head *head,
+ struct list_head *fail_head)
+{
+ struct resource *res;
+ struct pci_dev_resource *dev_res;
+ int idx;
- if (pci_assign_resource(list->dev, idx)) {
- if (fail_head && !pci_is_root_bus(list->dev->bus)) {
+ list_for_each_entry(dev_res, head, list) {
+ res = dev_res->res;
+ idx = res - &dev_res->dev->resource[0];
+ if (resource_size(res) &&
+ pci_assign_resource(dev_res->dev, idx)) {
+ if (fail_head) {
/*
* if the failed res is for ROM BAR, and it will
* be enabled later, don't add it to the list
*/
if (!((idx == PCI_ROM_RESOURCE) &&
(!(res->flags & IORESOURCE_ROM_ENABLE))))
- add_to_failed_list(fail_head, list->dev, res);
+ add_to_list(fail_head,
+ dev_res->dev, res,
+ 0 /* don't care */,
+ 0 /* don't care */);
}
- res->start = 0;
- res->end = 0;
- res->flags = 0;
+ reset_resource(res);
+ }
+ }
+}
+
+static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
+{
+ struct pci_dev_resource *fail_res;
+ unsigned long mask = 0;
+
+ /* check failed type */
+ list_for_each_entry(fail_res, fail_head, list)
+ mask |= fail_res->flags;
+
+ /*
+ * A failed prefetchable resource will also set IORESOURCE_MEM,
+ * since prefetchable resources can be allocated in a
+ * non-prefetchable range. All assigned non-prefetchable sibling
+ * resources are then released according to that bit.
+ */
+ return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
+}
+
+static bool pci_need_to_release(unsigned long mask, struct resource *res)
+{
+ if (res->flags & IORESOURCE_IO)
+ return !!(mask & IORESOURCE_IO);
+
+ /* check prefetchable first */
+ if (res->flags & IORESOURCE_PREFETCH) {
+ if (mask & IORESOURCE_PREFETCH)
+ return true;
+ /* release pref if it sits in a non-pref parent window */
+ else if ((mask & IORESOURCE_MEM) &&
+ !(res->parent->flags & IORESOURCE_PREFETCH))
+ return true;
+ else
+ return false;
+ }
+
+ if (res->flags & IORESOURCE_MEM)
+ return !!(mask & IORESOURCE_MEM);
+
+ return false; /* should not get here */
+}
+
+static void __assign_resources_sorted(struct list_head *head,
+ struct list_head *realloc_head,
+ struct list_head *fail_head)
+{
+ /*
+ * We should not assign just the requested resources first. They
+ * could be adjacent, in which case a later reassignment could not
+ * grow them one by one within the parent resource window.
+ * Instead, try to assign requested + add_size up front; if that
+ * succeeds, we can get out early. If it fails, fall back to
+ * assigning just the requested sizes, then try to reassign
+ * add_size for some of the resources.
+ *
+ * Check the three resource types separately to decide which
+ * assigned resources must be released after the requested +
+ * add_size attempt:
+ * 1. If any I/O port assignment failed, release all assigned
+ * I/O ports.
+ * 2. If any prefetchable MMIO assignment failed, release all
+ * assigned prefetchable MMIO. Also release an assigned
+ * prefetchable MMIO resource whose parent window is
+ * non-prefetchable when any non-prefetchable MMIO
+ * assignment failed.
+ * 3. If any non-prefetchable or prefetchable MMIO assignment
+ * failed, release all assigned non-prefetchable MMIO.
+ */
+ LIST_HEAD(save_head);
+ LIST_HEAD(local_fail_head);
+ struct pci_dev_resource *save_res;
+ struct pci_dev_resource *dev_res, *tmp_res;
+ unsigned long fail_type;
+
+ /* Check if optional add_size is there */
+ if (!realloc_head || list_empty(realloc_head))
+ goto requested_and_reassign;
+
+ /* Save original start, end, flags etc at first */
+ list_for_each_entry(dev_res, head, list) {
+ if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) {
+ free_list(&save_head);
+ goto requested_and_reassign;
}
- tmp = list;
- list = list->next;
- kfree(tmp);
}
+
+ /* Update res in head list with add_size in realloc_head list */
+ list_for_each_entry(dev_res, head, list)
+ dev_res->res->end += get_res_add_size(realloc_head,
+ dev_res->res);
+
+ /* Try updated head list with add_size added */
+ assign_requested_resources_sorted(head, &local_fail_head);
+
+ /* all assigned with add_size? */
+ if (list_empty(&local_fail_head)) {
+ /* Remove head list from realloc_head list */
+ list_for_each_entry(dev_res, head, list)
+ remove_from_list(realloc_head, dev_res->res);
+ free_list(&save_head);
+ free_list(head);
+ return;
+ }
+
+ /* check failed type */
+ fail_type = pci_fail_res_type_mask(&local_fail_head);
+ /* remove assigned resources that need not be released from the lists */
+ list_for_each_entry_safe(dev_res, tmp_res, head, list)
+ if (dev_res->res->parent &&
+ !pci_need_to_release(fail_type, dev_res->res)) {
+ /* remove it from realloc_head list */
+ remove_from_list(realloc_head, dev_res->res);
+ remove_from_list(&save_head, dev_res->res);
+ list_del(&dev_res->list);
+ kfree(dev_res);
+ }
+
+ free_list(&local_fail_head);
+ /* Release assigned resource */
+ list_for_each_entry(dev_res, head, list)
+ if (dev_res->res->parent)
+ release_resource(dev_res->res);
+ /* Restore start/end/flags from saved list */
+ list_for_each_entry(save_res, &save_head, list) {
+ struct resource *res = save_res->res;
+
+ res->start = save_res->start;
+ res->end = save_res->end;
+ res->flags = save_res->flags;
+ }
+ free_list(&save_head);
+
+requested_and_reassign:
+ /* Satisfy the must-have resource requests */
+ assign_requested_resources_sorted(head, fail_head);
+
+ /* Try to satisfy any additional optional resource requests */
+ if (realloc_head)
+ reassign_resources_sorted(realloc_head, head);
+ free_list(head);
}
static void pdev_assign_resources_sorted(struct pci_dev *dev,
- struct resource_list_x *fail_head)
+ struct list_head *add_head,
+ struct list_head *fail_head)
{
- struct resource_list head;
+ LIST_HEAD(head);
- head.next = NULL;
__dev_sort_resources(dev, &head);
- __assign_resources_sorted(&head, fail_head);
+ __assign_resources_sorted(&head, add_head, fail_head);
}
static void pbus_assign_resources_sorted(const struct pci_bus *bus,
- struct resource_list_x *fail_head)
+ struct list_head *realloc_head,
+ struct list_head *fail_head)
{
struct pci_dev *dev;
- struct resource_list head;
+ LIST_HEAD(head);
- head.next = NULL;
list_for_each_entry(dev, &bus->devices, bus_list)
__dev_sort_resources(dev, &head);
- __assign_resources_sorted(&head, fail_head);
+ __assign_resources_sorted(&head, realloc_head, fail_head);
}
void pci_setup_cardbus(struct pci_bus *bus)
@@ -152,11 +470,11 @@ void pci_setup_cardbus(struct pci_bus *bus)
struct resource *res;
struct pci_bus_region region;
- dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
- bus->secondary, bus->subordinate);
+ dev_info(&bridge->dev, "CardBus bridge to %pR\n",
+ &bus->busn_res);
res = bus->resource[0];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
/*
* The IO resource is allocated a range twice as large as it
@@ -170,7 +488,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
}
res = bus->resource[1];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
dev_info(&bridge->dev, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
@@ -180,7 +498,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
}
res = bus->resource[2];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
dev_info(&bridge->dev, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
@@ -190,7 +508,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
}
res = bus->resource[3];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
dev_info(&bridge->dev, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
@@ -217,16 +535,23 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
- u32 l, io_upper16;
+ unsigned long io_mask;
+ u8 io_base_lo, io_limit_lo;
+ u16 l;
+ u32 io_upper16;
+
+ io_mask = PCI_IO_RANGE_MASK;
+ if (bridge->io_window_1k)
+ io_mask = PCI_IO_1K_RANGE_MASK;
/* Set up the top and bottom of the PCI I/O segment for this bus. */
res = bus->resource[0];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
- pci_read_config_dword(bridge, PCI_IO_BASE, &l);
- l &= 0xffff0000;
- l |= (region.start >> 8) & 0x00f0;
- l |= region.end & 0xf000;
+ pci_read_config_word(bridge, PCI_IO_BASE, &l);
+ io_base_lo = (region.start >> 8) & io_mask;
+ io_limit_lo = (region.end >> 8) & io_mask;
+ l = ((u16) io_limit_lo << 8) | io_base_lo;
/* Set up upper 16 bits of I/O base/limit. */
io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
dev_info(&bridge->dev, " bridge window %pR\n", res);
@@ -234,12 +559,11 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
/* Clear upper 16 bits of I/O base/limit. */
io_upper16 = 0;
l = 0x00f0;
- dev_info(&bridge->dev, " bridge window [io disabled]\n");
}
/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
/* Update lower 16 bits of I/O base/limit. */
- pci_write_config_dword(bridge, PCI_IO_BASE, l);
+ pci_write_config_word(bridge, PCI_IO_BASE, l);
/* Update upper 16 bits of I/O base/limit. */
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
@@ -253,14 +577,13 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus)
/* Set up the top and bottom of the PCI Memory segment for this bus. */
res = bus->resource[1];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
dev_info(&bridge->dev, " bridge window %pR\n", res);
} else {
l = 0x0000fff0;
- dev_info(&bridge->dev, " bridge window [mem disabled]\n");
}
pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}
@@ -280,7 +603,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
/* Set up PREF base/limit. */
bu = lu = 0;
res = bus->resource[2];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
@@ -291,7 +614,6 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
dev_info(&bridge->dev, " bridge window %pR\n", res);
} else {
l = 0x0000fff0;
- dev_info(&bridge->dev, " bridge window [mem pref disabled]\n");
}
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
@@ -304,8 +626,8 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
struct pci_dev *bridge = bus->self;
- dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
- bus->secondary, bus->subordinate);
+ dev_info(&bridge->dev, "PCI bridge to %pR\n",
+ &bus->busn_res);
if (type & IORESOURCE_IO)
pci_setup_bridge_io(bus);
@@ -319,7 +641,7 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
-static void pci_setup_bridge(struct pci_bus *bus)
+void pci_setup_bridge(struct pci_bus *bus)
{
unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
@@ -342,21 +664,23 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
pci_read_config_word(bridge, PCI_IO_BASE, &io);
if (!io) {
- pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
+ pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
pci_read_config_word(bridge, PCI_IO_BASE, &io);
- pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
- }
- if (io)
+ pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
+ }
+ if (io)
b_res[0].flags |= IORESOURCE_IO;
+
/* DECchip 21050 pass 2 errata: the bridge may miss an address
disconnect boundary by one PCI data phase.
Workaround: do not use prefetching on this device. */
if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
return;
+
pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
if (!pmem) {
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
- 0xfff0fff0);
+ 0xffe0fff0);
pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
}
@@ -388,12 +712,11 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
bus resource of a given type. Note: we intentionally skip
the bus resources which have already been assigned (that is,
have non-NULL parent resource). */
-static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type)
+static struct resource *find_free_bus_resource(struct pci_bus *bus,
+ unsigned long type_mask, unsigned long type)
{
int i;
struct resource *r;
- unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH;
pci_bus_for_each_resource(bus, r, i) {
if (r == &ioport_resource || r == &iomem_resource)
@@ -404,19 +727,102 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon
return NULL;
}
-/* Sizing the IO windows of the PCI-PCI bridge is trivial,
- since these windows have 4K granularity and the IO ranges
- of non-bridge PCI devices are limited to 256 bytes.
- We must be careful with the ISA aliasing though. */
-static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
+static resource_size_t calculate_iosize(resource_size_t size,
+ resource_size_t min_size,
+ resource_size_t size1,
+ resource_size_t old_size,
+ resource_size_t align)
+{
+ if (size < min_size)
+ size = min_size;
+ if (old_size == 1)
+ old_size = 0;
+ /* To be fixed in 2.5: we should have some sort of HAVE_ISA
+ flag in the struct pci_bus. */
+#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
+ size = (size & 0xff) + ((size & ~0xffUL) << 2);
+#endif
+ size = ALIGN(size + size1, align);
+ if (size < old_size)
+ size = old_size;
+ return size;
+}
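The ISA-aliasing expansion above deserves a worked example: with legacy ISA decoding, only the low 256 bytes of every 1K block of I/O space are safely usable, so the requested size is stretched to four times its 256-byte-aligned part. A standalone trace of the same expression (plain user-space C, purely illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long size = 0x400;	/* 1K of child I/O ports */

	/* same expression as calculate_iosize() */
	size = (size & 0xff) + ((size & ~0xffUL) << 2);
	printf("expanded I/O size: %#lx\n", size);	/* prints 0x1000 */
	return 0;
}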
+
+static resource_size_t calculate_memsize(resource_size_t size,
+ resource_size_t min_size,
+ resource_size_t size1,
+ resource_size_t old_size,
+ resource_size_t align)
+{
+ if (size < min_size)
+ size = min_size;
+ if (old_size == 1)
+ old_size = 0;
+ if (size < old_size)
+ size = old_size;
+ size = ALIGN(size + size1, align);
+ return size;
+}
+
+resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
+ unsigned long type)
+{
+ return 1;
+}
+
+#define PCI_P2P_DEFAULT_MEM_ALIGN 0x100000 /* 1MiB */
+#define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */
+#define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */
+
+static resource_size_t window_alignment(struct pci_bus *bus,
+ unsigned long type)
+{
+ resource_size_t align = 1, arch_align;
+
+ if (type & IORESOURCE_MEM)
+ align = PCI_P2P_DEFAULT_MEM_ALIGN;
+ else if (type & IORESOURCE_IO) {
+ /*
+ * Per spec, I/O windows are 4K-aligned, but some
+ * bridges have an extension to support 1K alignment.
+ */
+ if (bus->self->io_window_1k)
+ align = PCI_P2P_DEFAULT_IO_ALIGN_1K;
+ else
+ align = PCI_P2P_DEFAULT_IO_ALIGN;
+ }
+
+ arch_align = pcibios_window_alignment(bus, type);
+ return max(align, arch_align);
+}
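Because pcibios_window_alignment() is declared __weak above, an architecture may override it, and window_alignment() then takes the max of the default and the arch value. A hypothetical override for a platform whose bridges decode memory windows at 2MB boundaries:

#include <linux/pci.h>
#include <linux/sizes.h>

resource_size_t pcibios_window_alignment(struct pci_bus *bus,
					 unsigned long type)
{
	if (type & IORESOURCE_MEM)
		return SZ_2M;	/* stricter than the 1MB default */
	return 1;		/* no extra constraint otherwise */
}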
+
+/**
+ * pbus_size_io() - size the io window of a given bus
+ *
+ * @bus : the bus
+ * @min_size : the minimum I/O window that must be allocated
+ * @add_size : additional optional I/O window
+ * @realloc_head : track the additional I/O window on this list
+ *
+ * Sizing the I/O windows of the PCI-PCI bridge is trivial,
+ * since these windows have 1K or 4K granularity and the I/O ranges
+ * of non-bridge PCI devices are limited to 256 bytes.
+ * We must be careful with ISA aliasing though.
+ */
+static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
+ resource_size_t add_size, struct list_head *realloc_head)
{
struct pci_dev *dev;
- struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
- unsigned long size = 0, size1 = 0, old_size;
+ struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO,
+ IORESOURCE_IO);
+ resource_size_t size = 0, size0 = 0, size1 = 0;
+ resource_size_t children_add_size = 0;
+ resource_size_t min_align, align;
if (!b_res)
- return;
+ return;
+ min_align = window_alignment(bus, IORESOURCE_IO);
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
@@ -433,57 +839,105 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
size += r_size;
else
size1 += r_size;
+
+ align = pci_resource_alignment(dev, r);
+ if (align > min_align)
+ min_align = align;
+
+ if (realloc_head)
+ children_add_size += get_res_add_size(realloc_head, r);
}
}
- if (size < min_size)
- size = min_size;
- old_size = resource_size(b_res);
- if (old_size == 1)
- old_size = 0;
-/* To be fixed in 2.5: we should have sort of HAVE_ISA
- flag in the struct pci_bus. */
-#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
- size = (size & 0xff) + ((size & ~0xffUL) << 2);
-#endif
- size = ALIGN(size + size1, 4096);
- if (size < old_size)
- size = old_size;
- if (!size) {
+
+ size0 = calculate_iosize(size, min_size, size1,
+ resource_size(b_res), min_align);
+ if (children_add_size > add_size)
+ add_size = children_add_size;
+ size1 = (!realloc_head || !add_size) ? size0 :
+ calculate_iosize(size, min_size, add_size + size1,
+ resource_size(b_res), min_align);
+ if (!size0 && !size1) {
if (b_res->start || b_res->end)
- dev_info(&bus->self->dev, "disabling bridge window "
- "%pR to [bus %02x-%02x] (unused)\n", b_res,
- bus->secondary, bus->subordinate);
+ dev_info(&bus->self->dev, "disabling bridge window %pR to %pR (unused)\n",
+ b_res, &bus->busn_res);
b_res->flags = 0;
return;
}
- /* Alignment of the IO window is always 4K */
- b_res->start = 4096;
- b_res->end = b_res->start + size - 1;
+
+ b_res->start = min_align;
+ b_res->end = b_res->start + size0 - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
+ if (size1 > size0 && realloc_head) {
+ add_to_list(realloc_head, bus->self, b_res, size1-size0,
+ min_align);
+ dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window %pR to %pR add_size %llx\n",
+ b_res, &bus->busn_res,
+ (unsigned long long)(size1 - size0));
+ }
+}
+
+static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
+ int max_order)
+{
+ resource_size_t align = 0;
+ resource_size_t min_align = 0;
+ int order;
+
+ for (order = 0; order <= max_order; order++) {
+ resource_size_t align1 = 1;
+
+ align1 <<= (order + 20);
+
+ if (!align)
+ min_align = align1;
+ else if (ALIGN(align + min_align, min_align) < align1)
+ min_align = align1 >> 1;
+ align += aligns[order];
+ }
+
+ return min_align;
}
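To see what calculate_mem_align() produces, trace it for a bus whose only child resource is a single 4MB BAR (aligns[2] = 4MB, lower orders empty): align stays 0 through orders 0 and 1, so min_align keeps climbing and settles at 4MB. A standalone user-space trace of the same loop (assumption: plain C, with ALIGN_UP standing in for the kernel's ALIGN):

#include <stdio.h>

#define MB		(1UL << 20)
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long aligns[] = { 0, 0, 4 * MB };	/* orders 0..2 */
	unsigned long align = 0, min_align = 0, align1;
	int order;

	for (order = 0; order <= 2; order++) {
		align1 = 1UL << (order + 20);
		if (!align)
			min_align = align1;
		else if (ALIGN_UP(align + min_align, min_align) < align1)
			min_align = align1 >> 1;
		align += aligns[order];
	}
	printf("min_align = %luMB\n", min_align / MB);	/* prints 4MB */
	return 0;
}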
-/* Calculate the size of the bus and minimal alignment which
- guarantees that all child resources fit in this size. */
+/**
+ * pbus_size_mem() - size the memory window of a given bus
+ *
+ * @bus : the bus
+ * @mask: mask applied to resource flags before comparing with @type
+ * @type: the type of free resource from bridge
+ * @type2: second match type
+ * @type3: third match type
+ * @min_size : the minimum memory window that must be allocated
+ * @add_size : additional optional memory window
+ * @realloc_head : track the additional memory window on this list
+ *
+ * Calculate the size of the bus and minimal alignment which
+ * guarantees that all child resources fit in this size.
+ *
+ * Returns -ENOSPC if there's no available bus resource of the desired type.
+ * Otherwise, sets the bus resource start/end to indicate the required
+ * size, adds things to realloc_head (if supplied), and returns 0.
+ */
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
- unsigned long type, resource_size_t min_size)
+ unsigned long type, unsigned long type2,
+ unsigned long type3,
+ resource_size_t min_size, resource_size_t add_size,
+ struct list_head *realloc_head)
{
struct pci_dev *dev;
- resource_size_t min_align, align, size, old_size;
- resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
+ resource_size_t min_align, align, size, size0, size1;
+ resource_size_t aligns[14]; /* Alignments from 1MB to 8GB */
int order, max_order;
- struct resource *b_res = find_free_bus_resource(bus, type);
- unsigned int mem64_mask = 0;
+ struct resource *b_res = find_free_bus_resource(bus,
+ mask | IORESOURCE_PREFETCH, type);
+ resource_size_t children_add_size = 0;
if (!b_res)
- return 0;
+ return -ENOSPC;
memset(aligns, 0, sizeof(aligns));
max_order = 0;
size = 0;
- mem64_mask = b_res->flags & IORESOURCE_MEM_64;
- b_res->flags &= ~IORESOURCE_MEM_64;
-
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
@@ -491,85 +945,129 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
struct resource *r = &dev->resource[i];
resource_size_t r_size;
- if (r->parent || (r->flags & mask) != type)
+ if (r->parent || ((r->flags & mask) != type &&
+ (r->flags & mask) != type2 &&
+ (r->flags & mask) != type3))
continue;
r_size = resource_size(r);
- /* For bridges size != alignment */
+#ifdef CONFIG_PCI_IOV
+ /* put SR-IOV requested resources on the optional list */
+ if (realloc_head && i >= PCI_IOV_RESOURCES &&
+ i <= PCI_IOV_RESOURCE_END) {
+ r->end = r->start - 1;
+ add_to_list(realloc_head, dev, r, r_size, 0/* don't care */);
+ children_add_size += r_size;
+ continue;
+ }
+#endif
+ /*
+ * aligns[0] is for 1MB (since bridge memory
+ * windows are always at least 1MB aligned), so
+ * keep "order" from being negative for smaller
+ * resources.
+ */
align = pci_resource_alignment(dev, r);
order = __ffs(align) - 20;
- if (order > 11) {
- dev_warn(&dev->dev, "disabling BAR %d: %pR "
- "(bad alignment %#llx)\n", i, r,
- (unsigned long long) align);
+ if (order < 0)
+ order = 0;
+ if (order >= ARRAY_SIZE(aligns)) {
+ dev_warn(&dev->dev, "disabling BAR %d: %pR (bad alignment %#llx)\n",
+ i, r, (unsigned long long) align);
r->flags = 0;
continue;
}
size += r_size;
- if (order < 0)
- order = 0;
/* Exclude ranges with size > align from
calculation of the alignment. */
if (r_size == align)
aligns[order] += align;
if (order > max_order)
max_order = order;
- mem64_mask &= r->flags & IORESOURCE_MEM_64;
+
+ if (realloc_head)
+ children_add_size += get_res_add_size(realloc_head, r);
}
}
- if (size < min_size)
- size = min_size;
- old_size = resource_size(b_res);
- if (old_size == 1)
- old_size = 0;
- if (size < old_size)
- size = old_size;
- align = 0;
- min_align = 0;
- for (order = 0; order <= max_order; order++) {
- resource_size_t align1 = 1;
-
- align1 <<= (order + 20);
-
- if (!align)
- min_align = align1;
- else if (ALIGN(align + min_align, min_align) < align1)
- min_align = align1 >> 1;
- align += aligns[order];
- }
- size = ALIGN(size, min_align);
- if (!size) {
+ min_align = calculate_mem_align(aligns, max_order);
+ min_align = max(min_align, window_alignment(bus, b_res->flags));
+ size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
+ if (children_add_size > add_size)
+ add_size = children_add_size;
+ size1 = (!realloc_head || !add_size) ? size0 :
+ calculate_memsize(size, min_size, add_size,
+ resource_size(b_res), min_align);
+ if (!size0 && !size1) {
if (b_res->start || b_res->end)
- dev_info(&bus->self->dev, "disabling bridge window "
- "%pR to [bus %02x-%02x] (unused)\n", b_res,
- bus->secondary, bus->subordinate);
+ dev_info(&bus->self->dev, "disabling bridge window %pR to %pR (unused)\n",
+ b_res, &bus->busn_res);
b_res->flags = 0;
- return 1;
+ return 0;
}
b_res->start = min_align;
- b_res->end = size + min_align - 1;
+ b_res->end = size0 + min_align - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
- b_res->flags |= mem64_mask;
- return 1;
+ if (size1 > size0 && realloc_head) {
+ add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
+ dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window %pR to %pR add_size %llx\n",
+ b_res, &bus->busn_res,
+ (unsigned long long)(size1 - size0));
+ }
+ return 0;
+}
+
+unsigned long pci_cardbus_resource_alignment(struct resource *res)
+{
+ if (res->flags & IORESOURCE_IO)
+ return pci_cardbus_io_size;
+ if (res->flags & IORESOURCE_MEM)
+ return pci_cardbus_mem_size;
+ return 0;
}
-static void pci_bus_size_cardbus(struct pci_bus *bus)
+static void pci_bus_size_cardbus(struct pci_bus *bus,
+ struct list_head *realloc_head)
{
struct pci_dev *bridge = bus->self;
struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
+ resource_size_t b_res_3_size = pci_cardbus_mem_size * 2;
u16 ctrl;
+ if (b_res[0].parent)
+ goto handle_b_res_1;
/*
* Reserve some resources for CardBus. We reserve
* a fixed amount of bus space for CardBus bridges.
*/
- b_res[0].start = 0;
- b_res[0].end = pci_cardbus_io_size - 1;
- b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
+ b_res[0].start = pci_cardbus_io_size;
+ b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1;
+ b_res[0].flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
+ if (realloc_head) {
+ b_res[0].end -= pci_cardbus_io_size;
+ add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
+ pci_cardbus_io_size);
+ }
- b_res[1].start = 0;
- b_res[1].end = pci_cardbus_io_size - 1;
- b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
+handle_b_res_1:
+ if (b_res[1].parent)
+ goto handle_b_res_2;
+ b_res[1].start = pci_cardbus_io_size;
+ b_res[1].end = b_res[1].start + pci_cardbus_io_size - 1;
+ b_res[1].flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
+ if (realloc_head) {
+ b_res[1].end -= pci_cardbus_io_size;
+ add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size,
+ pci_cardbus_io_size);
+ }
+
+handle_b_res_2:
+ /* MEM1 must not be prefetchable MMIO */
+ pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
+ if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) {
+ ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1;
+ pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
+ pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
+ }
/*
* Check whether prefetchable memory is supported
@@ -582,31 +1080,51 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
}
+ if (b_res[2].parent)
+ goto handle_b_res_3;
/*
* If we have prefetchable memory support, allocate
* two regions. Otherwise, allocate one region of
* twice the size.
*/
if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
- b_res[2].start = 0;
- b_res[2].end = pci_cardbus_mem_size - 1;
- b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;
+ b_res[2].start = pci_cardbus_mem_size;
+ b_res[2].end = b_res[2].start + pci_cardbus_mem_size - 1;
+ b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
+ IORESOURCE_STARTALIGN;
+ if (realloc_head) {
+ b_res[2].end -= pci_cardbus_mem_size;
+ add_to_list(realloc_head, bridge, b_res+2,
+ pci_cardbus_mem_size, pci_cardbus_mem_size);
+ }
- b_res[3].start = 0;
- b_res[3].end = pci_cardbus_mem_size - 1;
- b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
- } else {
- b_res[3].start = 0;
- b_res[3].end = pci_cardbus_mem_size * 2 - 1;
- b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
+ /* prefetchable memory is handled above, so halve b_res[3] */
+ b_res_3_size = pci_cardbus_mem_size;
}
+
+handle_b_res_3:
+ if (b_res[3].parent)
+ goto handle_done;
+ b_res[3].start = pci_cardbus_mem_size;
+ b_res[3].end = b_res[3].start + b_res_3_size - 1;
+ b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN;
+ if (realloc_head) {
+ b_res[3].end -= b_res_3_size;
+ add_to_list(realloc_head, bridge, b_res+3, b_res_3_size,
+ pci_cardbus_mem_size);
+ }
+
+handle_done:
+ ;
}
-void __ref pci_bus_size_bridges(struct pci_bus *bus)
+void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
{
struct pci_dev *dev;
- unsigned long mask, prefmask;
- resource_size_t min_mem_size = 0, min_io_size = 0;
+ unsigned long mask, prefmask, type2 = 0, type3 = 0;
+ resource_size_t additional_mem_size = 0, additional_io_size = 0;
+ struct resource *b_res;
+ int ret;
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
@@ -615,18 +1133,18 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
switch (dev->class >> 8) {
case PCI_CLASS_BRIDGE_CARDBUS:
- pci_bus_size_cardbus(b);
+ pci_bus_size_cardbus(b, realloc_head);
break;
case PCI_CLASS_BRIDGE_PCI:
default:
- pci_bus_size_bridges(b);
+ __pci_bus_size_bridges(b, realloc_head);
break;
}
}
/* The root bus? */
- if (!bus->self)
+ if (pci_is_root_bus(bus))
return;
switch (bus->self->class >> 8) {
@@ -637,42 +1155,108 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
case PCI_CLASS_BRIDGE_PCI:
pci_bridge_check_ranges(bus);
if (bus->self->is_hotplug_bridge) {
- min_io_size = pci_hotplug_io_size;
- min_mem_size = pci_hotplug_mem_size;
+ additional_io_size = pci_hotplug_io_size;
+ additional_mem_size = pci_hotplug_mem_size;
}
+ /* Fall through */
default:
- pbus_size_io(bus, min_io_size);
- /* If the bridge supports prefetchable range, size it
- separately. If it doesn't, or its prefetchable window
- has already been allocated by arch code, try
- non-prefetchable range for both types of PCI memory
- resources. */
+ pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
+ additional_io_size, realloc_head);
+
+ /*
+ * If there's a 64-bit prefetchable MMIO window, compute
+ * the size required to put all 64-bit prefetchable
+ * resources in it.
+ */
+ b_res = &bus->self->resource[PCI_BRIDGE_RESOURCES];
mask = IORESOURCE_MEM;
prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (pbus_size_mem(bus, prefmask, prefmask, min_mem_size))
- mask = prefmask; /* Success, size non-prefetch only. */
- else
- min_mem_size += min_mem_size;
- pbus_size_mem(bus, mask, IORESOURCE_MEM, min_mem_size);
+ if (b_res[2].flags & IORESOURCE_MEM_64) {
+ prefmask |= IORESOURCE_MEM_64;
+ ret = pbus_size_mem(bus, prefmask, prefmask,
+ prefmask, prefmask,
+ realloc_head ? 0 : additional_mem_size,
+ additional_mem_size, realloc_head);
+
+ /*
+ * If successful, all non-prefetchable resources
+ * and any 32-bit prefetchable resources will go in
+ * the non-prefetchable window.
+ */
+ if (ret == 0) {
+ mask = prefmask;
+ type2 = prefmask & ~IORESOURCE_MEM_64;
+ type3 = prefmask & ~IORESOURCE_PREFETCH;
+ }
+ }
+
+ /*
+ * If there is no 64-bit prefetchable window, compute the
+ * size required to put all prefetchable resources in the
+ * 32-bit prefetchable window (if there is one).
+ */
+ if (!type2) {
+ prefmask &= ~IORESOURCE_MEM_64;
+ ret = pbus_size_mem(bus, prefmask, prefmask,
+ prefmask, prefmask,
+ realloc_head ? 0 : additional_mem_size,
+ additional_mem_size, realloc_head);
+
+ /*
+ * If successful, only non-prefetchable resources
+ * will go in the non-prefetchable window.
+ */
+ if (ret == 0)
+ mask = prefmask;
+ else
+ additional_mem_size += additional_mem_size;
+
+ type2 = type3 = IORESOURCE_MEM;
+ }
+
+ /*
+ * Compute the size required to put everything else in the
+ * non-prefetchable window. This includes:
+ *
+ * - all non-prefetchable resources
+ * - 32-bit prefetchable resources if there's a 64-bit
+ * prefetchable window or no prefetchable window at all
+ * - 64-bit prefetchable resources if there's no
+ * prefetchable window at all
+ *
+ * Note that the strategy in __pci_assign_resource() must
+ * match that used here. Specifically, we cannot put a
+ * 32-bit prefetchable resource in a 64-bit prefetchable
+ * window.
+ */
+ pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
+ realloc_head ? 0 : additional_mem_size,
+ additional_mem_size, realloc_head);
break;
}
}
+
+void pci_bus_size_bridges(struct pci_bus *bus)
+{
+ __pci_bus_size_bridges(bus, NULL);
+}
EXPORT_SYMBOL(pci_bus_size_bridges);
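For context, pci_bus_size_bridges() is the first half of a two-phase flow; a minimal sketch of a caller driving both phases follows (the three functions are real exported APIs, but the wrapper, error handling, and locking are assumptions for illustration):

	/* Sketch: size bridge windows depth-first, then assign and add. */
	static void example_setup_bus(struct pci_bus *bus)
	{
		pci_bus_size_bridges(bus);	/* compute window sizes/alignments */
		pci_bus_assign_resources(bus);	/* program BARs and bridge windows */
		pci_bus_add_devices(bus);	/* make the devices visible to drivers */
	}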
-static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
- struct resource_list_x *fail_head)
+void __pci_bus_assign_resources(const struct pci_bus *bus,
+ struct list_head *realloc_head,
+ struct list_head *fail_head)
{
struct pci_bus *b;
struct pci_dev *dev;
- pbus_assign_resources_sorted(bus, fail_head);
+ pbus_assign_resources_sorted(bus, realloc_head, fail_head);
list_for_each_entry(dev, &bus->devices, bus_list) {
b = dev->subordinate;
if (!b)
continue;
- __pci_bus_assign_resources(b, fail_head);
+ __pci_bus_assign_resources(b, realloc_head, fail_head);
switch (dev->class >> 8) {
case PCI_CLASS_BRIDGE_PCI:
@@ -685,31 +1269,33 @@ static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
break;
default:
- dev_info(&dev->dev, "not setting up bridge for bus "
- "%04x:%02x\n", pci_domain_nr(b), b->number);
+ dev_info(&dev->dev, "not setting up bridge for bus %04x:%02x\n",
+ pci_domain_nr(b), b->number);
break;
}
}
}
-void __ref pci_bus_assign_resources(const struct pci_bus *bus)
+void pci_bus_assign_resources(const struct pci_bus *bus)
{
- __pci_bus_assign_resources(bus, NULL);
+ __pci_bus_assign_resources(bus, NULL, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);
-static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
- struct resource_list_x *fail_head)
+static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
+ struct list_head *add_head,
+ struct list_head *fail_head)
{
struct pci_bus *b;
- pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head);
+ pdev_assign_resources_sorted((struct pci_dev *)bridge,
+ add_head, fail_head);
b = bridge->subordinate;
if (!b)
return;
- __pci_bus_assign_resources(b, fail_head);
+ __pci_bus_assign_resources(b, add_head, fail_head);
switch (bridge->class >> 8) {
case PCI_CLASS_BRIDGE_PCI:
@@ -721,50 +1307,74 @@ static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
break;
default:
- dev_info(&bridge->dev, "not setting up bridge for bus "
- "%04x:%02x\n", pci_domain_nr(b), b->number);
+ dev_info(&bridge->dev, "not setting up bridge for bus %04x:%02x\n",
+ pci_domain_nr(b), b->number);
break;
}
}
static void pci_bridge_release_resources(struct pci_bus *bus,
unsigned long type)
{
- int idx;
- bool changed = false;
- struct pci_dev *dev;
+ struct pci_dev *dev = bus->self;
struct resource *r;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH;
+ IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
+ unsigned old_flags = 0;
+ struct resource *b_res;
+ int idx = 1;
- dev = bus->self;
- for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
- idx++) {
- r = &dev->resource[idx];
- if ((r->flags & type_mask) != type)
- continue;
- if (!r->parent)
- continue;
- /*
- * if there are children under that, we should release them
- * all
- */
- release_child_resources(r);
- if (!release_resource(r)) {
- dev_printk(KERN_DEBUG, &dev->dev,
- "resource %d %pR released\n", idx, r);
- /* keep the old size */
- r->end = resource_size(r) - 1;
- r->start = 0;
- r->flags = 0;
- changed = true;
- }
- }
+ b_res = &dev->resource[PCI_BRIDGE_RESOURCES];
+
+ /*
+	 * 1. If an I/O port assignment fails, release the bridge's
+	 *    I/O port window.
+	 * 2. If a non-prefetchable MMIO assignment fails, release the
+	 *    bridge's non-prefetchable MMIO window.
+	 * 3. If a 64-bit prefetchable MMIO assignment fails and the
+	 *    bridge's prefetchable window is 64-bit, release that
+	 *    prefetchable MMIO window.
+	 * 4. If a prefetchable MMIO assignment fails and the bridge's
+	 *    prefetchable window is 32-bit, release that window.
+	 * 5. If a prefetchable MMIO assignment fails and the bridge's
+	 *    prefetchable window is unassigned, release the bridge's
+	 *    non-prefetchable MMIO window.
+ */
+ if (type & IORESOURCE_IO)
+ idx = 0;
+ else if (!(type & IORESOURCE_PREFETCH))
+ idx = 1;
+ else if ((type & IORESOURCE_MEM_64) &&
+ (b_res[2].flags & IORESOURCE_MEM_64))
+ idx = 2;
+ else if (!(b_res[2].flags & IORESOURCE_MEM_64) &&
+ (b_res[2].flags & IORESOURCE_PREFETCH))
+ idx = 2;
+ else
+ idx = 1;
+
+ r = &b_res[idx];
+
+ if (!r->parent)
+ return;
+
+ /*
+	 * if there are children under that resource, we should
+	 * release them all
+ */
+ release_child_resources(r);
+ if (!release_resource(r)) {
+ type = old_flags = r->flags & type_mask;
+ dev_printk(KERN_DEBUG, &dev->dev, "resource %d %pR released\n",
+ PCI_BRIDGE_RESOURCES + idx, r);
+ /* keep the old size */
+ r->end = resource_size(r) - 1;
+ r->start = 0;
+ r->flags = 0;
- if (changed) {
		/* avoid touching the window without PREF */
if (type & IORESOURCE_PREFETCH)
type = IORESOURCE_PREFETCH;
__pci_setup_bridge(bus, type);
+	/* restore the flags for the next child resource under this bridge */
+ r->flags = old_flags;
}
}
@@ -776,9 +1386,9 @@ enum release_type {
 * try to release PCI bridge resources starting from the leaf bridge,
 * so we can allocate a bigger one later
*/
-static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
- unsigned long type,
- enum release_type rel_type)
+static void pci_bus_release_bridge_resources(struct pci_bus *bus,
+ unsigned long type,
+ enum release_type rel_type)
{
struct pci_dev *dev;
bool is_leaf_bridge = true;
@@ -815,10 +1425,10 @@ static void pci_bus_dump_res(struct pci_bus *bus)
pci_bus_for_each_resource(bus, res, i) {
if (!res || !res->end || !res->flags)
- continue;
+ continue;
dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
- }
+ }
}
static void pci_bus_dump_resources(struct pci_bus *bus)
@@ -838,51 +1448,224 @@ static void pci_bus_dump_resources(struct pci_bus *bus)
}
}
-void __init
-pci_assign_unassigned_resources(void)
+static int pci_bus_get_depth(struct pci_bus *bus)
+{
+ int depth = 0;
+ struct pci_bus *child_bus;
+
+ list_for_each_entry(child_bus, &bus->children, node) {
+ int ret;
+
+ ret = pci_bus_get_depth(child_bus);
+ if (ret + 1 > depth)
+ depth = ret + 1;
+ }
+
+ return depth;
+}
+
+/*
+ * -1: undefined, will auto detect later
+ * 0: disabled by user
+ * 1: disabled by auto detect
+ * 2: enabled by user
+ * 3: enabled by auto detect
+ */
+enum enable_type {
+ undefined = -1,
+ user_disabled,
+ auto_disabled,
+ user_enabled,
+ auto_enabled,
+};
+
+static enum enable_type pci_realloc_enable = undefined;
+void __init pci_realloc_get_opt(char *str)
+{
+ if (!strncmp(str, "off", 3))
+ pci_realloc_enable = user_disabled;
+ else if (!strncmp(str, "on", 2))
+ pci_realloc_enable = user_enabled;
+}
+static bool pci_realloc_enabled(enum enable_type enable)
+{
+ return enable >= user_enabled;
+}
+
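pci_realloc_get_opt() is intended to be fed from the early "pci=" command-line parser; a hedged sketch of that wiring follows (the example_pci_setup() handler is an assumption for illustration, not part of this patch):

	/* Sketch: route "pci=realloc[=on|off]" to pci_realloc_get_opt(). */
	static int __init example_pci_setup(char *str)
	{
		if (!strncmp(str, "realloc=", 8))
			pci_realloc_get_opt(str + 8);	/* "on" or "off" */
		else if (!strncmp(str, "realloc", 7))
			pci_realloc_get_opt("on");	/* bare "pci=realloc" */
		return 0;
	}
	early_param("pci", example_pci_setup);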
+#if defined(CONFIG_PCI_IOV) && defined(CONFIG_PCI_REALLOC_ENABLE_AUTO)
+static int iov_resources_unassigned(struct pci_dev *dev, void *data)
+{
+ int i;
+ bool *unassigned = data;
+
+ for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) {
+ struct resource *r = &dev->resource[i];
+ struct pci_bus_region region;
+
+ /* Not assigned or rejected by kernel? */
+ if (!r->flags)
+ continue;
+
+ pcibios_resource_to_bus(dev->bus, &region, r);
+ if (!region.start) {
+ *unassigned = true;
+ return 1; /* return early from pci_walk_bus() */
+ }
+ }
+
+ return 0;
+}
+
+static enum enable_type pci_realloc_detect(struct pci_bus *bus,
+ enum enable_type enable_local)
+{
+ bool unassigned = false;
+
+ if (enable_local != undefined)
+ return enable_local;
+
+ pci_walk_bus(bus, iov_resources_unassigned, &unassigned);
+ if (unassigned)
+ return auto_enabled;
+
+ return enable_local;
+}
+#else
+static enum enable_type pci_realloc_detect(struct pci_bus *bus,
+ enum enable_type enable_local)
+{
+ return enable_local;
+}
+#endif
+
+/*
+ * The first try will not touch PCI bridge resources; the second and
+ * later tries will clear small leaf bridge resources. We stop at the
+ * maximum depth if no good configuration can be found.
+ */
+void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
{
- struct pci_bus *bus;
+ LIST_HEAD(realloc_head); /* list of resources that
+ want additional resources */
+ struct list_head *add_list = NULL;
+ int tried_times = 0;
+ enum release_type rel_type = leaf_only;
+ LIST_HEAD(fail_head);
+ struct pci_dev_resource *fail_res;
+ unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
+ IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
+ int pci_try_num = 1;
+ enum enable_type enable_local;
+
+	/* honor an explicit user setting; otherwise auto-detect */
+ enable_local = pci_realloc_detect(bus, pci_realloc_enable);
+ if (pci_realloc_enabled(enable_local)) {
+ int max_depth = pci_bus_get_depth(bus);
+
+ pci_try_num = max_depth + 1;
+ dev_printk(KERN_DEBUG, &bus->dev,
+ "max bus depth: %d pci_try_num: %d\n",
+ max_depth, pci_try_num);
+ }
+again:
+ /*
+	 * Only the last try uses add_list; earlier tries treat optional
+	 * ("good to have") sizes as required so that parent bridge
+	 * resources can be reallocated.
+ */
+ if (tried_times + 1 == pci_try_num)
+ add_list = &realloc_head;
/* Depth first, calculate sizes and alignments of all
subordinate buses. */
- list_for_each_entry(bus, &pci_root_buses, node) {
- pci_bus_size_bridges(bus);
- }
+ __pci_bus_size_bridges(bus, add_list);
+
/* Depth last, allocate resources and update the hardware. */
- list_for_each_entry(bus, &pci_root_buses, node) {
- pci_bus_assign_resources(bus);
- pci_enable_bridges(bus);
+ __pci_bus_assign_resources(bus, add_list, &fail_head);
+ if (add_list)
+ BUG_ON(!list_empty(add_list));
+ tried_times++;
+
+	/* did any device complain? */
+ if (list_empty(&fail_head))
+ goto dump;
+
+ if (tried_times >= pci_try_num) {
+ if (enable_local == undefined)
+ dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
+ else if (enable_local == auto_enabled)
+ dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
+
+ free_list(&fail_head);
+ goto dump;
}
- /* dump the resource on buses */
- list_for_each_entry(bus, &pci_root_buses, node) {
- pci_bus_dump_resources(bus);
+ dev_printk(KERN_DEBUG, &bus->dev,
+ "No. %d try to assign unassigned res\n", tried_times + 1);
+
+	/* the third and later tries do not check whether the bridge is a leaf */
+ if ((tried_times + 1) > 2)
+ rel_type = whole_subtree;
+
+ /*
+	 * Try to release leaf bridge resources that don't fit the
+	 * resources of child devices under that bridge
+ */
+ list_for_each_entry(fail_res, &fail_head, list)
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
+ fail_res->flags & type_mask,
+ rel_type);
+
+ /* restore size and flags */
+ list_for_each_entry(fail_res, &fail_head, list) {
+ struct resource *res = fail_res->res;
+
+ res->start = fail_res->start;
+ res->end = fail_res->end;
+ res->flags = fail_res->flags;
+ if (fail_res->dev->subordinate)
+ res->flags = 0;
}
+ free_list(&fail_head);
+
+ goto again;
+
+dump:
+ /* dump the resource on buses */
+ pci_bus_dump_resources(bus);
+}
+
+void __init pci_assign_unassigned_resources(void)
+{
+ struct pci_bus *root_bus;
+
+ list_for_each_entry(root_bus, &pci_root_buses, node)
+ pci_assign_unassigned_root_bus_resources(root_bus);
}
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
struct pci_bus *parent = bridge->subordinate;
+ LIST_HEAD(add_list); /* list of resources that
+ want additional resources */
int tried_times = 0;
- struct resource_list_x head, *list;
+ LIST_HEAD(fail_head);
+ struct pci_dev_resource *fail_res;
int retval;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
- head.next = NULL;
-
again:
- pci_bus_size_bridges(parent);
- __pci_bridge_assign_resources(bridge, &head);
-
+ __pci_bus_size_bridges(parent, &add_list);
+ __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
+ BUG_ON(!list_empty(&add_list));
tried_times++;
- if (!head.next)
+ if (list_empty(&fail_head))
goto enable_all;
if (tried_times >= 2) {
		/* still failing; no need to try again */
- free_failed_list(&head);
+ free_list(&fail_head);
goto enable_all;
}
@@ -893,33 +1676,45 @@ again:
 * Try to release leaf bridge resources that don't fit the
 * resources of child devices under that bridge
*/
- for (list = head.next; list;) {
- struct pci_bus *bus = list->dev->bus;
- unsigned long flags = list->flags;
-
- pci_bus_release_bridge_resources(bus, flags & type_mask,
+ list_for_each_entry(fail_res, &fail_head, list)
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
+ fail_res->flags & type_mask,
whole_subtree);
- list = list->next;
- }
+
/* restore size and flags */
- for (list = head.next; list;) {
- struct resource *res = list->res;
+ list_for_each_entry(fail_res, &fail_head, list) {
+ struct resource *res = fail_res->res;
- res->start = list->start;
- res->end = list->end;
- res->flags = list->flags;
- if (list->dev->subordinate)
+ res->start = fail_res->start;
+ res->end = fail_res->end;
+ res->flags = fail_res->flags;
+ if (fail_res->dev->subordinate)
res->flags = 0;
-
- list = list->next;
}
- free_failed_list(&head);
+ free_list(&fail_head);
goto again;
enable_all:
retval = pci_reenable_device(bridge);
+ if (retval)
+ dev_err(&bridge->dev, "Error reenabling bridge (%d)\n", retval);
pci_set_master(bridge);
- pci_enable_bridges(parent);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
+
+void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+ LIST_HEAD(add_list); /* list of resources that
+ want additional resources */
+
+ down_read(&pci_bus_sem);
+ list_for_each_entry(dev, &bus->devices, bus_list)
+ if (pci_is_bridge(dev) && pci_has_subordinate(dev))
+ __pci_bus_size_bridges(dev->subordinate,
+ &add_list);
+ up_read(&pci_bus_sem);
+ __pci_bus_assign_resources(bus, &add_list, NULL);
+ BUG_ON(!list_empty(&add_list));
+}
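The exported entry points above target hotplug paths; a minimal sketch of a bridge hot-add sequence using pci_assign_unassigned_bridge_resources() follows (the surrounding rescan logic is an assumption — real hotplug drivers add locking and error handling):

	/* Sketch: discover, assign, and bind devices behind a hot-added bridge. */
	static void example_enable_hotplug_bridge(struct pci_dev *bridge)
	{
		pci_scan_child_bus(bridge->subordinate);	/* discover devices */
		pci_assign_unassigned_bridge_resources(bridge);	/* size + assign */
		pci_bus_add_devices(bridge->subordinate);	/* bind drivers */
	}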
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index eec9738f349..4e2d595d50c 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -10,18 +10,21 @@
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/cache.h>
+void __weak pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+ dev_dbg(&dev->dev, "assigning IRQ %02d\n", irq);
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
-static void __init
-pdev_fixup_irq(struct pci_dev *dev,
- u8 (*swizzle)(struct pci_dev *, u8 *),
- int (*map_irq)(struct pci_dev *, u8, u8))
+static void pdev_fixup_irq(struct pci_dev *dev,
+ u8 (*swizzle)(struct pci_dev *, u8 *),
+ int (*map_irq)(const struct pci_dev *, u8, u8))
{
u8 pin, slot;
int irq = 0;
@@ -54,11 +57,11 @@ pdev_fixup_irq(struct pci_dev *dev,
pcibios_update_irq(dev, irq);
}
-void __init
-pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *),
- int (*map_irq)(struct pci_dev *, u8, u8))
+void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *),
+ int (*map_irq)(const struct pci_dev *, u8, u8))
{
struct pci_dev *dev = NULL;
+
for_each_pci_dev(dev)
pdev_fixup_irq(dev, swizzle, map_irq);
}
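pci_fixup_irqs() expects platform-supplied swizzle and map callbacks; a hedged example using the common swizzle helper follows (the IRQ table and example_map_irq() are hypothetical board-specific values):

	/* Sketch: board code wiring legacy INTx routing at boot. */
	static int example_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
	{
		static const int irq_tab[4] = { 29, 30, 27, 28 };	/* hypothetical */

		return irq_tab[(slot + pin - 1) & 3];	/* pin is 1-4 (INTA-INTD) */
	}

	static void __init example_pcibios_init(void)
	{
		pci_fixup_irqs(pci_common_swizzle, example_map_irq);
	}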
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index bc0e6eea0ff..caed1ce6fac 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -16,8 +16,8 @@
* Resource sorting
*/
-#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -29,6 +29,8 @@
void pci_update_resource(struct pci_dev *dev, int resno)
{
struct pci_bus_region region;
+ bool disable;
+ u16 cmd;
u32 new, check, mask;
int reg;
enum pci_bar_type type;
@@ -41,6 +43,9 @@ void pci_update_resource(struct pci_dev *dev, int resno)
if (!res->flags)
return;
+ if (res->flags & IORESOURCE_UNSET)
+ return;
+
/*
* Ignore non-moveable resources. This might be legacy resources for
* which no functional BAR register exists or another important
@@ -49,7 +54,7 @@ void pci_update_resource(struct pci_dev *dev, int resno)
if (res->flags & IORESOURCE_PCI_FIXED)
return;
- pcibios_resource_to_bus(dev, &region, res);
+ pcibios_resource_to_bus(dev->bus, &region, res);
new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
if (res->flags & IORESOURCE_IO)
@@ -66,6 +71,18 @@ void pci_update_resource(struct pci_dev *dev, int resno)
new |= PCI_ROM_ADDRESS_ENABLE;
}
+ /*
+ * We can't update a 64-bit BAR atomically, so when possible,
+ * disable decoding so that a half-updated BAR won't conflict
+ * with another device.
+ */
+ disable = (res->flags & IORESOURCE_MEM_64) && !dev->mmio_always_on;
+ if (disable) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ pci_write_config_word(dev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_MEMORY);
+ }
+
pci_write_config_dword(dev, reg, new);
pci_read_config_dword(dev, reg, &check);
@@ -74,20 +91,18 @@ void pci_update_resource(struct pci_dev *dev, int resno)
resno, new, check);
}
- if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
- (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) {
+ if (res->flags & IORESOURCE_MEM_64) {
new = region.start >> 16 >> 16;
pci_write_config_dword(dev, reg + 4, new);
pci_read_config_dword(dev, reg + 4, &check);
if (check != new) {
- dev_err(&dev->dev, "BAR %d: error updating "
- "(high %#08x != %#08x)\n", resno, new, check);
+ dev_err(&dev->dev, "BAR %d: error updating (high %#08x != %#08x)\n",
+ resno, new, check);
}
}
- res->flags &= ~IORESOURCE_UNSET;
- dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
- resno, res, (unsigned long long)region.start,
- (unsigned long long)region.end);
+
+ if (disable)
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
}
int pci_claim_resource(struct pci_dev *dev, int resource)
@@ -95,18 +110,23 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
struct resource *res = &dev->resource[resource];
struct resource *root, *conflict;
+ if (res->flags & IORESOURCE_UNSET) {
+ dev_info(&dev->dev, "can't claim BAR %d %pR: no address assigned\n",
+ resource, res);
+ return -EINVAL;
+ }
+
root = pci_find_parent_resource(dev, res);
if (!root) {
- dev_info(&dev->dev, "no compatible bridge window for %pR\n",
- res);
+ dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n",
+ resource, res);
return -EINVAL;
}
conflict = request_resource_conflict(root, res);
if (conflict) {
- dev_info(&dev->dev,
- "address space collision: %pR conflicts with %s %pR\n",
- res, conflict->name, conflict);
+ dev_info(&dev->dev, "can't claim BAR %d %pR: address conflict with %s %pR\n",
+ resource, res, conflict->name, conflict);
return -EBUSY;
}
@@ -114,7 +134,6 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
}
EXPORT_SYMBOL(pci_claim_resource);
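pci_claim_resource() suits firmware-trusting platforms that keep BIOS-assigned addresses rather than reassigning them; a minimal sketch of claiming every populated BAR of one device follows (loop bounds and warning policy are illustrative):

	/* Sketch: claim whatever addresses firmware already programmed. */
	static void example_claim_device(struct pci_dev *dev)
	{
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->flags && pci_claim_resource(dev, i))
				dev_warn(&dev->dev, "BAR %d left unclaimed\n", i);
		}
	}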
-#ifdef CONFIG_PCI_QUIRKS
void pci_disable_bridge_window(struct pci_dev *dev)
{
dev_info(&dev->dev, "disabling bridge mem windows\n");
@@ -127,101 +146,120 @@ void pci_disable_bridge_window(struct pci_dev *dev)
pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0x0000fff0);
pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0xffffffff);
}
-#endif /* CONFIG_PCI_QUIRKS */
-static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
- int resno)
+/*
+ * Generic function that returns a value indicating that the device's
+ * original BIOS BAR address was not saved and so is not available for
+ * reinstatement.
+ *
+ * Can be overridden by architecture-specific code that implements
+ * reinstatement functionality rather than leaving it disabled when
+ * normal allocation attempts fail.
+ */
+resource_size_t __weak pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
{
- struct resource *res = dev->resource + resno;
- resource_size_t size, min, align;
- int ret;
-
- size = resource_size(res);
- min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
- align = pci_resource_alignment(dev, res);
-
- /* First, try exact prefetching match.. */
- ret = pci_bus_alloc_resource(bus, res, size, align, min,
- IORESOURCE_PREFETCH,
- pcibios_align_resource, dev);
+ return 0;
+}
- if (ret < 0 && (res->flags & IORESOURCE_PREFETCH)) {
- /*
- * That failed.
- *
- * But a prefetching area can handle a non-prefetching
- * window (it will just not perform as well).
- */
- ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
- pcibios_align_resource, dev);
- }
+static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
+ int resno, resource_size_t size)
+{
+ struct resource *root, *conflict;
+ resource_size_t fw_addr, start, end;
+ int ret = 0;
- if (ret < 0 && dev->fw_addr[resno]) {
- struct resource *root, *conflict;
- resource_size_t start, end;
+ fw_addr = pcibios_retrieve_fw_addr(dev, resno);
+ if (!fw_addr)
+ return 1;
- /*
- * If we failed to assign anything, let's try the address
- * where firmware left it. That at least has a chance of
- * working, which is better than just leaving it disabled.
- */
+ start = res->start;
+ end = res->end;
+ res->start = fw_addr;
+ res->end = res->start + size - 1;
+ root = pci_find_parent_resource(dev, res);
+ if (!root) {
if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
else
root = &iomem_resource;
+ }
- start = res->start;
- end = res->end;
- res->start = dev->fw_addr[resno];
- res->end = res->start + size - 1;
- dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
- resno, res);
- conflict = request_resource_conflict(root, res);
- if (conflict) {
- dev_info(&dev->dev,
- "BAR %d: %pR conflicts with %s %pR\n", resno,
- res, conflict->name, conflict);
- res->start = start;
- res->end = end;
- } else
- ret = 0;
+ dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
+ resno, res);
+ conflict = request_resource_conflict(root, res);
+ if (conflict) {
+ dev_info(&dev->dev,
+ "BAR %d: %pR conflicts with %s %pR\n", resno,
+ res, conflict->name, conflict);
+ res->start = start;
+ res->end = end;
+ ret = 1;
}
+ return ret;
+}
- if (!ret) {
- res->flags &= ~IORESOURCE_STARTALIGN;
- dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
- if (resno < PCI_BRIDGE_RESOURCES)
- pci_update_resource(dev, resno);
+static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
+ int resno, resource_size_t size, resource_size_t align)
+{
+ struct resource *res = dev->resource + resno;
+ resource_size_t min;
+ int ret;
+
+ min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
+
+ /*
+ * First, try exact prefetching match. Even if a 64-bit
+ * prefetchable bridge window is below 4GB, we can't put a 32-bit
+ * prefetchable resource in it because pbus_size_mem() assumes a
+ * 64-bit window will contain no 32-bit resources. If we assign
+ * things differently than they were sized, not everything will fit.
+ */
+ ret = pci_bus_alloc_resource(bus, res, size, align, min,
+ IORESOURCE_PREFETCH | IORESOURCE_MEM_64,
+ pcibios_align_resource, dev);
+ if (ret == 0)
+ return 0;
+
+ /*
+ * If the prefetchable window is only 32 bits wide, we can put
+ * 64-bit prefetchable resources in it.
+ */
+ if ((res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) ==
+ (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) {
+ ret = pci_bus_alloc_resource(bus, res, size, align, min,
+ IORESOURCE_PREFETCH,
+ pcibios_align_resource, dev);
+ if (ret == 0)
+ return 0;
}
+ /*
+ * If we didn't find a better match, we can put any memory resource
+ * in a non-prefetchable window. If this resource is 32 bits and
+ * non-prefetchable, the first call already tried the only possibility
+ * so we don't need to try again.
+ */
+ if (res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64))
+ ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
+ pcibios_align_resource, dev);
+
return ret;
}
-int pci_assign_resource(struct pci_dev *dev, int resno)
+static int _pci_assign_resource(struct pci_dev *dev, int resno,
+ resource_size_t size, resource_size_t min_align)
{
struct resource *res = dev->resource + resno;
- resource_size_t align;
struct pci_bus *bus;
int ret;
char *type;
- align = pci_resource_alignment(dev, res);
- if (!align) {
- dev_info(&dev->dev, "BAR %d: can't assign %pR "
- "(bogus alignment)\n", resno, res);
- return -EINVAL;
- }
-
bus = dev->bus;
- while ((ret = __pci_assign_resource(bus, dev, resno))) {
- if (bus->parent && bus->self->transparent)
- bus = bus->parent;
- else
- bus = NULL;
- if (bus)
- continue;
- break;
+ while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) {
+ if (!bus->parent || !bus->self->transparent)
+ break;
+ bus = bus->parent;
}
if (ret) {
@@ -242,50 +280,67 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
return ret;
}
-/* Sort resources by alignment */
-void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
+int pci_assign_resource(struct pci_dev *dev, int resno)
{
- int i;
+ struct resource *res = dev->resource + resno;
+ resource_size_t align, size;
+ int ret;
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- struct resource *r;
- struct resource_list *list, *tmp;
- resource_size_t r_align;
+ res->flags |= IORESOURCE_UNSET;
+ align = pci_resource_alignment(dev, res);
+ if (!align) {
+ dev_info(&dev->dev, "BAR %d: can't assign %pR (bogus alignment)\n",
+ resno, res);
+ return -EINVAL;
+ }
- r = &dev->resource[i];
+ size = resource_size(res);
+ ret = _pci_assign_resource(dev, resno, size, align);
- if (r->flags & IORESOURCE_PCI_FIXED)
- continue;
+ /*
+ * If we failed to assign anything, let's try the address
+ * where firmware left it. That at least has a chance of
+ * working, which is better than just leaving it disabled.
+ */
+ if (ret < 0)
+ ret = pci_revert_fw_address(res, dev, resno, size);
- if (!(r->flags) || r->parent)
- continue;
+ if (!ret) {
+ res->flags &= ~IORESOURCE_UNSET;
+ res->flags &= ~IORESOURCE_STARTALIGN;
+ dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(pci_assign_resource);
- r_align = pci_resource_alignment(dev, r);
- if (!r_align) {
- dev_warn(&dev->dev, "BAR %d: %pR has bogus alignment\n",
- i, r);
- continue;
- }
- for (list = head; ; list = list->next) {
- resource_size_t align = 0;
- struct resource_list *ln = list->next;
-
- if (ln)
- align = pci_resource_alignment(ln->dev, ln->res);
-
- if (r_align > align) {
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- panic("pdev_sort_resources(): "
- "kmalloc() failed!\n");
- tmp->next = ln;
- tmp->res = r;
- tmp->dev = dev;
- list->next = tmp;
- break;
- }
- }
+int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
+ resource_size_t min_align)
+{
+ struct resource *res = dev->resource + resno;
+ resource_size_t new_size;
+ int ret;
+
+ res->flags |= IORESOURCE_UNSET;
+ if (!res->parent) {
+ dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR\n",
+ resno, res);
+ return -EINVAL;
+ }
+
+ /* already aligned with min_align */
+ new_size = resource_size(res) + addsize;
+ ret = _pci_assign_resource(dev, resno, new_size, min_align);
+ if (!ret) {
+ res->flags &= ~IORESOURCE_UNSET;
+ res->flags &= ~IORESOURCE_STARTALIGN;
+ dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
}
+ return ret;
}
int pci_enable_resources(struct pci_dev *dev, int mask)
@@ -309,9 +364,15 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
(!(r->flags & IORESOURCE_ROM_ENABLE)))
continue;
+ if (r->flags & IORESOURCE_UNSET) {
+ dev_err(&dev->dev, "can't enable device: BAR %d %pR not assigned\n",
+ i, r);
+ return -EINVAL;
+ }
+
if (!r->parent) {
- dev_err(&dev->dev, "device not available "
- "(can't reserve %pR)\n", r);
+ dev_err(&dev->dev, "can't enable device: BAR %d %pR not claimed\n",
+ i, r);
return -EINVAL;
}
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 968cfea04f7..396c200b9dd 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -7,6 +7,7 @@
#include <linux/kobject.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/err.h>
#include "pci.h"
@@ -52,7 +53,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
static const char *pci_bus_speed_strings[] = {
"33 MHz PCI", /* 0x00 */
"66 MHz PCI", /* 0x01 */
- "66 MHz PCI-X", /* 0x02 */
+ "66 MHz PCI-X", /* 0x02 */
"100 MHz PCI-X", /* 0x03 */
"133 MHz PCI-X", /* 0x04 */
NULL, /* 0x05 */
@@ -115,11 +116,11 @@ static void pci_slot_release(struct kobject *kobj)
}
static struct pci_slot_attribute pci_slot_attr_address =
- __ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL);
+ __ATTR(address, S_IRUGO, address_read_file, NULL);
static struct pci_slot_attribute pci_slot_attr_max_speed =
- __ATTR(max_bus_speed, (S_IFREG | S_IRUGO), max_speed_read_file, NULL);
+ __ATTR(max_bus_speed, S_IRUGO, max_speed_read_file, NULL);
static struct pci_slot_attribute pci_slot_attr_cur_speed =
- __ATTR(cur_bus_speed, (S_IFREG | S_IRUGO), cur_speed_read_file, NULL);
+ __ATTR(cur_bus_speed, S_IRUGO, cur_speed_read_file, NULL);
static struct attribute *pci_slot_default_attrs[] = {
&pci_slot_attr_address.attr,
@@ -319,32 +320,6 @@ err:
EXPORT_SYMBOL_GPL(pci_create_slot);
/**
- * pci_renumber_slot - update %struct pci_slot -> number
- * @slot: &struct pci_slot to update
- * @slot_nr: new number for slot
- *
- * The primary purpose of this interface is to allow callers who earlier
- * created a placeholder slot in pci_create_slot() by passing a -1 as
- * slot_nr, to update their %struct pci_slot with the correct @slot_nr.
- */
-void pci_renumber_slot(struct pci_slot *slot, int slot_nr)
-{
- struct pci_slot *tmp;
-
- down_write(&pci_bus_sem);
-
- list_for_each_entry(tmp, &slot->bus->slots, list) {
- WARN_ON(tmp->number == slot_nr);
- goto out;
- }
-
- slot->number = slot_nr;
-out:
- up_write(&pci_bus_sem);
-}
-EXPORT_SYMBOL_GPL(pci_renumber_slot);
-
-/**
* pci_destroy_slot - decrement refcount for physical PCI slot
* @slot: struct pci_slot to decrement
*
@@ -376,14 +351,17 @@ void pci_hp_create_module_link(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
struct kobject *kobj = NULL;
- int no_warn;
+ int ret;
if (!slot || !slot->ops)
return;
kobj = kset_find_obj(module_kset, slot->ops->mod_name);
if (!kobj)
return;
- no_warn = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ ret = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ if (ret)
+ dev_err(&pci_slot->bus->dev, "Error creating sysfs link (%d)\n",
+ ret);
kobject_put(kobj);
}
EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index e1c1ec54089..b91c4da6836 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -44,7 +44,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
default:
err = -EINVAL;
goto error;
- };
+ }
err = -EIO;
if (cfg_ret != PCIBIOS_SUCCESSFUL)
@@ -99,7 +99,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
if (!dev)
return -ENODEV;
- switch(len) {
+ switch (len) {
case 1:
err = get_user(byte, (u8 __user *)buf);
if (err)
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
new file mode 100644
index 00000000000..7e1304d2e38
--- /dev/null
+++ b/drivers/pci/vc.c
@@ -0,0 +1,434 @@
+/*
+ * PCI Virtual Channel support
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/types.h>
+
+/**
+ * pci_vc_save_restore_dwords - Save or restore a series of dwords
+ * @dev: device
+ * @pos: starting config space position
+ * @buf: buffer to save to or restore from
+ * @dwords: number of dwords to save/restore
+ * @save: whether to save or restore
+ */
+static void pci_vc_save_restore_dwords(struct pci_dev *dev, int pos,
+ u32 *buf, int dwords, bool save)
+{
+ int i;
+
+ for (i = 0; i < dwords; i++, buf++) {
+ if (save)
+ pci_read_config_dword(dev, pos + (i * 4), buf);
+ else
+ pci_write_config_dword(dev, pos + (i * 4), *buf);
+ }
+}
+
+/**
+ * pci_vc_load_arb_table - load and wait for VC arbitration table
+ * @dev: device
+ * @pos: starting position of VC capability (VC/VC9/MFVC)
+ *
+ * Set Load VC Arbitration Table bit requesting hardware to apply the VC
+ * Arbitration Table (previously loaded). When the VC Arbitration Table
+ * Status clears, hardware has latched the table into VC arbitration logic.
+ */
+static void pci_vc_load_arb_table(struct pci_dev *dev, int pos)
+{
+ u16 ctrl;
+
+ pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL, &ctrl);
+ pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
+ ctrl | PCI_VC_PORT_CTRL_LOAD_TABLE);
+ if (pci_wait_for_pending(dev, pos + PCI_VC_PORT_STATUS,
+ PCI_VC_PORT_STATUS_TABLE))
+ return;
+
+ dev_err(&dev->dev, "VC arbitration table failed to load\n");
+}
+
+/**
+ * pci_vc_load_port_arb_table - Load and wait for VC port arbitration table
+ * @dev: device
+ * @pos: starting position of VC capability (VC/VC9/MFVC)
+ * @res: VC resource number, ie. VCn (0-7)
+ *
+ * Set Load Port Arbitration Table bit requesting hardware to apply the Port
+ * Arbitration Table (previously loaded). When the Port Arbitration Table
+ * Status clears, hardware has latched the table into port arbitration logic.
+ */
+static void pci_vc_load_port_arb_table(struct pci_dev *dev, int pos, int res)
+{
+ int ctrl_pos, status_pos;
+ u32 ctrl;
+
+ ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
+ status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF);
+
+ pci_read_config_dword(dev, ctrl_pos, &ctrl);
+ pci_write_config_dword(dev, ctrl_pos,
+ ctrl | PCI_VC_RES_CTRL_LOAD_TABLE);
+
+ if (pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_TABLE))
+ return;
+
+ dev_err(&dev->dev, "VC%d port arbitration table failed to load\n", res);
+}
+
+/**
+ * pci_vc_enable - Enable virtual channel
+ * @dev: device
+ * @pos: starting position of VC capability (VC/VC9/MFVC)
+ * @res: VC res number, ie. VCn (0-7)
+ *
+ * A VC is enabled by setting the enable bit in matching resource control
+ * registers on both sides of a link. We therefore need to find the opposite
+ * end of the link. To keep this simple we enable from the downstream device.
+ * RC devices do not have an upstream device, nor, it seems, do VC9
+ * devices (the spec is unclear). Once we find the upstream device,
+ * match the VC ID to get the correct resource, then disable and
+ * enable on both ends.
+ */
+static void pci_vc_enable(struct pci_dev *dev, int pos, int res)
+{
+ int ctrl_pos, status_pos, id, pos2, evcc, i, ctrl_pos2, status_pos2;
+ u32 ctrl, header, cap1, ctrl2;
+ struct pci_dev *link = NULL;
+
+ /* Enable VCs from the downstream device */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+
+ ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
+ status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF);
+
+ pci_read_config_dword(dev, ctrl_pos, &ctrl);
+ id = ctrl & PCI_VC_RES_CTRL_ID;
+
+ pci_read_config_dword(dev, pos, &header);
+
+ /* If there is no opposite end of the link, skip to enable */
+ if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_VC9 ||
+ pci_is_root_bus(dev->bus))
+ goto enable;
+
+ pos2 = pci_find_ext_capability(dev->bus->self, PCI_EXT_CAP_ID_VC);
+ if (!pos2)
+ goto enable;
+
+ pci_read_config_dword(dev->bus->self, pos2 + PCI_VC_PORT_CAP1, &cap1);
+ evcc = cap1 & PCI_VC_CAP1_EVCC;
+
+	/* VC0 is hardwired to enabled, so we can start at 1 */
+ for (i = 1; i < evcc + 1; i++) {
+ ctrl_pos2 = pos2 + PCI_VC_RES_CTRL +
+ (i * PCI_CAP_VC_PER_VC_SIZEOF);
+ status_pos2 = pos2 + PCI_VC_RES_STATUS +
+ (i * PCI_CAP_VC_PER_VC_SIZEOF);
+ pci_read_config_dword(dev->bus->self, ctrl_pos2, &ctrl2);
+ if ((ctrl2 & PCI_VC_RES_CTRL_ID) == id) {
+ link = dev->bus->self;
+ break;
+ }
+ }
+
+ if (!link)
+ goto enable;
+
+ /* Disable if enabled */
+ if (ctrl2 & PCI_VC_RES_CTRL_ENABLE) {
+ ctrl2 &= ~PCI_VC_RES_CTRL_ENABLE;
+ pci_write_config_dword(link, ctrl_pos2, ctrl2);
+ }
+
+ /* Enable on both ends */
+ ctrl2 |= PCI_VC_RES_CTRL_ENABLE;
+ pci_write_config_dword(link, ctrl_pos2, ctrl2);
+enable:
+ ctrl |= PCI_VC_RES_CTRL_ENABLE;
+ pci_write_config_dword(dev, ctrl_pos, ctrl);
+
+ if (!pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_NEGO))
+ dev_err(&dev->dev, "VC%d negotiation stuck pending\n", id);
+
+ if (link && !pci_wait_for_pending(link, status_pos2,
+ PCI_VC_RES_STATUS_NEGO))
+ dev_err(&link->dev, "VC%d negotiation stuck pending\n", id);
+}
+
+/**
+ * pci_vc_do_save_buffer - Size, save, or restore VC state
+ * @dev: device
+ * @pos: starting position of VC capability (VC/VC9/MFVC)
+ * @save_state: buffer for save/restore
+ * @name: for error message
+ * @save: if provided a buffer, this indicates what to do with it
+ *
+ * Walking Virtual Channel config space to size, save, or restore it
+ * is complicated, so we do it all from one function to reduce code and
+ * guarantee ordering matches in the buffer. When called with NULL
+ * @save_state, return the size of the necessary save buffer. When called
+ * with a non-NULL @save_state, @save determines whether we save to the
+ * buffer or restore from it.
+ */
+static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
+ struct pci_cap_saved_state *save_state,
+ bool save)
+{
+ u32 cap1;
+ char evcc, lpevcc, parb_size;
+ int i, len = 0;
+ u8 *buf = save_state ? (u8 *)save_state->cap.data : NULL;
+
+ /* Sanity check buffer size for save/restore */
+ if (buf && save_state->cap.size !=
+ pci_vc_do_save_buffer(dev, pos, NULL, save)) {
+ dev_err(&dev->dev,
+ "VC save buffer size does not match @0x%x\n", pos);
+ return -ENOMEM;
+ }
+
+ pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP1, &cap1);
+ /* Extended VC Count (not counting VC0) */
+ evcc = cap1 & PCI_VC_CAP1_EVCC;
+ /* Low Priority Extended VC Count (not counting VC0) */
+ lpevcc = (cap1 & PCI_VC_CAP1_LPEVCC) >> 4;
+ /* Port Arbitration Table Entry Size (bits) */
+ parb_size = 1 << ((cap1 & PCI_VC_CAP1_ARB_SIZE) >> 10);
+
+ /*
+ * Port VC Control Register contains VC Arbitration Select, which
+ * cannot be modified when more than one LPVC is in operation. We
+ * therefore save/restore it first, as only VC0 should be enabled
+ * after device reset.
+ */
+ if (buf) {
+ if (save)
+ pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL,
+ (u16 *)buf);
+ else
+ pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
+ *(u16 *)buf);
+ buf += 2;
+ }
+ len += 2;
+
+ /*
+ * If we have any Low Priority VCs and a VC Arbitration Table Offset
+ * in Port VC Capability Register 2 then save/restore it next.
+ */
+ if (lpevcc) {
+ u32 cap2;
+ int vcarb_offset;
+
+ pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP2, &cap2);
+ vcarb_offset = ((cap2 & PCI_VC_CAP2_ARB_OFF) >> 24) * 16;
+
+ if (vcarb_offset) {
+ int size, vcarb_phases = 0;
+
+ if (cap2 & PCI_VC_CAP2_128_PHASE)
+ vcarb_phases = 128;
+ else if (cap2 & PCI_VC_CAP2_64_PHASE)
+ vcarb_phases = 64;
+ else if (cap2 & PCI_VC_CAP2_32_PHASE)
+ vcarb_phases = 32;
+
+ /* Fixed 4 bits per phase per lpevcc (plus VC0) */
+ size = ((lpevcc + 1) * vcarb_phases * 4) / 8;
+
+ if (size && buf) {
+ pci_vc_save_restore_dwords(dev,
+ pos + vcarb_offset,
+ (u32 *)buf,
+ size / 4, save);
+ /*
+ * On restore, we need to signal hardware to
+ * re-load the VC Arbitration Table.
+ */
+ if (!save)
+ pci_vc_load_arb_table(dev, pos);
+
+ buf += size;
+ }
+ len += size;
+ }
+ }
+
+ /*
+ * In addition to each VC Resource Control Register, we may have a
+ * Port Arbitration Table attached to each VC. The Port Arbitration
+ * Table Offset in each VC Resource Capability Register tells us if
+ * it exists. The entry size is global from the Port VC Capability
+ * Register1 above. The number of phases is determined per VC.
+ */
+ for (i = 0; i < evcc + 1; i++) {
+ u32 cap;
+ int parb_offset;
+
+ pci_read_config_dword(dev, pos + PCI_VC_RES_CAP +
+ (i * PCI_CAP_VC_PER_VC_SIZEOF), &cap);
+ parb_offset = ((cap & PCI_VC_RES_CAP_ARB_OFF) >> 24) * 16;
+ if (parb_offset) {
+ int size, parb_phases = 0;
+
+ if (cap & PCI_VC_RES_CAP_256_PHASE)
+ parb_phases = 256;
+ else if (cap & (PCI_VC_RES_CAP_128_PHASE |
+ PCI_VC_RES_CAP_128_PHASE_TB))
+ parb_phases = 128;
+ else if (cap & PCI_VC_RES_CAP_64_PHASE)
+ parb_phases = 64;
+ else if (cap & PCI_VC_RES_CAP_32_PHASE)
+ parb_phases = 32;
+
+ size = (parb_size * parb_phases) / 8;
+
+ if (size && buf) {
+ pci_vc_save_restore_dwords(dev,
+ pos + parb_offset,
+ (u32 *)buf,
+ size / 4, save);
+ buf += size;
+ }
+ len += size;
+ }
+
+ /* VC Resource Control Register */
+ if (buf) {
+ int ctrl_pos = pos + PCI_VC_RES_CTRL +
+ (i * PCI_CAP_VC_PER_VC_SIZEOF);
+ if (save)
+ pci_read_config_dword(dev, ctrl_pos,
+ (u32 *)buf);
+ else {
+ u32 tmp, ctrl = *(u32 *)buf;
+ /*
+ * For an FLR case, the VC config may remain.
+ * Preserve enable bit, restore the rest.
+ */
+ pci_read_config_dword(dev, ctrl_pos, &tmp);
+ tmp &= PCI_VC_RES_CTRL_ENABLE;
+ tmp |= ctrl & ~PCI_VC_RES_CTRL_ENABLE;
+ pci_write_config_dword(dev, ctrl_pos, tmp);
+ /* Load port arbitration table if used */
+ if (ctrl & PCI_VC_RES_CTRL_ARB_SELECT)
+ pci_vc_load_port_arb_table(dev, pos, i);
+ /* Re-enable if needed */
+ if ((ctrl ^ tmp) & PCI_VC_RES_CTRL_ENABLE)
+ pci_vc_enable(dev, pos, i);
+ }
+ buf += 4;
+ }
+ len += 4;
+ }
+
+ return buf ? 0 : len;
+}
+
+static struct {
+ u16 id;
+ const char *name;
+} vc_caps[] = { { PCI_EXT_CAP_ID_MFVC, "MFVC" },
+ { PCI_EXT_CAP_ID_VC, "VC" },
+ { PCI_EXT_CAP_ID_VC9, "VC9" } };
+
+/**
+ * pci_save_vc_state - Save VC state to pre-allocated save buffer
+ * @dev: device
+ *
+ * For each type of VC capability, VC/VC9/MFVC, find the capability and
+ * save it to the pre-allocated save buffer.
+ */
+int pci_save_vc_state(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
+ int pos, ret;
+ struct pci_cap_saved_state *save_state;
+
+ pos = pci_find_ext_capability(dev, vc_caps[i].id);
+ if (!pos)
+ continue;
+
+ save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id);
+ if (!save_state) {
+ dev_err(&dev->dev, "%s buffer not found in %s\n",
+ vc_caps[i].name, __func__);
+ return -ENOMEM;
+ }
+
+ ret = pci_vc_do_save_buffer(dev, pos, save_state, true);
+ if (ret) {
+ dev_err(&dev->dev, "%s save unsuccessful %s\n",
+ vc_caps[i].name, __func__);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pci_restore_vc_state - Restore VC state from save buffer
+ * @dev: device
+ *
+ * For each type of VC capability, VC/VC9/MFVC, find the capability and
+ * restore it from the previously saved buffer.
+ */
+void pci_restore_vc_state(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
+ int pos;
+ struct pci_cap_saved_state *save_state;
+
+ pos = pci_find_ext_capability(dev, vc_caps[i].id);
+ save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id);
+ if (!save_state || !pos)
+ continue;
+
+ pci_vc_do_save_buffer(dev, pos, save_state, false);
+ }
+}
+
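The VC save/restore pair is meant to bracket events that reset the capability, such as FLR or suspend; a hedged sketch of that pairing follows (it assumes pci_allocate_vc_save_buffers() below already ran at enumeration time; the suspend/resume hooks are illustrative):

	/* Sketch: save VC state before reset/suspend, restore afterwards. */
	static int example_suspend(struct pci_dev *dev)
	{
		return pci_save_vc_state(dev);	/* fills the pre-allocated buffers */
	}

	static void example_resume(struct pci_dev *dev)
	{
		pci_restore_vc_state(dev);	/* re-programs the VC capability */
	}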
+/**
+ * pci_allocate_vc_save_buffers - Allocate save buffers for VC caps
+ * @dev: device
+ *
+ * For each type of VC capability, VC/VC9/MFVC, find the capability, size
+ * it, and allocate a buffer for save/restore.
+ */
+void pci_allocate_vc_save_buffers(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
+ int len, pos = pci_find_ext_capability(dev, vc_caps[i].id);
+
+ if (!pos)
+ continue;
+
+ len = pci_vc_do_save_buffer(dev, pos, NULL, false);
+ if (pci_add_ext_cap_save_buffer(dev, vc_caps[i].id, len))
+ dev_err(&dev->dev,
+ "unable to preallocate %s save buffer\n",
+ vc_caps[i].name);
+ }
+}
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index a5a5ca17cfe..39b79070335 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -6,6 +6,7 @@
*/
#include <linux/pci.h>
+#include <linux/export.h>
int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt)
{
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 3a5a6fcc0ea..53df39a22c8 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -16,11 +16,13 @@
#include <xen/interface/io/pciif.h>
#include <asm/xen/pci.h>
#include <linux/interrupt.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/time.h>
+#include <xen/platform_pci.h>
+#include <asm/xen/swiotlb-xen.h>
#define INVALID_GRANT_REF (0)
#define INVALID_EVTCHN (-1)
@@ -189,7 +191,7 @@ static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
if (verbose_request)
dev_info(&pdev->xdev->dev,
- "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
+ "read dev=%04x:%02x:%02x.%d - offset %x size %d\n",
pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), where, size);
@@ -228,7 +230,7 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
if (verbose_request)
dev_info(&pdev->xdev->dev,
- "write dev=%04x:%02x:%02x.%01x - "
+ "write dev=%04x:%02x:%02x.%d - "
"offset %x size %d val %x\n",
pci_domain_nr(bus), bus->number,
PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
@@ -236,14 +238,14 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
return errno_to_pcibios_err(do_pci_op(pdev, &op));
}
-struct pci_ops pcifront_bus_ops = {
+static struct pci_ops pcifront_bus_ops = {
.read = pcifront_bus_read,
.write = pcifront_bus_write,
};
#ifdef CONFIG_PCI_MSI
static int pci_frontend_enable_msix(struct pci_dev *dev,
- int **vector, int nvec)
+ int vector[], int nvec)
{
int err;
int i;
@@ -277,18 +279,25 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
if (likely(!err)) {
if (likely(!op.value)) {
/* we get the result */
- for (i = 0; i < nvec; i++)
- *(*vector+i) = op.msix_entries[i].vector;
- return 0;
+ for (i = 0; i < nvec; i++) {
+ if (op.msix_entries[i].vector <= 0) {
+ dev_warn(&dev->dev, "MSI-X entry %d is invalid: %d!\n",
+ i, op.msix_entries[i].vector);
+ err = -EINVAL;
+ vector[i] = -1;
+ continue;
+ }
+ vector[i] = op.msix_entries[i].vector;
+ }
} else {
printk(KERN_DEBUG "enable msix get value %x\n",
op.value);
- return op.value;
+ err = op.value;
}
} else {
dev_err(&dev->dev, "enable msix get err %x\n", err);
- return err;
}
+ return err;
}
static void pci_frontend_disable_msix(struct pci_dev *dev)
@@ -310,7 +319,7 @@ static void pci_frontend_disable_msix(struct pci_dev *dev)
dev_err(&dev->dev, "pci_disable_msix get err %x\n", err);
}
-static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
+static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
{
int err;
struct xen_pci_op op = {
@@ -324,7 +333,13 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
err = do_pci_op(pdev, &op);
if (likely(!err)) {
- *(*vector) = op.value;
+ vector[0] = op.value;
+ if (op.value <= 0) {
+ dev_warn(&dev->dev, "MSI entry is invalid: %d!\n",
+ op.value);
+ err = -EINVAL;
+ vector[0] = -1;
+ }
} else {
dev_err(&dev->dev, "pci frontend enable msi failed for dev "
"%x:%x\n", op.bus, op.devfn);
@@ -388,9 +403,8 @@ static int pcifront_claim_resource(struct pci_dev *dev, void *data)
dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n",
pci_name(dev), i);
if (pci_claim_resource(dev, i)) {
- dev_err(&pdev->xdev->dev, "Could not claim "
- "resource %s/%d! Device offline. Try "
- "giving less than 4GB to domain.\n",
+ dev_err(&pdev->xdev->dev, "Could not claim resource %s/%d! "
+ "Device offline. Try using e820_host=1 in the guest config.\n",
pci_name(dev), i);
}
}
@@ -399,7 +413,7 @@ static int pcifront_claim_resource(struct pci_dev *dev, void *data)
return 0;
}
-static int __devinit pcifront_scan_bus(struct pcifront_device *pdev,
+static int pcifront_scan_bus(struct pcifront_device *pdev,
unsigned int domain, unsigned int bus,
struct pci_bus *b)
{
@@ -421,14 +435,14 @@ static int __devinit pcifront_scan_bus(struct pcifront_device *pdev,
d = pci_scan_single_device(b, devfn);
if (d)
dev_info(&pdev->xdev->dev, "New device on "
- "%04x:%02x:%02x.%02x found.\n", domain, bus,
+ "%04x:%02x:%02x.%d found.\n", domain, bus,
PCI_SLOT(devfn), PCI_FUNC(devfn));
}
return 0;
}
-static int __devinit pcifront_scan_root(struct pcifront_device *pdev,
+static int pcifront_scan_root(struct pcifront_device *pdev,
unsigned int domain, unsigned int bus)
{
struct pci_bus *b;
@@ -458,12 +472,15 @@ static int __devinit pcifront_scan_root(struct pcifront_device *pdev,
}
pcifront_init_sd(sd, domain, bus, pdev);
+ pci_lock_rescan_remove();
+
b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
&pcifront_bus_ops, sd);
if (!b) {
dev_err(&pdev->xdev->dev,
"Error creating PCI Frontend Bus!\n");
err = -ENOMEM;
+ pci_unlock_rescan_remove();
goto err_out;
}
@@ -481,6 +498,7 @@ static int __devinit pcifront_scan_root(struct pcifront_device *pdev,
/* Create SysFS and notify udev of the devices. Aka: "going live" */
pci_bus_add_devices(b);
+ pci_unlock_rescan_remove();
return err;
err_out:
@@ -490,7 +508,7 @@ err_out:
return err;
}
-static int __devinit pcifront_rescan_root(struct pcifront_device *pdev,
+static int pcifront_rescan_root(struct pcifront_device *pdev,
unsigned int domain, unsigned int bus)
{
int err;
@@ -533,7 +551,7 @@ static void free_root_bus_devs(struct pci_bus *bus)
dev = container_of(bus->devices.next, struct pci_dev,
bus_list);
dev_dbg(&dev->dev, "removing device\n");
- pci_remove_bus_device(dev);
+ pci_stop_and_remove_bus_device(dev);
}
}
@@ -543,6 +561,7 @@ static void pcifront_free_roots(struct pcifront_device *pdev)
dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n");
+ pci_lock_rescan_remove();
list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
list_del(&bus_entry->list);
@@ -555,6 +574,7 @@ static void pcifront_free_roots(struct pcifront_device *pdev)
kfree(bus_entry);
}
+ pci_unlock_rescan_remove();
}
static pci_ers_result_t pcifront_common_process(int cmd,
@@ -582,7 +602,7 @@ static pci_ers_result_t pcifront_common_process(int cmd,
}
pdrv = pcidev->driver;
- if (get_driver(&pdrv->driver)) {
+ if (pdrv) {
if (pdrv->err_handler && pdrv->err_handler->error_detected) {
dev_dbg(&pcidev->dev,
"trying to call AER service\n");
@@ -612,7 +632,6 @@ static pci_ers_result_t pcifront_common_process(int cmd,
}
}
}
- put_driver(&pdrv->driver);
}
if (!flag)
result = PCI_ERS_RESULT_NONE;
@@ -643,9 +662,9 @@ static void pcifront_do_aer(struct work_struct *data)
notify_remote_via_evtchn(pdev->evtchn);
	/* in case we lost an AER request in the four-line time window above */
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(_PDEVB_op_active, &pdev->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
schedule_pcifront_aer_op(pdev);
@@ -657,7 +676,7 @@ static irqreturn_t pcifront_handler_aer(int irq, void *dev)
schedule_pcifront_aer_op(pdev);
return IRQ_HANDLED;
}
-static int pcifront_connect(struct pcifront_device *pdev)
+static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)
{
int err = 0;
@@ -666,13 +685,16 @@ static int pcifront_connect(struct pcifront_device *pdev)
if (!pcifront_dev) {
dev_info(&pdev->xdev->dev, "Installing PCI frontend\n");
pcifront_dev = pdev;
- } else {
- dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n");
+ } else
err = -EEXIST;
- }
spin_unlock(&pcifront_dev_lock);
+ if (!err && !swiotlb_nr_tbl()) {
+ err = pci_xen_swiotlb_init_late();
+ if (err)
+ dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");
+ }
return err;
}
@@ -733,8 +755,7 @@ static void free_pdev(struct pcifront_device *pdev)
pcifront_free_roots(pdev);
- /*For PCIE_AER error handling job*/
- flush_scheduled_work();
+ cancel_work_sync(&pdev->op_work);
if (pdev->irq >= 0)
unbind_from_irqhandler(pdev->irq, pdev);
@@ -819,7 +840,7 @@ out:
return err;
}
-static int __devinit pcifront_try_connect(struct pcifront_device *pdev)
+static int pcifront_try_connect(struct pcifront_device *pdev)
{
int err = -EFAULT;
int i, num_roots, len;
@@ -832,10 +853,10 @@ static int __devinit pcifront_try_connect(struct pcifront_device *pdev)
XenbusStateInitialised)
goto out;
- err = pcifront_connect(pdev);
- if (err) {
+ err = pcifront_connect_and_init_dma(pdev);
+ if (err && err != -EEXIST) {
xenbus_dev_fatal(pdev->xdev, err,
- "Error connecting PCI Frontend");
+ "Error setting up PCI Frontend");
goto out;
}
@@ -909,7 +930,7 @@ out:
return err;
}
-static int __devinit pcifront_attach_devices(struct pcifront_device *pdev)
+static int pcifront_attach_devices(struct pcifront_device *pdev)
{
int err = -EFAULT;
int i, num_roots, len;
@@ -972,7 +993,6 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
int err = 0;
int i, num_devs;
unsigned int domain, bus, slot, func;
- struct pci_bus *pci_bus;
struct pci_dev *pci_dev;
char str[64];
@@ -1022,24 +1042,21 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
goto out;
}
- pci_bus = pci_find_bus(domain, bus);
- if (!pci_bus) {
- dev_dbg(&pdev->xdev->dev, "Cannot get bus %04x:%02x\n",
- domain, bus);
- continue;
- }
- pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func));
+ pci_dev = pci_get_domain_bus_and_slot(domain, bus,
+ PCI_DEVFN(slot, func));
if (!pci_dev) {
dev_dbg(&pdev->xdev->dev,
- "Cannot get PCI device %04x:%02x:%02x.%02x\n",
+ "Cannot get PCI device %04x:%02x:%02x.%d\n",
domain, bus, slot, func);
continue;
}
- pci_remove_bus_device(pci_dev);
+ pci_lock_rescan_remove();
+ pci_stop_and_remove_bus_device(pci_dev);
pci_dev_put(pci_dev);
+ pci_unlock_rescan_remove();
dev_dbg(&pdev->xdev->dev,
- "PCI device %04x:%02x:%02x.%02x removed.\n",
+ "PCI device %04x:%02x:%02x.%d removed.\n",
domain, bus, slot, func);
}
@@ -1059,13 +1076,16 @@ static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev,
case XenbusStateInitialising:
case XenbusStateInitWait:
case XenbusStateInitialised:
- case XenbusStateClosed:
break;
case XenbusStateConnected:
pcifront_try_connect(pdev);
break;
+ case XenbusStateClosed:
+ if (xdev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
dev_warn(&xdev->dev, "backend going away!\n");
pcifront_try_disconnect(pdev);
@@ -1116,28 +1136,28 @@ static const struct xenbus_device_id xenpci_ids[] = {
{""},
};
-static struct xenbus_driver xenbus_pcifront_driver = {
- .name = "pcifront",
- .owner = THIS_MODULE,
- .ids = xenpci_ids,
+static DEFINE_XENBUS_DRIVER(xenpci, "pcifront",
.probe = pcifront_xenbus_probe,
.remove = pcifront_xenbus_remove,
.otherend_changed = pcifront_backend_changed,
-};
+);
static int __init pcifront_init(void)
{
if (!xen_pv_domain() || xen_initial_domain())
return -ENODEV;
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
pci_frontend_registrar(1 /* enable */);
- return xenbus_register_frontend(&xenbus_pcifront_driver);
+ return xenbus_register_frontend(&xenpci_driver);
}
static void __exit pcifront_cleanup(void)
{
- xenbus_unregister_driver(&xenbus_pcifront_driver);
+ xenbus_unregister_driver(&xenpci_driver);
pci_frontend_registrar(0 /* disable */);
}
module_init(pcifront_init);