Diffstat (limited to 'drivers/pci/pci.c')
-rw-r--r--   drivers/pci/pci.c | 940
1 file changed, 478 insertions(+), 462 deletions(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 07369f32e8b..1c8592b0e14 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -106,15 +106,15 @@ static bool pcie_ari_disabled; * Given a PCI bus, returns the highest PCI bus number present in the set * including the given PCI bus and its list of child PCI buses. */ -unsigned char pci_bus_max_busnr(struct pci_bus* bus) +unsigned char pci_bus_max_busnr(struct pci_bus *bus) { - struct list_head *tmp; + struct pci_bus *tmp; unsigned char max, n; max = bus->busn_res.end; - list_for_each(tmp, &bus->children) { - n = pci_bus_max_busnr(pci_bus_b(tmp)); - if(n > max) + list_for_each_entry(tmp, &bus->children, node) { + n = pci_bus_max_busnr(tmp); + if (n > max) max = n; } return max; @@ -226,6 +226,7 @@ int pci_find_capability(struct pci_dev *dev, int cap) return pos; } +EXPORT_SYMBOL(pci_find_capability); /** * pci_bus_find_capability - query for devices' capabilities @@ -253,6 +254,7 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) return pos; } +EXPORT_SYMBOL(pci_bus_find_capability); /** * pci_find_next_ext_capability - Find an extended capability @@ -401,33 +403,67 @@ EXPORT_SYMBOL_GPL(pci_find_ht_capability); * @res: child resource record for which parent is sought * * For given resource region of given device, return the resource - * region of parent bus the given region is contained in or where - * it should be allocated from. + * region of parent bus the given region is contained in. */ -struct resource * -pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) +struct resource *pci_find_parent_resource(const struct pci_dev *dev, + struct resource *res) { const struct pci_bus *bus = dev->bus; + struct resource *r; int i; - struct resource *best = NULL, *r; pci_bus_for_each_resource(bus, r, i) { if (!r) continue; - if (res->start && !(res->start >= r->start && res->end <= r->end)) - continue; /* Not contained */ - if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM)) - continue; /* Wrong type */ - if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) - return r; /* Exact match */ - /* We can't insert a non-prefetch resource inside a prefetchable parent .. */ - if (r->flags & IORESOURCE_PREFETCH) - continue; - /* .. but we can put a prefetchable resource inside a non-prefetchable one */ - if (!best) - best = r; + if (res->start && resource_contains(r, res)) { + + /* + * If the window is prefetchable but the BAR is + * not, the allocator made a mistake. + */ + if (r->flags & IORESOURCE_PREFETCH && + !(res->flags & IORESOURCE_PREFETCH)) + return NULL; + + /* + * If we're below a transparent bridge, there may + * be both a positively-decoded aperture and a + * subtractively-decoded region that contain the BAR. + * We want the positively-decoded one, so this depends + * on pci_bus_for_each_resource() giving us those + * first. + */ + return r; + } + } + return NULL; +} +EXPORT_SYMBOL(pci_find_parent_resource); + +/** + * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos + * @dev: the PCI device to operate on + * @pos: config space offset of status word + * @mask: mask of bit(s) to care about in status word + * + * Return 1 when mask bit(s) in status word clear, 0 otherwise. 
+ */ +int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask) +{ + int i; + + /* Wait for Transaction Pending bit clean */ + for (i = 0; i < 4; i++) { + u16 status; + if (i) + msleep((1 << (i - 1)) * 100); + + pci_read_config_word(dev, pos, &status); + if (!(status & mask)) + return 1; } - return best; + + return 0; } /** @@ -437,8 +473,7 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) * Restore the BAR values for a given device, so as to make it * accessible by its driver. */ -static void -pci_restore_bars(struct pci_dev *dev) +static void pci_restore_bars(struct pci_dev *dev) { int i; @@ -463,7 +498,7 @@ static inline bool platform_pci_power_manageable(struct pci_dev *dev) } static inline int platform_pci_set_power_state(struct pci_dev *dev, - pci_power_t t) + pci_power_t t) { return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS; } @@ -520,8 +555,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) */ if (state != PCI_D0 && dev->current_state <= PCI_D3cold && dev->current_state > state) { - dev_err(&dev->dev, "invalid power transition " - "(from state %d to %d)\n", dev->current_state, state); + dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n", + dev->current_state, state); return -EINVAL; } @@ -568,8 +603,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); if (dev->current_state != state && printk_ratelimit()) - dev_info(&dev->dev, "Refused to change power state, " - "currently in D%d\n", dev->current_state); + dev_info(&dev->dev, "Refused to change power state, currently in D%d\n", + dev->current_state); /* * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT @@ -657,6 +692,28 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) } /** + * pci_wakeup - Wake up a PCI device + * @pci_dev: Device to handle. + * @ign: ignored parameter + */ +static int pci_wakeup(struct pci_dev *pci_dev, void *ign) +{ + pci_wakeup_event(pci_dev); + pm_request_resume(&pci_dev->dev); + return 0; +} + +/** + * pci_wakeup_bus - Walk given bus and wake up devices on it + * @bus: Top bus of the subtree to walk. + */ +static void pci_wakeup_bus(struct pci_bus *bus) +{ + if (bus) + pci_walk_bus(bus, pci_wakeup, NULL); +} + +/** * __pci_start_power_transition - Start power transition of a PCI device * @dev: PCI device to handle. * @state: State to put the device into. 
@@ -791,6 +848,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) return error; } +EXPORT_SYMBOL(pci_set_power_state); /** * pci_choose_state - Choose the power state of a PCI device @@ -829,24 +887,32 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) } return PCI_D0; } - EXPORT_SYMBOL(pci_choose_state); #define PCI_EXP_SAVE_REGS 7 - -static struct pci_cap_saved_state *pci_find_saved_cap( - struct pci_dev *pci_dev, char cap) +static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev, + u16 cap, bool extended) { struct pci_cap_saved_state *tmp; hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) { - if (tmp->cap.cap_nr == cap) + if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap) return tmp; } return NULL; } +struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap) +{ + return _pci_find_saved_cap(dev, cap, false); +} + +struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap) +{ + return _pci_find_saved_cap(dev, cap, true); +} + static int pci_save_pcie_state(struct pci_dev *dev) { int i = 0; @@ -936,8 +1002,7 @@ static void pci_restore_pcix_state(struct pci_dev *dev) * pci_save_state - save the PCI configuration space of a device before suspending * @dev: - PCI device that we're dealing with */ -int -pci_save_state(struct pci_dev *dev) +int pci_save_state(struct pci_dev *dev) { int i; /* XXX: 100% dword access ok here? */ @@ -948,8 +1013,11 @@ pci_save_state(struct pci_dev *dev) return i; if ((i = pci_save_pcix_state(dev)) != 0) return i; + if ((i = pci_save_vc_state(dev)) != 0) + return i; return 0; } +EXPORT_SYMBOL(pci_save_state); static void pci_restore_config_dword(struct pci_dev *pdev, int offset, u32 saved_val, int retry) @@ -961,8 +1029,8 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset, return; for (;;) { - dev_dbg(&pdev->dev, "restoring config space at offset " - "%#x (was %#x, writing %#x)\n", offset, val, saved_val); + dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n", + offset, val, saved_val); pci_write_config_dword(pdev, offset, saved_val); if (retry-- <= 0) return; @@ -1010,6 +1078,7 @@ void pci_restore_state(struct pci_dev *dev) /* PCI Express register must be restored first */ pci_restore_pcie_state(dev); pci_restore_ats_state(dev); + pci_restore_vc_state(dev); pci_restore_config_space(dev); @@ -1019,6 +1088,7 @@ void pci_restore_state(struct pci_dev *dev) dev->state_saved = false; } +EXPORT_SYMBOL(pci_restore_state); struct pci_saved_state { u32 config_space[16]; @@ -1071,7 +1141,8 @@ EXPORT_SYMBOL_GPL(pci_store_saved_state); * @dev: PCI device that we're dealing with * @state: Saved state returned from pci_store_saved_state() */ -int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state) +static int pci_load_saved_state(struct pci_dev *dev, + struct pci_saved_state *state) { struct pci_cap_saved_data *cap; @@ -1087,7 +1158,7 @@ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state) while (cap->size) { struct pci_cap_saved_state *tmp; - tmp = pci_find_saved_cap(dev, cap->cap_nr); + tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended); if (!tmp || tmp->cap.size != cap->size) return -EINVAL; @@ -1099,7 +1170,6 @@ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state) dev->state_saved = true; return 0; } -EXPORT_SYMBOL_GPL(pci_load_saved_state); /** * pci_load_and_free_saved_state - Reload the save state pointed to by 
state, @@ -1117,9 +1187,16 @@ int pci_load_and_free_saved_state(struct pci_dev *dev, } EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state); +int __weak pcibios_enable_device(struct pci_dev *dev, int bars) +{ + return pci_enable_resources(dev, bars); +} + static int do_pci_enable_device(struct pci_dev *dev, int bars) { int err; + u16 cmd; + u8 pin; err = pci_set_power_state(dev, PCI_D0); if (err < 0 && err != -EIO) @@ -1129,6 +1206,17 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars) return err; pci_fixup_device(pci_fixup_enable, dev); + if (dev->msi_enabled || dev->msix_enabled) + return 0; + + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); + if (pin) { + pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (cmd & PCI_COMMAND_INTX_DISABLE) + pci_write_config_word(dev, PCI_COMMAND, + cmd & ~PCI_COMMAND_INTX_DISABLE); + } + return 0; } @@ -1145,6 +1233,7 @@ int pci_reenable_device(struct pci_dev *dev) return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1); return 0; } +EXPORT_SYMBOL(pci_reenable_device); static void pci_enable_bridge(struct pci_dev *dev) { @@ -1219,6 +1308,7 @@ int pci_enable_device_io(struct pci_dev *dev) { return pci_enable_device_flags(dev, IORESOURCE_IO); } +EXPORT_SYMBOL(pci_enable_device_io); /** * pci_enable_device_mem - Initialize a device for use with Memory space @@ -1232,6 +1322,7 @@ int pci_enable_device_mem(struct pci_dev *dev) { return pci_enable_device_flags(dev, IORESOURCE_MEM); } +EXPORT_SYMBOL(pci_enable_device_mem); /** * pci_enable_device - Initialize device before it's used by a driver. @@ -1248,6 +1339,7 @@ int pci_enable_device(struct pci_dev *dev) { return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO); } +EXPORT_SYMBOL(pci_enable_device); /* * Managed PCI resources. This manages device on/off, intx/msi/msix @@ -1285,7 +1377,7 @@ static void pcim_release(struct device *gendev, void *res) pci_disable_device(dev); } -static struct pci_devres * get_pci_dr(struct pci_dev *pdev) +static struct pci_devres *get_pci_dr(struct pci_dev *pdev) { struct pci_devres *dr, *new_dr; @@ -1299,7 +1391,7 @@ static struct pci_devres * get_pci_dr(struct pci_dev *pdev) return devres_get(&pdev->dev, new_dr, NULL, NULL); } -static struct pci_devres * find_pci_dr(struct pci_dev *pdev) +static struct pci_devres *find_pci_dr(struct pci_dev *pdev) { if (pci_is_managed(pdev)) return devres_find(&pdev->dev, pcim_release, NULL, NULL); @@ -1330,6 +1422,7 @@ int pcim_enable_device(struct pci_dev *pdev) } return rc; } +EXPORT_SYMBOL(pcim_enable_device); /** * pcim_pin_device - Pin managed PCI device @@ -1348,6 +1441,7 @@ void pcim_pin_device(struct pci_dev *pdev) if (dr) dr->pinned = 1; } +EXPORT_SYMBOL(pcim_pin_device); /* * pcibios_add_device - provide arch specific hooks when adding device dev @@ -1357,7 +1451,7 @@ void pcim_pin_device(struct pci_dev *pdev) * devices are added. This is the default implementation. Architecture * implementations can override this. */ -int __weak pcibios_add_device (struct pci_dev *dev) +int __weak pcibios_add_device(struct pci_dev *dev) { return 0; } @@ -1382,6 +1476,17 @@ void __weak pcibios_release_device(struct pci_dev *dev) {} */ void __weak pcibios_disable_device (struct pci_dev *dev) {} +/** + * pcibios_penalize_isa_irq - penalize an ISA IRQ + * @irq: ISA IRQ to penalize + * @active: IRQ active or not + * + * Permits the platform to provide architecture-specific functionality when + * penalizing ISA IRQs. This is the default implementation. Architecture + * implementations can override this. 
+ */ +void __weak pcibios_penalize_isa_irq(int irq, int active) {} + static void do_pci_disable_device(struct pci_dev *dev) { u16 pci_command; @@ -1418,8 +1523,7 @@ void pci_disable_enabled_device(struct pci_dev *dev) * Note we don't actually disable the device until all callers of * pci_enable_device() have called pci_disable_device(). */ -void -pci_disable_device(struct pci_dev *dev) +void pci_disable_device(struct pci_dev *dev) { struct pci_devres *dr; @@ -1437,6 +1541,7 @@ pci_disable_device(struct pci_dev *dev) dev->is_busmaster = 0; } +EXPORT_SYMBOL(pci_disable_device); /** * pcibios_set_pcie_reset_state - set reset state for device dev @@ -1465,6 +1570,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) { return pcibios_set_pcie_reset_state(dev, state); } +EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); /** * pci_check_pme_status - Check if given device has generated PME. @@ -1531,27 +1637,6 @@ void pci_pme_wakeup_bus(struct pci_bus *bus) pci_walk_bus(bus, pci_pme_wakeup, (void *)true); } -/** - * pci_wakeup - Wake up a PCI device - * @pci_dev: Device to handle. - * @ign: ignored parameter - */ -static int pci_wakeup(struct pci_dev *pci_dev, void *ign) -{ - pci_wakeup_event(pci_dev); - pm_request_resume(&pci_dev->dev); - return 0; -} - -/** - * pci_wakeup_bus - Walk given bus and wake up devices on it - * @bus: Top bus of the subtree to walk. - */ -void pci_wakeup_bus(struct pci_bus *bus) -{ - if (bus) - pci_walk_bus(bus, pci_wakeup, NULL); -} /** * pci_pme_capable - check the capability of PCI device to generate PME# @@ -1565,35 +1650,34 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) return !!(dev->pme_support & (1 << state)); } +EXPORT_SYMBOL(pci_pme_capable); static void pci_pme_list_scan(struct work_struct *work) { struct pci_pme_device *pme_dev, *n; mutex_lock(&pci_pme_list_mutex); - if (!list_empty(&pci_pme_list)) { - list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) { - if (pme_dev->dev->pme_poll) { - struct pci_dev *bridge; - - bridge = pme_dev->dev->bus->self; - /* - * If bridge is in low power state, the - * configuration space of subordinate devices - * may be not accessible - */ - if (bridge && bridge->current_state != PCI_D0) - continue; - pci_pme_wakeup(pme_dev->dev, NULL); - } else { - list_del(&pme_dev->list); - kfree(pme_dev); - } + list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) { + if (pme_dev->dev->pme_poll) { + struct pci_dev *bridge; + + bridge = pme_dev->dev->bus->self; + /* + * If bridge is in low power state, the + * configuration space of subordinate devices + * may be not accessible + */ + if (bridge && bridge->current_state != PCI_D0) + continue; + pci_pme_wakeup(pme_dev->dev, NULL); + } else { + list_del(&pme_dev->list); + kfree(pme_dev); } - if (!list_empty(&pci_pme_list)) - schedule_delayed_work(&pci_pme_work, - msecs_to_jiffies(PME_TIMEOUT)); } + if (!list_empty(&pci_pme_list)) + schedule_delayed_work(&pci_pme_work, + msecs_to_jiffies(PME_TIMEOUT)); mutex_unlock(&pci_pme_list_mutex); } @@ -1671,6 +1755,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable) dev_dbg(&dev->dev, "PME# %s\n", enable ? 
"enabled" : "disabled"); } +EXPORT_SYMBOL(pci_pme_active); /** * __pci_enable_wake - enable PCI device as wakeup event source @@ -1756,6 +1841,7 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable) pci_enable_wake(dev, PCI_D3cold, enable) : pci_enable_wake(dev, PCI_D3hot, enable); } +EXPORT_SYMBOL(pci_wake_from_d3); /** * pci_target_state - find an appropriate low power state for a given PCI dev @@ -1765,7 +1851,7 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable) * If the platform can't manage @dev, return the deepest state from which it * can generate wake events, based on any available PME info. */ -pci_power_t pci_target_state(struct pci_dev *dev) +static pci_power_t pci_target_state(struct pci_dev *dev) { pci_power_t target_state = PCI_D3hot; @@ -1834,6 +1920,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev) return error; } +EXPORT_SYMBOL(pci_prepare_to_sleep); /** * pci_back_from_sleep - turn PCI device on during system-wide transition into working state @@ -1846,6 +1933,7 @@ int pci_back_from_sleep(struct pci_dev *dev) pci_enable_wake(dev, PCI_D0, false); return pci_set_power_state(dev, PCI_D0); } +EXPORT_SYMBOL(pci_back_from_sleep); /** * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend. @@ -2021,18 +2109,24 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev, } /** - * pci_add_cap_save_buffer - allocate buffer for saving given capability registers + * _pci_add_cap_save_buffer - allocate buffer for saving given + * capability registers * @dev: the PCI device * @cap: the capability to allocate the buffer for + * @extended: Standard or Extended capability ID * @size: requested size of the buffer */ -static int pci_add_cap_save_buffer( - struct pci_dev *dev, char cap, unsigned int size) +static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap, + bool extended, unsigned int size) { int pos; struct pci_cap_saved_state *save_state; - pos = pci_find_capability(dev, cap); + if (extended) + pos = pci_find_ext_capability(dev, cap); + else + pos = pci_find_capability(dev, cap); + if (pos <= 0) return 0; @@ -2041,12 +2135,23 @@ static int pci_add_cap_save_buffer( return -ENOMEM; save_state->cap.cap_nr = cap; + save_state->cap.cap_extended = extended; save_state->cap.size = size; pci_add_saved_cap(dev, save_state); return 0; } +int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size) +{ + return _pci_add_cap_save_buffer(dev, cap, false, size); +} + +int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size) +{ + return _pci_add_cap_save_buffer(dev, cap, true, size); +} + /** * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities * @dev: the PCI device @@ -2065,6 +2170,8 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev) if (error) dev_err(&dev->dev, "unable to preallocate PCI-X save buffer\n"); + + pci_allocate_vc_save_buffers(dev); } void pci_free_cap_save_buffers(struct pci_dev *dev) @@ -2110,242 +2217,6 @@ void pci_configure_ari(struct pci_dev *dev) } } -/** - * pci_enable_ido - enable ID-based Ordering on a device - * @dev: the PCI device - * @type: which types of IDO to enable - * - * Enable ID-based ordering on @dev. @type can contain the bits - * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate - * which types of transactions are allowed to be re-ordered. 
- */ -void pci_enable_ido(struct pci_dev *dev, unsigned long type) -{ - u16 ctrl = 0; - - if (type & PCI_EXP_IDO_REQUEST) - ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN; - if (type & PCI_EXP_IDO_COMPLETION) - ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN; - if (ctrl) - pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl); -} -EXPORT_SYMBOL(pci_enable_ido); - -/** - * pci_disable_ido - disable ID-based ordering on a device - * @dev: the PCI device - * @type: which types of IDO to disable - */ -void pci_disable_ido(struct pci_dev *dev, unsigned long type) -{ - u16 ctrl = 0; - - if (type & PCI_EXP_IDO_REQUEST) - ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN; - if (type & PCI_EXP_IDO_COMPLETION) - ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN; - if (ctrl) - pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl); -} -EXPORT_SYMBOL(pci_disable_ido); - -/** - * pci_enable_obff - enable optimized buffer flush/fill - * @dev: PCI device - * @type: type of signaling to use - * - * Try to enable @type OBFF signaling on @dev. It will try using WAKE# - * signaling if possible, falling back to message signaling only if - * WAKE# isn't supported. @type should indicate whether the PCIe link - * be brought out of L0s or L1 to send the message. It should be either - * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0. - * - * If your device can benefit from receiving all messages, even at the - * power cost of bringing the link back up from a low power state, use - * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_OBFF_SIGNAL_L0 (the - * preferred type). - * - * RETURNS: - * Zero on success, appropriate error number on failure. - */ -int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type) -{ - u32 cap; - u16 ctrl; - int ret; - - pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); - if (!(cap & PCI_EXP_DEVCAP2_OBFF_MASK)) - return -ENOTSUPP; /* no OBFF support at all */ - - /* Make sure the topology supports OBFF as well */ - if (dev->bus->self) { - ret = pci_enable_obff(dev->bus->self, type); - if (ret) - return ret; - } - - pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl); - if (cap & PCI_EXP_DEVCAP2_OBFF_WAKE) - ctrl |= PCI_EXP_DEVCTL2_OBFF_WAKE_EN; - else { - switch (type) { - case PCI_EXP_OBFF_SIGNAL_L0: - if (!(ctrl & PCI_EXP_DEVCTL2_OBFF_WAKE_EN)) - ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGA_EN; - break; - case PCI_EXP_OBFF_SIGNAL_ALWAYS: - ctrl &= ~PCI_EXP_DEVCTL2_OBFF_WAKE_EN; - ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGB_EN; - break; - default: - WARN(1, "bad OBFF signal type\n"); - return -ENOTSUPP; - } - } - pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl); - - return 0; -} -EXPORT_SYMBOL(pci_enable_obff); - -/** - * pci_disable_obff - disable optimized buffer flush/fill - * @dev: PCI device - * - * Disable OBFF on @dev. - */ -void pci_disable_obff(struct pci_dev *dev) -{ - pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, - PCI_EXP_DEVCTL2_OBFF_WAKE_EN); -} -EXPORT_SYMBOL(pci_disable_obff); - -/** - * pci_ltr_supported - check whether a device supports LTR - * @dev: PCI device - * - * RETURNS: - * True if @dev supports latency tolerance reporting, false otherwise. - */ -static bool pci_ltr_supported(struct pci_dev *dev) -{ - u32 cap; - - pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); - - return cap & PCI_EXP_DEVCAP2_LTR; -} - -/** - * pci_enable_ltr - enable latency tolerance reporting - * @dev: PCI device - * - * Enable LTR on @dev if possible, which means enabling it first on - * upstream ports. - * - * RETURNS: - * Zero on success, errno on failure. 
- */ -int pci_enable_ltr(struct pci_dev *dev) -{ - int ret; - - /* Only primary function can enable/disable LTR */ - if (PCI_FUNC(dev->devfn) != 0) - return -EINVAL; - - if (!pci_ltr_supported(dev)) - return -ENOTSUPP; - - /* Enable upstream ports first */ - if (dev->bus->self) { - ret = pci_enable_ltr(dev->bus->self); - if (ret) - return ret; - } - - return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, - PCI_EXP_DEVCTL2_LTR_EN); -} -EXPORT_SYMBOL(pci_enable_ltr); - -/** - * pci_disable_ltr - disable latency tolerance reporting - * @dev: PCI device - */ -void pci_disable_ltr(struct pci_dev *dev) -{ - /* Only primary function can enable/disable LTR */ - if (PCI_FUNC(dev->devfn) != 0) - return; - - if (!pci_ltr_supported(dev)) - return; - - pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, - PCI_EXP_DEVCTL2_LTR_EN); -} -EXPORT_SYMBOL(pci_disable_ltr); - -static int __pci_ltr_scale(int *val) -{ - int scale = 0; - - while (*val > 1023) { - *val = (*val + 31) / 32; - scale++; - } - return scale; -} - -/** - * pci_set_ltr - set LTR latency values - * @dev: PCI device - * @snoop_lat_ns: snoop latency in nanoseconds - * @nosnoop_lat_ns: nosnoop latency in nanoseconds - * - * Figure out the scale and set the LTR values accordingly. - */ -int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns) -{ - int pos, ret, snoop_scale, nosnoop_scale; - u16 val; - - if (!pci_ltr_supported(dev)) - return -ENOTSUPP; - - snoop_scale = __pci_ltr_scale(&snoop_lat_ns); - nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns); - - if (snoop_lat_ns > PCI_LTR_VALUE_MASK || - nosnoop_lat_ns > PCI_LTR_VALUE_MASK) - return -EINVAL; - - if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) || - (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT))) - return -EINVAL; - - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); - if (!pos) - return -ENOTSUPP; - - val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns; - ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val); - if (ret != 4) - return -EIO; - - val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns; - ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val); - if (ret != 4) - return -EIO; - - return 0; -} -EXPORT_SYMBOL(pci_set_ltr); - static int pci_acs_enable; /** @@ -2357,21 +2228,18 @@ void pci_request_acs(void) } /** - * pci_enable_acs - enable ACS if hardware support it + * pci_std_enable_acs - enable ACS on devices using standard ACS capabilites * @dev: the PCI device */ -void pci_enable_acs(struct pci_dev *dev) +static int pci_std_enable_acs(struct pci_dev *dev) { int pos; u16 cap; u16 ctrl; - if (!pci_acs_enable) - return; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); if (!pos) - return; + return -ENODEV; pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); @@ -2389,6 +2257,23 @@ void pci_enable_acs(struct pci_dev *dev) ctrl |= (cap & PCI_ACS_UF); pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); + + return 0; +} + +/** + * pci_enable_acs - enable ACS if hardware support it + * @dev: the PCI device + */ +void pci_enable_acs(struct pci_dev *dev) +{ + if (!pci_acs_enable) + return; + + if (!pci_std_enable_acs(dev)) + return; + + pci_dev_specific_enable_acs(dev); } static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags) @@ -2544,8 +2429,7 @@ u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin) return (((pin - 1) + slot) % 4) + 1; } -int -pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev 
**bridge) +int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) { u8 pin; @@ -2607,6 +2491,7 @@ void pci_release_region(struct pci_dev *pdev, int bar) if (dr) dr->region_mask &= ~(1 << bar); } +EXPORT_SYMBOL(pci_release_region); /** * __pci_request_region - Reserved PCI I/O and memory resource @@ -2627,8 +2512,8 @@ void pci_release_region(struct pci_dev *pdev, int bar) * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. */ -static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, - int exclusive) +static int __pci_request_region(struct pci_dev *pdev, int bar, + const char *res_name, int exclusive) { struct pci_devres *dr; @@ -2639,8 +2524,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n if (!request_region(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar), res_name)) goto err_out; - } - else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { + } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { if (!__request_mem_region(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar), res_name, exclusive)) @@ -2677,6 +2561,7 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) { return __pci_request_region(pdev, bar, res_name, 0); } +EXPORT_SYMBOL(pci_request_region); /** * pci_request_region_exclusive - Reserved PCI I/O and memory resource @@ -2696,10 +2581,13 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) * explicitly not allowed to map the resource via /dev/mem or * sysfs. */ -int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name) +int pci_request_region_exclusive(struct pci_dev *pdev, int bar, + const char *res_name) { return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE); } +EXPORT_SYMBOL(pci_request_region_exclusive); + /** * pci_release_selected_regions - Release selected PCI I/O and memory resources * @pdev: PCI device whose resources were previously reserved @@ -2716,9 +2604,10 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars) if (bars & (1 << i)) pci_release_region(pdev, i); } +EXPORT_SYMBOL(pci_release_selected_regions); static int __pci_request_selected_regions(struct pci_dev *pdev, int bars, - const char *res_name, int excl) + const char *res_name, int excl) { int i; @@ -2729,7 +2618,7 @@ static int __pci_request_selected_regions(struct pci_dev *pdev, int bars, return 0; err_out: - while(--i >= 0) + while (--i >= 0) if (bars & (1 << i)) pci_release_region(pdev, i); @@ -2748,13 +2637,15 @@ int pci_request_selected_regions(struct pci_dev *pdev, int bars, { return __pci_request_selected_regions(pdev, bars, res_name, 0); } +EXPORT_SYMBOL(pci_request_selected_regions); -int pci_request_selected_regions_exclusive(struct pci_dev *pdev, - int bars, const char *res_name) +int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars, + const char *res_name) { return __pci_request_selected_regions(pdev, bars, res_name, IORESOURCE_EXCLUSIVE); } +EXPORT_SYMBOL(pci_request_selected_regions_exclusive); /** * pci_release_regions - Release reserved PCI I/O and memory resources @@ -2769,6 +2660,7 @@ void pci_release_regions(struct pci_dev *pdev) { pci_release_selected_regions(pdev, (1 << 6) - 1); } +EXPORT_SYMBOL(pci_release_regions); /** * pci_request_regions - Reserved PCI I/O and memory resources @@ -2787,6 +2679,7 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name) { return 
pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name); } +EXPORT_SYMBOL(pci_request_regions); /** * pci_request_regions_exclusive - Reserved PCI I/O and memory resources @@ -2809,6 +2702,7 @@ int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) return pci_request_selected_regions_exclusive(pdev, ((1 << 6) - 1), res_name); } +EXPORT_SYMBOL(pci_request_regions_exclusive); static void __pci_set_master(struct pci_dev *dev, bool enable) { @@ -2878,6 +2772,7 @@ void pci_set_master(struct pci_dev *dev) __pci_set_master(dev, true); pcibios_set_master(dev); } +EXPORT_SYMBOL(pci_set_master); /** * pci_clear_master - disables bus-mastering for device dev @@ -2887,6 +2782,7 @@ void pci_clear_master(struct pci_dev *dev) { __pci_set_master(dev, false); } +EXPORT_SYMBOL(pci_clear_master); /** * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed @@ -2919,30 +2815,13 @@ int pci_set_cacheline_size(struct pci_dev *dev) if (cacheline_size == pci_cache_line_size) return 0; - dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not " - "supported\n", pci_cache_line_size << 2); + dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n", + pci_cache_line_size << 2); return -EINVAL; } EXPORT_SYMBOL_GPL(pci_set_cacheline_size); -#ifdef PCI_DISABLE_MWI -int pci_set_mwi(struct pci_dev *dev) -{ - return 0; -} - -int pci_try_set_mwi(struct pci_dev *dev) -{ - return 0; -} - -void pci_clear_mwi(struct pci_dev *dev) -{ -} - -#else - /** * pci_set_mwi - enables memory-write-invalidate PCI transaction * @dev: the PCI device for which MWI is enabled @@ -2951,9 +2830,11 @@ void pci_clear_mwi(struct pci_dev *dev) * * RETURNS: An appropriate -ERRNO error value on error, or zero for success. */ -int -pci_set_mwi(struct pci_dev *dev) +int pci_set_mwi(struct pci_dev *dev) { +#ifdef PCI_DISABLE_MWI + return 0; +#else int rc; u16 cmd; @@ -2962,14 +2843,15 @@ pci_set_mwi(struct pci_dev *dev) return rc; pci_read_config_word(dev, PCI_COMMAND, &cmd); - if (! (cmd & PCI_COMMAND_INVALIDATE)) { + if (!(cmd & PCI_COMMAND_INVALIDATE)) { dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n"); cmd |= PCI_COMMAND_INVALIDATE; pci_write_config_word(dev, PCI_COMMAND, cmd); } - return 0; +#endif } +EXPORT_SYMBOL(pci_set_mwi); /** * pci_try_set_mwi - enables memory-write-invalidate PCI transaction @@ -2982,9 +2864,13 @@ pci_set_mwi(struct pci_dev *dev) */ int pci_try_set_mwi(struct pci_dev *dev) { - int rc = pci_set_mwi(dev); - return rc; +#ifdef PCI_DISABLE_MWI + return 0; +#else + return pci_set_mwi(dev); +#endif } +EXPORT_SYMBOL(pci_try_set_mwi); /** * pci_clear_mwi - disables Memory-Write-Invalidate for device dev @@ -2992,9 +2878,9 @@ int pci_try_set_mwi(struct pci_dev *dev) * * Disables PCI Memory-Write-Invalidate transaction on the device */ -void -pci_clear_mwi(struct pci_dev *dev) +void pci_clear_mwi(struct pci_dev *dev) { +#ifndef PCI_DISABLE_MWI u16 cmd; pci_read_config_word(dev, PCI_COMMAND, &cmd); @@ -3002,8 +2888,9 @@ pci_clear_mwi(struct pci_dev *dev) cmd &= ~PCI_COMMAND_INVALIDATE; pci_write_config_word(dev, PCI_COMMAND, cmd); } +#endif } -#endif /* ! 
PCI_DISABLE_MWI */ +EXPORT_SYMBOL(pci_clear_mwi); /** * pci_intx - enables/disables PCI INTx for device dev @@ -3012,18 +2899,16 @@ pci_clear_mwi(struct pci_dev *dev) * * Enables/disables PCI INTx for device dev */ -void -pci_intx(struct pci_dev *pdev, int enable) +void pci_intx(struct pci_dev *pdev, int enable) { u16 pci_command, new; pci_read_config_word(pdev, PCI_COMMAND, &pci_command); - if (enable) { + if (enable) new = pci_command & ~PCI_COMMAND_INTX_DISABLE; - } else { + else new = pci_command | PCI_COMMAND_INTX_DISABLE; - } if (new != pci_command) { struct pci_devres *dr; @@ -3037,6 +2922,7 @@ pci_intx(struct pci_dev *pdev, int enable) } } } +EXPORT_SYMBOL_GPL(pci_intx); /** * pci_intx_mask_supported - probe for INTx masking support @@ -3066,8 +2952,8 @@ bool pci_intx_mask_supported(struct pci_dev *dev) * go ahead and check it. */ if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) { - dev_err(&dev->dev, "Command register changed from " - "0x%x to 0x%x: driver or hardware bug?\n", orig, new); + dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n", + orig, new); } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) { mask_supported = true; pci_write_config_word(dev, PCI_COMMAND, orig); @@ -3138,7 +3024,7 @@ bool pci_check_and_mask_intx(struct pci_dev *dev) EXPORT_SYMBOL_GPL(pci_check_and_mask_intx); /** - * pci_check_and_mask_intx - unmask INTx of no interrupt is pending + * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending * @dev: the PCI device to operate on * * Check if the device dev has its INTx line asserted, unmask it if not @@ -3204,20 +3090,11 @@ EXPORT_SYMBOL(pci_set_dma_seg_boundary); */ int pci_wait_for_pending_transaction(struct pci_dev *dev) { - int i; - u16 status; - - /* Wait for Transaction Pending bit clean */ - for (i = 0; i < 4; i++) { - if (i) - msleep((1 << (i - 1)) * 100); - - pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); - if (!(status & PCI_EXP_DEVSTA_TRPND)) - return 1; - } + if (!pci_is_pcie(dev)) + return 1; - return 0; + return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA, + PCI_EXP_DEVSTA_TRPND); } EXPORT_SYMBOL(pci_wait_for_pending_transaction); @@ -3244,10 +3121,8 @@ static int pcie_flr(struct pci_dev *dev, int probe) static int pci_af_flr(struct pci_dev *dev, int probe) { - int i; int pos; u8 cap; - u8 status; pos = pci_find_capability(dev, PCI_CAP_ID_AF); if (!pos) @@ -3260,18 +3135,16 @@ static int pci_af_flr(struct pci_dev *dev, int probe) if (probe) return 0; - /* Wait for Transaction Pending bit clean */ - for (i = 0; i < 4; i++) { - if (i) - msleep((1 << (i - 1)) * 100); - - pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status); - if (!(status & PCI_AF_STATUS_TP)) - goto clear; - } + /* + * Wait for Transaction Pending bit to clear. A word-aligned test + * is used, so we use the conrol offset rather than status and shift + * the test bit to match. + */ + if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL, + PCI_AF_STATUS_TP << 8)) + goto clear; - dev_err(&dev->dev, "transaction is not cleared; " - "proceeding with reset anyway\n"); + dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); clear: pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); @@ -3325,14 +3198,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe) return 0; } -/** - * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge. - * @dev: Bridge device - * - * Use the bridge control register to assert reset on the secondary bus. 
- * Devices on the secondary bus are left in power-on state. - */ -void pci_reset_bridge_secondary_bus(struct pci_dev *dev) +void __weak pcibios_reset_secondary_bus(struct pci_dev *dev) { u16 ctrl; @@ -3357,6 +3223,18 @@ void pci_reset_bridge_secondary_bus(struct pci_dev *dev) */ ssleep(1); } + +/** + * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge. + * @dev: Bridge device + * + * Use the bridge control register to assert reset on the secondary bus. + * Devices on the secondary bus are left in power-on state. + */ +void pci_reset_bridge_secondary_bus(struct pci_dev *dev) +{ + pcibios_reset_secondary_bus(dev); +} EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus); static int pci_parent_bus_reset(struct pci_dev *dev, int probe) @@ -3445,14 +3323,45 @@ static void pci_dev_lock(struct pci_dev *dev) device_lock(&dev->dev); } +/* Return 1 on successful lock, 0 on contention */ +static int pci_dev_trylock(struct pci_dev *dev) +{ + if (pci_cfg_access_trylock(dev)) { + if (device_trylock(&dev->dev)) + return 1; + pci_cfg_access_unlock(dev); + } + + return 0; +} + static void pci_dev_unlock(struct pci_dev *dev) { device_unlock(&dev->dev); pci_cfg_access_unlock(dev); } +/** + * pci_reset_notify - notify device driver of reset + * @dev: device to be notified of reset + * @prepare: 'true' if device is about to be reset; 'false' if reset attempt + * completed + * + * Must be called prior to device access being disabled and after device + * access is restored. + */ +static void pci_reset_notify(struct pci_dev *dev, bool prepare) +{ + const struct pci_error_handlers *err_handler = + dev->driver ? dev->driver->err_handler : NULL; + if (err_handler && err_handler->reset_notify) + err_handler->reset_notify(dev, prepare); +} + static void pci_dev_save_and_disable(struct pci_dev *dev) { + pci_reset_notify(dev, true); + /* * Wake-up device prior to save. PM registers default to D0 after * reset and a simple register restore doesn't reliably return @@ -3474,6 +3383,7 @@ static void pci_dev_save_and_disable(struct pci_dev *dev) static void pci_dev_restore(struct pci_dev *dev) { pci_restore_state(dev); + pci_reset_notify(dev, false); } static int pci_dev_reset(struct pci_dev *dev, int probe) @@ -3490,6 +3400,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) return rc; } + /** * __pci_reset_function - reset a PCI device function * @dev: PCI device to reset @@ -3588,6 +3499,34 @@ int pci_reset_function(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_reset_function); +/** + * pci_try_reset_function - quiesce and reset a PCI device function + * @dev: PCI device to reset + * + * Same as above, except return -EAGAIN if unable to lock device. 
+ */ +int pci_try_reset_function(struct pci_dev *dev) +{ + int rc; + + rc = pci_dev_reset(dev, 1); + if (rc) + return rc; + + pci_dev_save_and_disable(dev); + + if (pci_dev_trylock(dev)) { + rc = __pci_dev_reset(dev, 0); + pci_dev_unlock(dev); + } else + rc = -EAGAIN; + + pci_dev_restore(dev); + + return rc; +} +EXPORT_SYMBOL_GPL(pci_try_reset_function); + /* Lock devices from the top of the tree down */ static void pci_bus_lock(struct pci_bus *bus) { @@ -3612,6 +3551,32 @@ static void pci_bus_unlock(struct pci_bus *bus) } } +/* Return 1 on successful lock, 0 on contention */ +static int pci_bus_trylock(struct pci_bus *bus) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + if (!pci_dev_trylock(dev)) + goto unlock; + if (dev->subordinate) { + if (!pci_bus_trylock(dev->subordinate)) { + pci_dev_unlock(dev); + goto unlock; + } + } + } + return 1; + +unlock: + list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) { + if (dev->subordinate) + pci_bus_unlock(dev->subordinate); + pci_dev_unlock(dev); + } + return 0; +} + /* Lock devices from the top of the tree down */ static void pci_slot_lock(struct pci_slot *slot) { @@ -3640,6 +3605,37 @@ static void pci_slot_unlock(struct pci_slot *slot) } } +/* Return 1 on successful lock, 0 on contention */ +static int pci_slot_trylock(struct pci_slot *slot) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &slot->bus->devices, bus_list) { + if (!dev->slot || dev->slot != slot) + continue; + if (!pci_dev_trylock(dev)) + goto unlock; + if (dev->subordinate) { + if (!pci_bus_trylock(dev->subordinate)) { + pci_dev_unlock(dev); + goto unlock; + } + } + } + return 1; + +unlock: + list_for_each_entry_continue_reverse(dev, + &slot->bus->devices, bus_list) { + if (!dev->slot || dev->slot != slot) + continue; + if (dev->subordinate) + pci_bus_unlock(dev->subordinate); + pci_dev_unlock(dev); + } + return 0; +} + /* Save and disable devices from the top of the tree down */ static void pci_bus_save_and_disable(struct pci_bus *bus) { @@ -3763,6 +3759,35 @@ int pci_reset_slot(struct pci_slot *slot) } EXPORT_SYMBOL_GPL(pci_reset_slot); +/** + * pci_try_reset_slot - Try to reset a PCI slot + * @slot: PCI slot to reset + * + * Same as above except return -EAGAIN if the slot cannot be locked + */ +int pci_try_reset_slot(struct pci_slot *slot) +{ + int rc; + + rc = pci_slot_reset(slot, 1); + if (rc) + return rc; + + pci_slot_save_and_disable(slot); + + if (pci_slot_trylock(slot)) { + might_sleep(); + rc = pci_reset_hotplug_slot(slot->hotplug, 0); + pci_slot_unlock(slot); + } else + rc = -EAGAIN; + + pci_slot_restore(slot); + + return rc; +} +EXPORT_SYMBOL_GPL(pci_try_reset_slot); + static int pci_bus_reset(struct pci_bus *bus, int probe) { if (!bus->self) @@ -3822,6 +3847,35 @@ int pci_reset_bus(struct pci_bus *bus) EXPORT_SYMBOL_GPL(pci_reset_bus); /** + * pci_try_reset_bus - Try to reset a PCI bus + * @bus: top level PCI bus to reset + * + * Same as above except return -EAGAIN if the bus cannot be locked + */ +int pci_try_reset_bus(struct pci_bus *bus) +{ + int rc; + + rc = pci_bus_reset(bus, 1); + if (rc) + return rc; + + pci_bus_save_and_disable(bus); + + if (pci_bus_trylock(bus)) { + might_sleep(); + pci_reset_bridge_secondary_bus(bus->self); + pci_bus_unlock(bus); + } else + rc = -EAGAIN; + + pci_bus_restore(bus); + + return rc; +} +EXPORT_SYMBOL_GPL(pci_try_reset_bus); + +/** * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count * @dev: PCI device to query * @@ -4065,6 +4119,7 @@ int 
pci_select_bars(struct pci_dev *dev, unsigned long flags) bars |= (1 << i); return bars; } +EXPORT_SYMBOL(pci_select_bars); /** * pci_resource_bar - get position of the BAR associated with a resource @@ -4104,7 +4159,7 @@ void __init pci_register_set_vga_state(arch_set_vga_state_t func) } static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode, - unsigned int command_bits, u32 flags) + unsigned int command_bits, u32 flags) { if (arch_set_vga_state) return arch_set_vga_state(dev, decode, command_bits, @@ -4128,7 +4183,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, u16 cmd; int rc; - WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY))); + WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY))); /* ARCH specific VGA enables */ rc = pci_set_vga_state_arch(dev, decode, command_bits, flags); @@ -4216,11 +4271,10 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev) bus == dev->bus->number && slot == PCI_SLOT(dev->devfn) && func == PCI_FUNC(dev->devfn)) { - if (align_order == -1) { + if (align_order == -1) align = PAGE_SIZE; - } else { + else align = 1 << align_order; - } /* Found */ break; } @@ -4277,6 +4331,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev) "Rounding up size of resource #%d to %#llx.\n", i, (unsigned long long)size); } + r->flags |= IORESOURCE_UNSET; r->end = size - 1; r->start = 0; } @@ -4290,6 +4345,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev) r = &dev->resource[i]; if (!(r->flags & IORESOURCE_MEM)) continue; + r->flags |= IORESOURCE_UNSET; r->end = resource_size(r) - 1; r->start = 0; } @@ -4336,7 +4392,6 @@ static int __init pci_resource_alignment_sysfs_init(void) return bus_create_file(&pci_bus_type, &bus_attr_resource_alignment); } - late_initcall(pci_resource_alignment_sysfs_init); static void pci_no_domains(void) @@ -4415,42 +4470,3 @@ static int __init pci_setup(char *str) return 0; } early_param("pci", pci_setup); - -EXPORT_SYMBOL(pci_reenable_device); -EXPORT_SYMBOL(pci_enable_device_io); -EXPORT_SYMBOL(pci_enable_device_mem); -EXPORT_SYMBOL(pci_enable_device); -EXPORT_SYMBOL(pcim_enable_device); -EXPORT_SYMBOL(pcim_pin_device); -EXPORT_SYMBOL(pci_disable_device); -EXPORT_SYMBOL(pci_find_capability); -EXPORT_SYMBOL(pci_bus_find_capability); -EXPORT_SYMBOL(pci_release_regions); -EXPORT_SYMBOL(pci_request_regions); -EXPORT_SYMBOL(pci_request_regions_exclusive); -EXPORT_SYMBOL(pci_release_region); -EXPORT_SYMBOL(pci_request_region); -EXPORT_SYMBOL(pci_request_region_exclusive); -EXPORT_SYMBOL(pci_release_selected_regions); -EXPORT_SYMBOL(pci_request_selected_regions); -EXPORT_SYMBOL(pci_request_selected_regions_exclusive); -EXPORT_SYMBOL(pci_set_master); -EXPORT_SYMBOL(pci_clear_master); -EXPORT_SYMBOL(pci_set_mwi); -EXPORT_SYMBOL(pci_try_set_mwi); -EXPORT_SYMBOL(pci_clear_mwi); -EXPORT_SYMBOL_GPL(pci_intx); -EXPORT_SYMBOL(pci_assign_resource); -EXPORT_SYMBOL(pci_find_parent_resource); -EXPORT_SYMBOL(pci_select_bars); - -EXPORT_SYMBOL(pci_set_power_state); -EXPORT_SYMBOL(pci_save_state); -EXPORT_SYMBOL(pci_restore_state); -EXPORT_SYMBOL(pci_pme_capable); -EXPORT_SYMBOL(pci_pme_active); -EXPORT_SYMBOL(pci_wake_from_d3); -EXPORT_SYMBOL(pci_target_state); -EXPORT_SYMBOL(pci_prepare_to_sleep); -EXPORT_SYMBOL(pci_back_from_sleep); -EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); |
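
For reference, the two call sites converted to the new pci_wait_for_pending() helper end up looking as shown below (lines taken from the hunks above). The helper polls the given config word up to four times, sleeping 100, 200 and 400 ms between reads, and returns 1 as soon as the masked bit(s) read back clear:

	/* PCIe path: DEVSTA is a naturally aligned 16-bit register, so the
	 * Transaction Pending bit is tested in place. */
	pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
			     PCI_EXP_DEVSTA_TRPND);

	/* AF FLR path: PCI_AF_STATUS is the high byte of the word read at
	 * PCI_AF_CTRL, so the same Transaction Pending bit is tested
	 * shifted up by 8. */
	pci_wait_for_pending(dev, pos + PCI_AF_CTRL, PCI_AF_STATUS_TP << 8);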
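
The new pci_try_reset_function() returns -EAGAIN instead of sleeping when the device lock is contended. A minimal sketch of a caller that backs off and retries on contention; the function name and the retry policy are illustrative, not part of this patch:

	#include <linux/delay.h>
	#include <linux/pci.h>

	/* Hypothetical caller: attempt a function reset without blocking on a
	 * contended device lock, retrying a few times before giving up. */
	static int example_reset_with_retry(struct pci_dev *pdev)
	{
		int i, ret;

		for (i = 0; i < 5; i++) {
			ret = pci_try_reset_function(pdev);
			if (ret != -EAGAIN)
				return ret;	/* 0 on success, or a hard error */
			msleep(20);		/* lock was contended; back off briefly */
		}
		return -EAGAIN;
	}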
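
With pci_load_saved_state() made static, code outside pci.c is expected to pair pci_store_saved_state() with pci_load_and_free_saved_state(). A rough sketch of that round trip, assuming a caller that wants to restore config space after a disruptive operation (the function name is illustrative):

	#include <linux/pci.h>

	static int example_save_restore(struct pci_dev *pdev)
	{
		struct pci_saved_state *state;

		pci_save_state(pdev);			/* snapshot into dev->saved_config */
		state = pci_store_saved_state(pdev);	/* kmalloc'd copy, or NULL */
		if (!state)
			return -ENOMEM;

		/* ... reset or otherwise disturb the device ... */

		pci_load_and_free_saved_state(pdev, &state);	/* copy back and free */
		pci_restore_state(pdev);			/* write to hardware */
		return 0;
	}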
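
The extended-capability save helpers added here (pci_add_ext_cap_save_buffer() and pci_find_saved_ext_cap()) record cap_extended alongside the capability number, so extended ID 0x02 (Virtual Channel) cannot be confused with classic ID 0x02 (AGP). A sketch of the intended pairing; the real users are the new VC save/restore code outside this file, and the buffer size here is an assumption:

	#include <linux/pci.h>

	static int example_vc_save_buffer(struct pci_dev *dev, unsigned int vc_size)
	{
		struct pci_cap_saved_state *save;

		/* at allocation time (the real size comes from the VC capability) */
		if (pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_VC, vc_size))
			return -ENOMEM;

		/* at save/restore time the lookup keys on (ID, extended) */
		save = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_VC);
		return save ? 0 : -ENOENT;
	}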
