Diffstat (limited to 'drivers/usb/host')
-rw-r--r--  drivers/usb/host/Kconfig | 529
-rw-r--r--  drivers/usb/host/Makefile | 43
-rw-r--r--  drivers/usb/host/bcma-hcd.c | 333
-rw-r--r--  drivers/usb/host/ehci-atmel.c | 193
-rw-r--r--  drivers/usb/host/ehci-au1xxx.c | 325
-rw-r--r--  drivers/usb/host/ehci-dbg.c | 307
-rw-r--r--  drivers/usb/host/ehci-exynos.c | 390
-rw-r--r--  drivers/usb/host/ehci-fsl.c | 405
-rw-r--r--  drivers/usb/host/ehci-fsl.h | 25
-rw-r--r--  drivers/usb/host/ehci-grlib.c | 193
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 946
-rw-r--r--  drivers/usb/host/ehci-hub.c | 650
-rw-r--r--  drivers/usb/host/ehci-ixp4xx.c | 156
-rw-r--r--  drivers/usb/host/ehci-lpm.c | 83
-rw-r--r--  drivers/usb/host/ehci-mem.c | 50
-rw-r--r--  drivers/usb/host/ehci-msm.c | 232
-rw-r--r--  drivers/usb/host/ehci-mv.c | 334
-rw-r--r--  drivers/usb/host/ehci-mxc.c | 274
-rw-r--r--  drivers/usb/host/ehci-octeon.c | 45
-rw-r--r--  drivers/usb/host/ehci-omap.c | 874
-rw-r--r--  drivers/usb/host/ehci-orion.c | 255
-rw-r--r--  drivers/usb/host/ehci-pci.c | 395
-rw-r--r--  drivers/usb/host/ehci-platform.c | 410
-rw-r--r--  drivers/usb/host/ehci-pmcmsp.c | 330
-rw-r--r--  drivers/usb/host/ehci-ppc-of.c | 91
-rw-r--r--  drivers/usb/host/ehci-ps3.c | 52
-rw-r--r--  drivers/usb/host/ehci-q.c | 733
-rw-r--r--  drivers/usb/host/ehci-sched.c | 1762
-rw-r--r--  drivers/usb/host/ehci-sead3.c | 187
-rw-r--r--  drivers/usb/host/ehci-sh.c | 206
-rw-r--r--  drivers/usb/host/ehci-spear.c | 195
-rw-r--r--  drivers/usb/host/ehci-sysfs.c | 187
-rw-r--r--  drivers/usb/host/ehci-tegra.c | 569
-rw-r--r--  drivers/usb/host/ehci-tilegx.c | 217
-rw-r--r--  drivers/usb/host/ehci-timer.c | 433
-rw-r--r--  drivers/usb/host/ehci-w90x900.c | 127
-rw-r--r--  drivers/usb/host/ehci-xilinx-of.c | 98
-rw-r--r--  drivers/usb/host/ehci.h | 311
-rw-r--r--  drivers/usb/host/fhci-dbg.c | 12
-rw-r--r--  drivers/usb/host/fhci-hcd.c | 76
-rw-r--r--  drivers/usb/host/fhci-hub.c | 16
-rw-r--r--  drivers/usb/host/fhci-sched.c | 60
-rw-r--r--  drivers/usb/host/fhci-tds.c | 28
-rw-r--r--  drivers/usb/host/fhci.h | 28
-rw-r--r--  drivers/usb/host/fotg210-hcd.c | 5981
-rw-r--r--  drivers/usb/host/fotg210.h | 742
-rw-r--r--  drivers/usb/host/fsl-mph-dr-of.c | 118
-rw-r--r--  drivers/usb/host/fusbh200-hcd.c | 5894
-rw-r--r--  drivers/usb/host/fusbh200.h | 731
-rw-r--r--  drivers/usb/host/hwa-hc.c | 134
-rw-r--r--  drivers/usb/host/imx21-dbg.c | 8
-rw-r--r--  drivers/usb/host/imx21-hcd.c | 91
-rw-r--r--  drivers/usb/host/imx21-hcd.h | 6
-rw-r--r--  drivers/usb/host/isp116x-hcd.c | 42
-rw-r--r--  drivers/usb/host/isp116x.h | 15
-rw-r--r--  drivers/usb/host/isp1362-hcd.c | 153
-rw-r--r--  drivers/usb/host/isp1362.h | 53
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 2557
-rw-r--r--  drivers/usb/host/isp1760-hcd.h | 135
-rw-r--r--  drivers/usb/host/isp1760-if.c | 138
-rw-r--r--  drivers/usb/host/max3421-hcd.c | 1957
-rw-r--r--  drivers/usb/host/octeon2-common.c | 45
-rw-r--r--  drivers/usb/host/ohci-at91.c | 586
-rw-r--r--  drivers/usb/host/ohci-au1xxx.c | 319
-rw-r--r--  drivers/usb/host/ohci-da8xx.c | 90
-rw-r--r--  drivers/usb/host/ohci-dbg.c | 101
-rw-r--r--  drivers/usb/host/ohci-ep93xx.c | 218
-rw-r--r--  drivers/usb/host/ohci-exynos.c | 363
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 478
-rw-r--r--  drivers/usb/host/ohci-hub.c | 113
-rw-r--r--  drivers/usb/host/ohci-jz4740.c | 55
-rw-r--r--  drivers/usb/host/ohci-lh7a404.c | 252
-rw-r--r--  drivers/usb/host/ohci-nxp.c | 351
-rw-r--r--  drivers/usb/host/ohci-octeon.c | 38
-rw-r--r--  drivers/usb/host/ohci-omap.c | 230
-rw-r--r--  drivers/usb/host/ohci-omap3.c | 730
-rw-r--r--  drivers/usb/host/ohci-pci.c | 336
-rw-r--r--  drivers/usb/host/ohci-platform.c | 395
-rw-r--r--  drivers/usb/host/ohci-pnx4008.c | 453
-rw-r--r--  drivers/usb/host/ohci-pnx8550.c | 242
-rw-r--r--  drivers/usb/host/ohci-ppc-of.c | 62
-rw-r--r--  drivers/usb/host/ohci-ppc-soc.c | 215
-rw-r--r--  drivers/usb/host/ohci-ps3.c | 10
-rw-r--r--  drivers/usb/host/ohci-pxa27x.c | 428
-rw-r--r--  drivers/usb/host/ohci-q.c | 82
-rw-r--r--  drivers/usb/host/ohci-s3c2410.c | 242
-rw-r--r--  drivers/usb/host/ohci-sa1111.c | 309
-rw-r--r--  drivers/usb/host/ohci-sh.c | 142
-rw-r--r--  drivers/usb/host/ohci-sm501.c | 29
-rw-r--r--  drivers/usb/host/ohci-spear.c | 209
-rw-r--r--  drivers/usb/host/ohci-ssb.c | 260
-rw-r--r--  drivers/usb/host/ohci-tilegx.c | 206
-rw-r--r--  drivers/usb/host/ohci-tmio.c | 37
-rw-r--r--  drivers/usb/host/ohci.h | 72
-rw-r--r--  drivers/usb/host/oxu210hp-hcd.c | 67
-rw-r--r--  drivers/usb/host/pci-quirks.c | 791
-rw-r--r--  drivers/usb/host/pci-quirks.h | 19
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 98
-rw-r--r--  drivers/usb/host/r8a66597.h | 43
-rw-r--r--  drivers/usb/host/sl811-hcd.c | 168
-rw-r--r--  drivers/usb/host/sl811.h | 21
-rw-r--r--  drivers/usb/host/sl811_cs.c | 18
-rw-r--r--  drivers/usb/host/ssb-hcd.c | 278
-rw-r--r--  drivers/usb/host/u132-hcd.c | 41
-rw-r--r--  drivers/usb/host/uhci-debug.c | 267
-rw-r--r--  drivers/usb/host/uhci-grlib.c | 207
-rw-r--r--  drivers/usb/host/uhci-hcd.c | 628
-rw-r--r--  drivers/usb/host/uhci-hcd.h | 252
-rw-r--r--  drivers/usb/host/uhci-hub.c | 96
-rw-r--r--  drivers/usb/host/uhci-pci.c | 306
-rw-r--r--  drivers/usb/host/uhci-platform.c | 165
-rw-r--r--  drivers/usb/host/uhci-q.c | 228
-rw-r--r--  drivers/usb/host/whci/debug.c | 1
-rw-r--r--  drivers/usb/host/whci/hcd.c | 18
-rw-r--r--  drivers/usb/host/whci/init.c | 2
-rw-r--r--  drivers/usb/host/whci/int.c | 1
-rw-r--r--  drivers/usb/host/whci/qset.c | 19
-rw-r--r--  drivers/usb/host/whci/wusb.c | 1
-rw-r--r--  drivers/usb/host/xhci-dbg.c | 132
-rw-r--r--  drivers/usb/host/xhci-ext-caps.h | 16
-rw-r--r--  drivers/usb/host/xhci-hub.c | 1057
-rw-r--r--  drivers/usb/host/xhci-mem.c | 1593
-rw-r--r--  drivers/usb/host/xhci-mvebu.c | 72
-rw-r--r--  drivers/usb/host/xhci-mvebu.h | 21
-rw-r--r--  drivers/usb/host/xhci-pci.c | 302
-rw-r--r--  drivers/usb/host/xhci-plat.c | 297
-rw-r--r--  drivers/usb/host/xhci-ring.c | 2522
-rw-r--r--  drivers/usb/host/xhci-trace.c | 15
-rw-r--r--  drivers/usb/host/xhci-trace.h | 151
-rw-r--r--  drivers/usb/host/xhci.c | 3391
-rw-r--r--  drivers/usb/host/xhci.h | 704
131 files changed, 39540 insertions, 14086 deletions
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 6f4f8e6a40c..03314f861be 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -2,11 +2,9 @@
# USB Host Controller Drivers
#
comment "USB Host Controller Drivers"
- depends on USB
config USB_C67X00_HCD
tristate "Cypress C67x00 HCD support"
- depends on USB
help
The Cypress C67x00 (EZ-Host/EZ-OTG) chips are dual-role
host/peripheral/OTG USB controllers.
@@ -18,8 +16,7 @@ config USB_C67X00_HCD
module will be called c67x00.
config USB_XHCI_HCD
- tristate "xHCI HCD (USB 3.0) support (EXPERIMENTAL)"
- depends on USB && PCI && EXPERIMENTAL
+ tristate "xHCI HCD (USB 3.0) support"
---help---
The eXtensible Host Controller Interface (xHCI) is standard for USB 3.0
"SuperSpeed" host controller hardware.
@@ -27,19 +24,23 @@ config USB_XHCI_HCD
To compile this driver as a module, choose M here: the
module will be called xhci-hcd.
-config USB_XHCI_HCD_DEBUGGING
- bool "Debugging for the xHCI host controller"
- depends on USB_XHCI_HCD
+if USB_XHCI_HCD
+
+config USB_XHCI_PLATFORM
+ tristate
+
+config USB_XHCI_MVEBU
+ tristate "xHCI support for Marvell Armada 375/38x"
+ select USB_XHCI_PLATFORM
+ depends on ARCH_MVEBU || COMPILE_TEST
---help---
- Say 'Y' to turn on debugging for the xHCI host controller driver.
- This will spew debugging output, even in interrupt context.
- This should only be used for debugging xHCI driver bugs.
+ Say 'Y' to enable the support for the xHCI host controller
+ found in Marvell Armada 375/38x ARM SOCs.
- If unsure, say N.
+endif # USB_XHCI_HCD
config USB_EHCI_HCD
tristate "EHCI HCD (USB 2.0) support"
- depends on USB && USB_ARCH_HAS_EHCI
---help---
The Enhanced Host Controller Interface (EHCI) is standard for USB 2.0
"high speed" (480 Mbit/sec, 60 Mbyte/sec) host controller hardware.
@@ -91,22 +92,29 @@ config USB_EHCI_TT_NEWSCHED
If unsure, say Y.
-config USB_EHCI_BIG_ENDIAN_MMIO
- bool
- depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || \
- ARCH_IXP4XX || XPS_USB_HCD_XILINX || \
- PPC_MPC512x || CPU_CAVIUM_OCTEON)
- default y
+config USB_FSL_MPH_DR_OF
+ tristate
-config USB_EHCI_BIG_ENDIAN_DESC
- bool
- depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX || \
- PPC_MPC512x)
+if USB_EHCI_HCD
+
+config USB_EHCI_PCI
+ tristate
+ depends on PCI
default y
+config USB_EHCI_HCD_PMC_MSP
+ tristate "EHCI support for on-chip PMC MSP71xx USB controller"
+ depends on MSP_HAS_USB
+ default n
+ select USB_EHCI_BIG_ENDIAN_DESC
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ ---help---
+ Enables support for the onchip USB controller on the PMC_MSP7100 Family SoC's.
+ If unsure, say N.
+
config XPS_USB_HCD_XILINX
bool "Use Xilinx usb host EHCI controller core"
- depends on USB_EHCI_HCD && (PPC32 || MICROBLAZE)
+ depends on (PPC32 || MICROBLAZE)
select USB_EHCI_BIG_ENDIAN_DESC
select USB_EHCI_BIG_ENDIAN_MMIO
---help---
@@ -115,41 +123,167 @@ config XPS_USB_HCD_XILINX
support both high speed and full speed devices, or high speed
devices only.
-config USB_FSL_MPH_DR_OF
- tristate
-
config USB_EHCI_FSL
- bool "Support for Freescale on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && FSL_SOC
+ bool "Support for Freescale PPC on-chip EHCI USB controller"
+ depends on FSL_SOC
select USB_EHCI_ROOT_HUB_TT
select USB_FSL_MPH_DR_OF if OF
---help---
Variation of ARC USB block used in some Freescale chips.
config USB_EHCI_MXC
- bool "Support for Freescale on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && ARCH_MXC
+ tristate "Support for Freescale i.MX on-chip EHCI USB controller"
+ depends on ARCH_MXC
select USB_EHCI_ROOT_HUB_TT
---help---
Variation of ARC USB block used in some Freescale chips.
+config USB_EHCI_HCD_OMAP
+ tristate "EHCI support for OMAP3 and later chips"
+ depends on ARCH_OMAP
+ select NOP_USB_XCEIV
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ OMAP3 and later chips.
+
+config USB_EHCI_HCD_ORION
+ tristate "Support for Marvell EBU on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && PLAT_ORION
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on Marvell's
+ embedded ARM SoCs, including Orion, Kirkwood, Dove, Armada XP,
+ Armada 370. This is different from the EHCI implementation
+ on Marvell's mobile PXA and MMP SoC, see "EHCI support for
+ Marvell PXA/MMP USB controller" for those.
+
+config USB_EHCI_HCD_SPEAR
+ tristate "Support for ST SPEAr on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && PLAT_SPEAR
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ ST SPEAr chips.
+
+config USB_EHCI_HCD_AT91
+ tristate "Support for Atmel on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && ARCH_AT91
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ Atmel chips.
+
+config USB_EHCI_MSM
+ tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller"
+ depends on ARCH_MSM || ARCH_QCOM
+ select USB_EHCI_ROOT_HUB_TT
+ ---help---
+ Enables support for the USB Host controller present on the
+ Qualcomm chipsets. Root Hub has inbuilt TT.
+ This driver depends on OTG driver for PHY initialization,
+ clock management, powering up VBUS, and power management.
+ This driver is not supported on boards like trout which
+ has an external PHY.
+
+config USB_EHCI_TEGRA
+ tristate "NVIDIA Tegra HCD support"
+ depends on ARCH_TEGRA
+ select USB_EHCI_ROOT_HUB_TT
+ select USB_PHY
+ help
+ This driver enables support for the internal USB Host Controllers
+ found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
+
config USB_EHCI_HCD_PPC_OF
bool "EHCI support for PPC USB controller on OF platform bus"
- depends on USB_EHCI_HCD && PPC_OF
+ depends on PPC_OF
default y
---help---
Enables support for the USB controller present on the PowerPC
OpenFirmware platform bus.
+config USB_EHCI_SH
+ bool "EHCI support for SuperH USB controller"
+ depends on SUPERH
+ ---help---
+ Enables support for the on-chip EHCI controller on the SuperH.
+ If you use the PCI EHCI controller, this option is not necessary.
+
+config USB_EHCI_EXYNOS
+ tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
+ depends on PLAT_S5P || ARCH_EXYNOS
+ help
+ Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
+
+config USB_EHCI_MV
+ bool "EHCI support for Marvell PXA/MMP USB controller"
+ depends on (ARCH_PXA || ARCH_MMP)
+ select USB_EHCI_ROOT_HUB_TT
+ ---help---
+ Enables support for Marvell (including PXA and MMP series) on-chip
+ USB SPH and OTG controller. SPH is a single port host, and it can
+ only be an EHCI host. OTG is a controller that can switch to host mode.
+ Note that this driver will not work on Marvell's other EHCI
+ controller used by the EBU-type SoCs including Orion, Kirkwood,
+ Dove, Armada 370 and Armada XP. See "Support for Marvell EBU
+ on-chip EHCI USB controller" for those.
+
config USB_W90X900_EHCI
- bool "W90X900(W90P910) EHCI support"
- depends on USB_EHCI_HCD && ARCH_W90X900
+ tristate "W90X900(W90P910) EHCI support"
+ depends on ARCH_W90X900
---help---
Enables support for the W90X900 USB controller
+config USB_CNS3XXX_EHCI
+ bool "Cavium CNS3XXX EHCI Module (DEPRECATED)"
+ depends on ARCH_CNS3XXX
+ select USB_EHCI_HCD_PLATFORM
+ ---help---
+ This option is deprecated now and the driver was removed, use
+ USB_EHCI_HCD_PLATFORM instead.
+
+ Enable support for the CNS3XXX SOC's on-chip EHCI controller.
+ It is needed for high-speed (480Mbit/sec) USB 2.0 device
+ support.
+
+config USB_EHCI_ATH79
+ bool "EHCI support for AR7XXX/AR9XXX SoCs (DEPRECATED)"
+ depends on (SOC_AR71XX || SOC_AR724X || SOC_AR913X || SOC_AR933X)
+ select USB_EHCI_ROOT_HUB_TT
+ select USB_EHCI_HCD_PLATFORM
+ default y
+ ---help---
+ This option is deprecated now and the driver was removed, use
+ USB_EHCI_HCD_PLATFORM instead.
+
+ Enables support for the built-in EHCI controller present
+ on the Atheros AR7XXX/AR9XXX SoCs.
+
+config USB_EHCI_HCD_PLATFORM
+ tristate "Generic EHCI driver for a platform device"
+ default n
+ ---help---
+ Adds an EHCI host driver for a generic platform device, which
+ provides a memory space and an irq.
+
+ If unsure, say N.
+
+config USB_OCTEON_EHCI
+ bool "Octeon on-chip EHCI support"
+ depends on CAVIUM_OCTEON_SOC
+ default n
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ help
+ Enable support for the Octeon II SOC's on-chip EHCI
+ controller. It is needed for high-speed (480Mbit/sec)
+ USB 2.0 device support. All CN6XXX based chips with USB are
+ supported.
+
+endif # USB_EHCI_HCD
+
config USB_OXU210HP_HCD
tristate "OXU210HP HCD support"
- depends on USB
---help---
The OXU210HP is an USB host/OTG/device controller. Enable this
option if your board has this chip. If unsure, say N.
@@ -162,7 +296,6 @@ config USB_OXU210HP_HCD
config USB_ISP116X_HCD
tristate "ISP116X HCD support"
- depends on USB
---help---
The ISP1160 and ISP1161 chips are USB host controllers. Enable this
option if your board has this chip. If unsure, say N.
@@ -174,7 +307,6 @@ config USB_ISP116X_HCD
config USB_ISP1760_HCD
tristate "ISP 1760 HCD support"
- depends on USB && EXPERIMENTAL
---help---
The ISP1760 chip is a USB 2.0 host controller.
@@ -189,8 +321,6 @@ config USB_ISP1760_HCD
config USB_ISP1362_HCD
tristate "ISP1362 HCD support"
- depends on USB
- default N
---help---
Supports the Philips ISP1362 chip as a host controller
@@ -199,11 +329,40 @@ config USB_ISP1362_HCD
To compile this driver as a module, choose M here: the
module will be called isp1362-hcd.
+config USB_FUSBH200_HCD
+ tristate "FUSBH200 HCD support"
+ depends on USB
+ ---help---
+ Faraday FUSBH200 is designed to meet USB2.0 EHCI specification
+ with minor modification.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fusbh200-hcd.
+
+config USB_FOTG210_HCD
+ tristate "FOTG210 HCD support"
+ depends on USB
+ ---help---
+ Faraday FOTG210 is an OTG controller which can be configured as
+ an USB2.0 host. It is designed to meet USB2.0 EHCI specification
+ with minor modification.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fotg210-hcd.
+
+config USB_MAX3421_HCD
+ tristate "MAX3421 HCD (USB-over-SPI) support"
+ depends on USB && SPI
+ ---help---
+ The Maxim MAX3421E chip supports standard USB 2.0-compliant
+ full-speed devices either in host or peripheral mode. This
+ driver supports the host-mode of the MAX3421E only.
+
+ To compile this driver as a module, choose M here: the module will
+ be called max3421-hcd.
+
config USB_OHCI_HCD
- tristate "OHCI HCD support"
- depends on USB && USB_ARCH_HAS_OHCI
- select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
- select USB_OTG_UTILS if ARCH_OMAP
+ tristate "OHCI HCD (USB 1.1) support"
---help---
The Open Host Controller Interface (OHCI) is a standard for accessing
USB 1.1 host controller hardware. It does more in hardware than Intel's
@@ -217,34 +376,90 @@ config USB_OHCI_HCD
To compile this driver as a module, choose M here: the
module will be called ohci-hcd.
+if USB_OHCI_HCD
+
config USB_OHCI_HCD_OMAP1
- bool "OHCI support for OMAP1/2 chips"
- depends on USB_OHCI_HCD && (ARCH_OMAP1 || ARCH_OMAP2)
+ tristate "OHCI support for OMAP1/2 chips"
+ depends on ARCH_OMAP1
+ depends on ISP1301_OMAP || !(MACH_OMAP_H2 || MACH_OMAP_H3)
default y
---help---
Enables support for the OHCI controller on OMAP1/2 chips.
+config USB_OHCI_HCD_SPEAR
+ tristate "Support for ST SPEAr on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && PLAT_SPEAR
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ ST SPEAr chips.
+
+config USB_OHCI_HCD_S3C2410
+ tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
+ depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ S3C24xx/S3C64xx chips.
+
+config USB_OHCI_HCD_LPC32XX
+ tristate "Support for LPC on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && ARCH_LPC32XX
+ depends on USB_ISP1301
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ NXP chips.
+
+config USB_OHCI_HCD_PXA27X
+ tristate "Support for PXA27X/PXA3XX on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && (PXA27x || PXA3xx)
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ PXA27x/PXA3xx chips.
+
+config USB_OHCI_HCD_AT91
+ tristate "Support for Atmel on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && ARCH_AT91
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ Atmel chips.
+
config USB_OHCI_HCD_OMAP3
- bool "OHCI support for OMAP3 and later chips"
- depends on USB_OHCI_HCD && (ARCH_OMAP3 || ARCH_OMAP4)
+ tristate "OHCI support for OMAP3 and later chips"
+ depends on (ARCH_OMAP3 || ARCH_OMAP4)
default y
---help---
Enables support for the on-chip OHCI controller on
OMAP3 and later chips.
-config USB_OHCI_HCD_PPC_SOC
- bool "OHCI support for on-chip PPC USB controller"
- depends on USB_OHCI_HCD && (STB03xxx || PPC_MPC52xx)
+config USB_OHCI_HCD_DAVINCI
+ bool "OHCI support for TI DaVinci DA8xx"
+ depends on ARCH_DAVINCI_DA8XX
+ depends on USB_OHCI_HCD=y
default y
- select USB_OHCI_BIG_ENDIAN_DESC
- select USB_OHCI_BIG_ENDIAN_MMIO
- ---help---
- Enables support for the USB controller on the MPC52xx or
- STB03xxx processor chip. If unsure, say Y.
+ help
+ Enables support for the DaVinci DA8xx integrated OHCI
+ controller. This driver cannot currently be a loadable
+ module because it lacks a proper PHY abstraction.
+
+config USB_OHCI_ATH79
+ bool "USB OHCI support for the Atheros AR71XX/AR7240 SoCs (DEPRECATED)"
+ depends on (SOC_AR71XX || SOC_AR724X)
+ select USB_OHCI_HCD_PLATFORM
+ default y
+ help
+ This option is deprecated now and the driver was removed, use
+ USB_OHCI_HCD_PLATFORM instead.
+
+ Enables support for the built-in OHCI controller present on the
+ Atheros AR71XX/AR7240 SoCs.
config USB_OHCI_HCD_PPC_OF_BE
bool "OHCI support for OF platform bus (big endian)"
- depends on USB_OHCI_HCD && PPC_OF
+ depends on PPC_OF
select USB_OHCI_BIG_ENDIAN_DESC
select USB_OHCI_BIG_ENDIAN_MMIO
---help---
@@ -253,7 +468,7 @@ config USB_OHCI_HCD_PPC_OF_BE
config USB_OHCI_HCD_PPC_OF_LE
bool "OHCI support for OF platform bus (little endian)"
- depends on USB_OHCI_HCD && PPC_OF
+ depends on PPC_OF
select USB_OHCI_LITTLE_ENDIAN
---help---
Enables support for little-endian USB controllers present on the
@@ -261,12 +476,12 @@ config USB_OHCI_HCD_PPC_OF_LE
config USB_OHCI_HCD_PPC_OF
bool
- depends on USB_OHCI_HCD && PPC_OF
+ depends on PPC_OF
default USB_OHCI_HCD_PPC_OF_BE || USB_OHCI_HCD_PPC_OF_LE
config USB_OHCI_HCD_PCI
- bool "OHCI support for PCI-bus USB controllers"
- depends on USB_OHCI_HCD && PCI && (STB03xxx || PPC_MPC52xx || USB_OHCI_HCD_PPC_OF)
+ tristate "OHCI support for PCI-bus USB controllers"
+ depends on PCI
default y
select USB_OHCI_LITTLE_ENDIAN
---help---
@@ -274,10 +489,15 @@ config USB_OHCI_HCD_PCI
If unsure, say Y.
config USB_OHCI_HCD_SSB
- bool "OHCI support for Broadcom SSB OHCI core"
- depends on USB_OHCI_HCD && (SSB = y || SSB = USB_OHCI_HCD) && EXPERIMENTAL
+ bool "OHCI support for Broadcom SSB OHCI core (DEPRECATED)"
+ depends on (SSB = y || SSB = USB_OHCI_HCD)
+ select USB_HCD_SSB
+ select USB_OHCI_HCD_PLATFORM
default n
---help---
+ This option is deprecated now and the driver was removed, use
+ USB_HCD_SSB and USB_OHCI_HCD_PLATFORM instead.
+
Support for the Sonics Silicon Backplane (SSB) attached
Broadcom USB OHCI core.
@@ -286,25 +506,59 @@ config USB_OHCI_HCD_SSB
If unsure, say N.
-config USB_OHCI_BIG_ENDIAN_DESC
- bool
- depends on USB_OHCI_HCD
- default n
+config USB_OHCI_SH
+ bool "OHCI support for SuperH USB controller (DEPRECATED)"
+ depends on SUPERH
+ select USB_OHCI_HCD_PLATFORM
+ ---help---
+ This option is deprecated now and the driver was removed, use
+ USB_OHCI_HCD_PLATFORM instead.
-config USB_OHCI_BIG_ENDIAN_MMIO
- bool
- depends on USB_OHCI_HCD
+ Enables support for the on-chip OHCI controller on the SuperH.
+ If you use the PCI OHCI controller, this option is not necessary.
+
+config USB_OHCI_EXYNOS
+ tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
+ depends on PLAT_S5P || ARCH_EXYNOS
+ help
+ Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
+
+config USB_CNS3XXX_OHCI
+ bool "Cavium CNS3XXX OHCI Module (DEPRECATED)"
+ depends on ARCH_CNS3XXX
+ select USB_OHCI_HCD_PLATFORM
+ ---help---
+ This option is deprecated now and the driver was removed, use
+ USB_OHCI_HCD_PLATFORM instead.
+
+ Enable support for the CNS3XXX SOC's on-chip OHCI controller.
+ It is needed for low-speed USB 1.0 device support.
+
+config USB_OHCI_HCD_PLATFORM
+ tristate "Generic OHCI driver for a platform device"
default n
+ ---help---
+ Adds an OHCI host driver for a generic platform device, which
+ provides a memory space and an irq.
-config USB_OHCI_LITTLE_ENDIAN
- bool
- depends on USB_OHCI_HCD
- default n if STB03xxx || PPC_MPC52xx
- default y
+ If unsure, say N.
+
+config USB_OCTEON_OHCI
+ bool "Octeon on-chip OHCI support"
+ depends on CAVIUM_OCTEON_SOC
+ default USB_OCTEON_EHCI
+ select USB_OHCI_BIG_ENDIAN_MMIO
+ select USB_OHCI_LITTLE_ENDIAN
+ help
+ Enable support for the Octeon II SOC's on-chip OHCI
+ controller. It is needed for low-speed USB 1.0 device
+ support. All CN6XXX based chips with USB are supported.
+
+endif # USB_OHCI_HCD
config USB_UHCI_HCD
tristate "UHCI HCD (most Intel and VIA) support"
- depends on USB && PCI
+ depends on PCI || USB_UHCI_SUPPORT_NON_PCI_HC
---help---
The Universal Host Controller Interface is a standard by Intel for
accessing the USB hardware in the PC (which is also called the USB
@@ -313,14 +567,31 @@ config USB_UHCI_HCD
with Intel PCI chipsets (like intel 430TX, 440FX, 440LX, 440BX,
i810, i820) conform to this standard. Also all VIA PCI chipsets
(like VIA VP2, VP3, MVP3, Apollo Pro, Apollo Pro II or Apollo Pro
- 133). If unsure, say Y.
+ 133) and LEON/GRLIB SoCs with the GRUSBHC controller.
+ If unsure, say Y.
To compile this driver as a module, choose M here: the
module will be called uhci-hcd.
+config USB_UHCI_SUPPORT_NON_PCI_HC
+ bool
+ default y if (SPARC_LEON || USB_UHCI_PLATFORM)
+
+config USB_UHCI_PLATFORM
+ bool
+ default y if ARCH_VT8500
+
+config USB_UHCI_BIG_ENDIAN_MMIO
+ bool
+ default y if SPARC_LEON
+
+config USB_UHCI_BIG_ENDIAN_DESC
+ bool
+ default y if SPARC_LEON
+
config USB_FHCI_HCD
tristate "Freescale QE USB Host Controller support"
- depends on USB && OF_GPIO && QE_GPIO && QUICC_ENGINE
+ depends on OF_GPIO && QE_GPIO && QUICC_ENGINE
select FSL_GTM
select QE_USB
help
@@ -333,12 +604,11 @@ config FHCI_DEBUG
depends on USB_FHCI_HCD && DEBUG_FS
help
Say "y" to see some FHCI debug information and statistics
- throught debugfs.
+ through debugfs.
config USB_U132_HCD
tristate "Elan U132 Adapter Host Controller"
- depends on USB && USB_FTDI_ELAN
- default M
+ depends on USB_FTDI_ELAN
help
The U132 adapter is a USB to CardBus adapter specifically designed
for PC cards that contain an OHCI host controller. Typical PC cards
@@ -365,7 +635,6 @@ config USB_U132_HCD
config USB_SL811_HCD
tristate "SL811HS HCD support"
- depends on USB
help
The SL811HS is a single-port USB controller that supports either
host side or peripheral side roles. Enable this option if your
@@ -375,6 +644,16 @@ config USB_SL811_HCD
To compile this driver as a module, choose M here: the
module will be called sl811-hcd.
+config USB_SL811_HCD_ISO
+ bool "partial ISO support"
+ depends on USB_SL811_HCD
+ help
+ The driver doesn't support iso_frame_desc (yet), but for some simple
+ devices that just queue one ISO frame per URB, then ISO transfers
+ "should" work using the normal urb status fields.
+
+ If unsure, say N.
+
config USB_SL811_CS
tristate "CF/PCMCIA support for SL811HS HCD"
depends on USB_SL811_HCD && PCMCIA
@@ -387,7 +666,6 @@ config USB_SL811_CS
config USB_R8A66597_HCD
tristate "R8A66597 HCD support"
- depends on USB
help
The R8A66597 is a USB 2.0 host and peripheral controller.
@@ -397,10 +675,21 @@ config USB_R8A66597_HCD
To compile this driver as a module, choose M here: the
module will be called r8a66597-hcd.
+config USB_RENESAS_USBHS_HCD
+ tristate "Renesas USBHS HCD support"
+ depends on USB_RENESAS_USBHS
+ help
+ The Renesas USBHS is a USB 2.0 host and peripheral controller.
+
+ Enable this option if your board has this chip, and you want
+ to use it as a host controller. If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called renesas-usbhs.
+
config USB_WHCI_HCD
- tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- depends on PCI && USB
+ tristate "Wireless USB Host Controller Interface (WHCI) driver"
+ depends on PCI && USB && UWB
select USB_WUSB
select UWB_WHCI
help
@@ -411,9 +700,8 @@ config USB_WHCI_HCD
will be called "whci-hcd".
config USB_HWA_HCD
- tristate "Host Wire Adapter (HWA) driver (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- depends on USB
+ tristate "Host Wire Adapter (HWA) driver"
+ depends on UWB
select USB_WUSB
select UWB_HWA
help
@@ -426,37 +714,58 @@ config USB_HWA_HCD
will be called "hwa-hc".
config USB_IMX21_HCD
- tristate "iMX21 HCD support"
- depends on USB && ARM && MACH_MX21
+ tristate "i.MX21 HCD support"
+ depends on ARM && ARCH_MXC
help
This driver enables support for the on-chip USB host in the
- iMX21 processor.
+ i.MX21 processor.
To compile this driver as a module, choose M here: the
module will be called "imx21-hcd".
-config USB_OCTEON_EHCI
- bool "Octeon on-chip EHCI support"
- depends on USB && USB_EHCI_HCD && CPU_CAVIUM_OCTEON
- default n
- select USB_EHCI_BIG_ENDIAN_MMIO
- help
- Enable support for the Octeon II SOC's on-chip EHCI
- controller. It is needed for high-speed (480Mbit/sec)
- USB 2.0 device support. All CN6XXX based chips with USB are
- supported.
-config USB_OCTEON_OHCI
- bool "Octeon on-chip OHCI support"
- depends on USB && USB_OHCI_HCD && CPU_CAVIUM_OCTEON
- default USB_OCTEON_EHCI
- select USB_OHCI_BIG_ENDIAN_MMIO
- select USB_OHCI_LITTLE_ENDIAN
- help
- Enable support for the Octeon II SOC's on-chip OHCI
- controller. It is needed for low-speed USB 1.0 device
- support. All CN6XXX based chips with USB are supported.
config USB_OCTEON2_COMMON
bool
default y if USB_OCTEON_EHCI || USB_OCTEON_OHCI
+
+config USB_HCD_BCMA
+ tristate "BCMA usb host driver"
+ depends on BCMA
+ select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
+ select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
+ help
+ Enable support for the EHCI and OHCI host controllers on a bcma bus.
+ It converts the bcma driver into two platform device drivers
+ for ehci and ohci.
+
+ If unsure, say N.
+
+config USB_HCD_SSB
+ tristate "SSB usb host driver"
+ depends on SSB
+ select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
+ select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
+ help
+ Enable support for the EHCI and OHCI host controllers on an SSB bus.
+ It converts the ssb driver into two platform device drivers
+ for ehci and ohci.
+
+ If unsure, say N.
+
+config USB_HCD_TEST_MODE
+ bool "HCD test mode support"
+ ---help---
+ Say 'Y' to enable additional software test modes that may be
+ supported by the host controller drivers.
+
+ One such test mode is the Embedded High-speed Host Electrical Test
+ (EHSET) for EHCI host controller hardware, specifically the "Single
+ Step Set Feature" test. Typically this will be enabled for On-the-Go
+ or embedded hosts that need to undergo USB-IF compliance testing with
+ the aid of special testing hardware. In the future, this may expand
+ to include other tests that require support from a HCD driver.
+
+ This option is of interest only to developers who need to validate
+ their USB hardware designs. It is not needed for normal use. If
+ unsure, say N.
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 624a362f2fe..af89a903d97 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -2,7 +2,8 @@
# Makefile for USB Host Controller Drivers
#
-ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
+# tell define_trace.h where to find the xhci trace header
+CFLAGS_xhci-trace.o := -I$(src)
isp1760-y := isp1760-hcd.o isp1760-if.o
@@ -11,18 +12,51 @@ fhci-y += fhci-mem.o fhci-tds.o fhci-sched.o
fhci-$(CONFIG_FHCI_DEBUG) += fhci-dbg.o
-xhci-hcd-y := xhci.o xhci-mem.o xhci-pci.o
+xhci-hcd-y := xhci.o xhci-mem.o
xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
+xhci-hcd-y += xhci-trace.o
+xhci-hcd-$(CONFIG_PCI) += xhci-pci.o
+
+ifneq ($(CONFIG_USB_XHCI_PLATFORM), )
+ xhci-hcd-y += xhci-plat.o
+ifneq ($(CONFIG_USB_XHCI_MVEBU), )
+ xhci-hcd-y += xhci-mvebu.o
+endif
+endif
obj-$(CONFIG_USB_WHCI_HCD) += whci/
obj-$(CONFIG_PCI) += pci-quirks.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
+obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o
+obj-$(CONFIG_USB_EHCI_HCD_PLATFORM) += ehci-platform.o
+obj-$(CONFIG_USB_EHCI_MXC) += ehci-mxc.o
+obj-$(CONFIG_USB_EHCI_HCD_OMAP) += ehci-omap.o
+obj-$(CONFIG_USB_EHCI_HCD_ORION) += ehci-orion.o
+obj-$(CONFIG_USB_EHCI_HCD_SPEAR) += ehci-spear.o
+obj-$(CONFIG_USB_EHCI_EXYNOS) += ehci-exynos.o
+obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
+obj-$(CONFIG_USB_EHCI_MSM) += ehci-msm.o
+obj-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o
+obj-$(CONFIG_USB_W90X900_EHCI) += ehci-w90x900.o
+
obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
obj-$(CONFIG_USB_ISP1362_HCD) += isp1362-hcd.o
+
obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
+obj-$(CONFIG_USB_OHCI_HCD_PCI) += ohci-pci.o
+obj-$(CONFIG_USB_OHCI_HCD_PLATFORM) += ohci-platform.o
+obj-$(CONFIG_USB_OHCI_EXYNOS) += ohci-exynos.o
+obj-$(CONFIG_USB_OHCI_HCD_OMAP1) += ohci-omap.o
+obj-$(CONFIG_USB_OHCI_HCD_OMAP3) += ohci-omap3.o
+obj-$(CONFIG_USB_OHCI_HCD_SPEAR) += ohci-spear.o
+obj-$(CONFIG_USB_OHCI_HCD_AT91) += ohci-at91.o
+obj-$(CONFIG_USB_OHCI_HCD_S3C2410) += ohci-s3c2410.o
+obj-$(CONFIG_USB_OHCI_HCD_LPC32XX) += ohci-nxp.o
+obj-$(CONFIG_USB_OHCI_HCD_PXA27X) += ohci-pxa27x.o
+
obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o
@@ -35,3 +69,8 @@ obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o
obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o
+obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
+obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
+obj-$(CONFIG_USB_FUSBH200_HCD) += fusbh200-hcd.o
+obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
+obj-$(CONFIG_USB_MAX3421_HCD) += max3421-hcd.o
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
new file mode 100644
index 00000000000..205f4a33658
--- /dev/null
+++ b/drivers/usb/host/bcma-hcd.c
@@ -0,0 +1,333 @@
+/*
+ * Broadcom specific Advanced Microcontroller Bus
+ * Broadcom USB-core driver (BCMA bus glue)
+ *
+ * Copyright 2011-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Based on ssb-ohci driver
+ * Copyright 2007 Michael Buesch <m@bues.ch>
+ *
+ * Derived from the OHCI-PCI driver
+ * Copyright 1999 Roman Weissgaerber
+ * Copyright 2000-2002 David Brownell
+ * Copyright 1999 Linus Torvalds
+ * Copyright 1999 Gregory P. Smith
+ *
+ * Derived from the USBcore related parts of Broadcom-SB
+ * Copyright 2005-2011 Broadcom Corporation
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+#include <linux/bcma/bcma.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
+
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_DESCRIPTION("Common USB driver for BCMA Bus");
+MODULE_LICENSE("GPL");
+
+struct bcma_hcd_device {
+ struct platform_device *ehci_dev;
+ struct platform_device *ohci_dev;
+};
+
+/* Wait for bitmask in a register to get set or cleared.
+ * timeout is in units of ten-microseconds.
+ */
+static int bcma_wait_bits(struct bcma_device *dev, u16 reg, u32 bitmask,
+ int timeout)
+{
+ int i;
+ u32 val;
+
+ for (i = 0; i < timeout; i++) {
+ val = bcma_read32(dev, reg);
+ if ((val & bitmask) == bitmask)
+ return 0;
+ udelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void bcma_hcd_4716wa(struct bcma_device *dev)
+{
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+ /* Work around for 4716 failures. */
+ if (dev->bus->chipinfo.id == 0x4716) {
+ u32 tmp;
+
+ tmp = bcma_cpu_clock(&dev->bus->drv_mips);
+ if (tmp >= 480000000)
+ tmp = 0x1846b; /* set CDR to 0x11(fast) */
+ else if (tmp == 453000000)
+ tmp = 0x1046b; /* set CDR to 0x10(slow) */
+ else
+ tmp = 0;
+
+ /* Change Shim mdio control reg to fix host not acking at
+ * high frequencies
+ */
+ if (tmp) {
+ bcma_write32(dev, 0x524, 0x1); /* write sel to enable */
+ udelay(500);
+
+ bcma_write32(dev, 0x524, tmp);
+ udelay(500);
+ bcma_write32(dev, 0x524, 0x4ab);
+ udelay(500);
+ bcma_read32(dev, 0x528);
+ bcma_write32(dev, 0x528, 0x80000000);
+ }
+ }
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
+}
+
+/* based on arch/mips/brcm-boards/bcm947xx/pcibios.c */
+static void bcma_hcd_init_chip(struct bcma_device *dev)
+{
+ u32 tmp;
+
+ /*
+ * USB 2.0 special considerations:
+ *
+ * 1. Since the core supports both OHCI and EHCI functions, it must
+ * only be reset once.
+ *
+ * 2. In addition to the standard SI reset sequence, the Host Control
+ * Register must be programmed to bring the USB core and various
+ * phy components out of reset.
+ */
+ if (!bcma_core_is_enabled(dev)) {
+ bcma_core_enable(dev, 0);
+ mdelay(10);
+ if (dev->id.rev >= 5) {
+ /* Enable Misc PLL */
+ tmp = bcma_read32(dev, 0x1e0);
+ tmp |= 0x100;
+ bcma_write32(dev, 0x1e0, tmp);
+ if (bcma_wait_bits(dev, 0x1e0, 1 << 24, 100))
+ printk(KERN_EMERG "Failed to enable misc PPL!\n");
+
+ /* Take out of resets */
+ bcma_write32(dev, 0x200, 0x4ff);
+ udelay(25);
+ bcma_write32(dev, 0x200, 0x6ff);
+ udelay(25);
+
+ /* Make sure digital and AFE are locked in USB PHY */
+ bcma_write32(dev, 0x524, 0x6b);
+ udelay(50);
+ tmp = bcma_read32(dev, 0x524);
+ udelay(50);
+ bcma_write32(dev, 0x524, 0xab);
+ udelay(50);
+ tmp = bcma_read32(dev, 0x524);
+ udelay(50);
+ bcma_write32(dev, 0x524, 0x2b);
+ udelay(50);
+ tmp = bcma_read32(dev, 0x524);
+ udelay(50);
+ bcma_write32(dev, 0x524, 0x10ab);
+ udelay(50);
+ tmp = bcma_read32(dev, 0x524);
+
+ if (bcma_wait_bits(dev, 0x528, 0xc000, 10000)) {
+ tmp = bcma_read32(dev, 0x528);
+ printk(KERN_EMERG
+ "USB20H mdio_rddata 0x%08x\n", tmp);
+ }
+ bcma_write32(dev, 0x528, 0x80000000);
+ tmp = bcma_read32(dev, 0x314);
+ udelay(265);
+ bcma_write32(dev, 0x200, 0x7ff);
+ udelay(10);
+
+ /* Take USB and HSIC out of non-driving modes */
+ bcma_write32(dev, 0x510, 0);
+ } else {
+ bcma_write32(dev, 0x200, 0x7ff);
+
+ udelay(1);
+ }
+
+ bcma_hcd_4716wa(dev);
+ }
+}
+
+static const struct usb_ehci_pdata ehci_pdata = {
+};
+
+static const struct usb_ohci_pdata ohci_pdata = {
+};
+
+static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev, bool ohci, u32 addr)
+{
+ struct platform_device *hci_dev;
+ struct resource hci_res[2];
+ int ret = -ENOMEM;
+
+ memset(hci_res, 0, sizeof(hci_res));
+
+ hci_res[0].start = addr;
+ hci_res[0].end = hci_res[0].start + 0x1000 - 1;
+ hci_res[0].flags = IORESOURCE_MEM;
+
+ hci_res[1].start = dev->irq;
+ hci_res[1].flags = IORESOURCE_IRQ;
+
+ hci_dev = platform_device_alloc(ohci ? "ohci-platform" :
+ "ehci-platform" , 0);
+ if (!hci_dev)
+ return NULL;
+
+ hci_dev->dev.parent = &dev->dev;
+ hci_dev->dev.dma_mask = &hci_dev->dev.coherent_dma_mask;
+
+ ret = platform_device_add_resources(hci_dev, hci_res,
+ ARRAY_SIZE(hci_res));
+ if (ret)
+ goto err_alloc;
+ if (ohci)
+ ret = platform_device_add_data(hci_dev, &ohci_pdata,
+ sizeof(ohci_pdata));
+ else
+ ret = platform_device_add_data(hci_dev, &ehci_pdata,
+ sizeof(ehci_pdata));
+ if (ret)
+ goto err_alloc;
+ ret = platform_device_add(hci_dev);
+ if (ret)
+ goto err_alloc;
+
+ return hci_dev;
+
+err_alloc:
+ platform_device_put(hci_dev);
+ return ERR_PTR(ret);
+}
+
+static int bcma_hcd_probe(struct bcma_device *dev)
+{
+ int err;
+ u16 chipid_top;
+ u32 ohci_addr;
+ struct bcma_hcd_device *usb_dev;
+ struct bcma_chipinfo *chipinfo;
+
+ chipinfo = &dev->bus->chipinfo;
+ /* USBcores are only connected on embedded devices. */
+ chipid_top = (chipinfo->id & 0xFF00);
+ if (chipid_top != 0x4700 && chipid_top != 0x5300)
+ return -ENODEV;
+
+ /* TODO: Probably need checks here; is the core connected? */
+
+ if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
+ return -EOPNOTSUPP;
+
+ usb_dev = kzalloc(sizeof(struct bcma_hcd_device), GFP_KERNEL);
+ if (!usb_dev)
+ return -ENOMEM;
+
+ bcma_hcd_init_chip(dev);
+
+ /* In AI chips EHCI is addrspace 0, OHCI is 1 */
+ ohci_addr = dev->addr1;
+ if ((chipinfo->id == 0x5357 || chipinfo->id == 0x4749)
+ && chipinfo->rev == 0)
+ ohci_addr = 0x18009000;
+
+ usb_dev->ohci_dev = bcma_hcd_create_pdev(dev, true, ohci_addr);
+ if (IS_ERR(usb_dev->ohci_dev)) {
+ err = PTR_ERR(usb_dev->ohci_dev);
+ goto err_free_usb_dev;
+ }
+
+ usb_dev->ehci_dev = bcma_hcd_create_pdev(dev, false, dev->addr);
+ if (IS_ERR(usb_dev->ehci_dev)) {
+ err = PTR_ERR(usb_dev->ehci_dev);
+ goto err_unregister_ohci_dev;
+ }
+
+ bcma_set_drvdata(dev, usb_dev);
+ return 0;
+
+err_unregister_ohci_dev:
+ platform_device_unregister(usb_dev->ohci_dev);
+err_free_usb_dev:
+ kfree(usb_dev);
+ return err;
+}
+
+static void bcma_hcd_remove(struct bcma_device *dev)
+{
+ struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);
+ struct platform_device *ohci_dev = usb_dev->ohci_dev;
+ struct platform_device *ehci_dev = usb_dev->ehci_dev;
+
+ if (ohci_dev)
+ platform_device_unregister(ohci_dev);
+ if (ehci_dev)
+ platform_device_unregister(ehci_dev);
+
+ bcma_core_disable(dev, 0);
+}
+
+static void bcma_hcd_shutdown(struct bcma_device *dev)
+{
+ bcma_core_disable(dev, 0);
+}
+
+#ifdef CONFIG_PM
+
+static int bcma_hcd_suspend(struct bcma_device *dev)
+{
+ bcma_core_disable(dev, 0);
+
+ return 0;
+}
+
+static int bcma_hcd_resume(struct bcma_device *dev)
+{
+ bcma_core_enable(dev, 0);
+
+ return 0;
+}
+
+#else /* !CONFIG_PM */
+#define bcma_hcd_suspend NULL
+#define bcma_hcd_resume NULL
+#endif /* CONFIG_PM */
+
+static const struct bcma_device_id bcma_hcd_table[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_USB20_HOST, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, bcma_hcd_table);
+
+static struct bcma_driver bcma_hcd_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = bcma_hcd_table,
+ .probe = bcma_hcd_probe,
+ .remove = bcma_hcd_remove,
+ .shutdown = bcma_hcd_shutdown,
+ .suspend = bcma_hcd_suspend,
+ .resume = bcma_hcd_resume,
+};
+
+static int __init bcma_hcd_init(void)
+{
+ return bcma_driver_register(&bcma_hcd_driver);
+}
+module_init(bcma_hcd_init);
+
+static void __exit bcma_hcd_exit(void)
+{
+ bcma_driver_unregister(&bcma_hcd_driver);
+}
+module_exit(bcma_hcd_exit);
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 51bd0edf544..ec9f7b75d49 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -12,25 +12,46 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+#define DRIVER_DESC "EHCI Atmel driver"
+
+static const char hcd_name[] = "ehci-atmel";
+static struct hc_driver __read_mostly ehci_atmel_hc_driver;
/* interface and function clocks */
-static struct clk *iclk, *fclk;
+static struct clk *iclk, *fclk, *uclk;
static int clocked;
/*-------------------------------------------------------------------------*/
static void atmel_start_clock(void)
{
- clk_enable(iclk);
- clk_enable(fclk);
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+ clk_set_rate(uclk, 48000000);
+ clk_prepare_enable(uclk);
+ }
+ clk_prepare_enable(iclk);
+ clk_prepare_enable(fclk);
clocked = 1;
}
static void atmel_stop_clock(void)
{
- clk_disable(fclk);
- clk_disable(iclk);
+ clk_disable_unprepare(fclk);
+ clk_disable_unprepare(iclk);
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ clk_disable_unprepare(uclk);
clocked = 0;
}
@@ -48,75 +69,12 @@ static void atmel_stop_ehci(struct platform_device *pdev)
/*-------------------------------------------------------------------------*/
-static int ehci_atmel_setup(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int retval = 0;
-
- /* registers start at offset 0x0 */
- ehci->caps = hcd->regs;
- ehci->regs = hcd->regs +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
- dbg_hcs_params(ehci, "reset");
- dbg_hcc_params(ehci, "reset");
-
- /* cache this readonly data; minimize chip reads */
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
-
- retval = ehci_halt(ehci);
- if (retval)
- return retval;
-
- /* data structure init */
- retval = ehci_init(hcd);
- if (retval)
- return retval;
-
- ehci->sbrn = 0x20;
-
- ehci_reset(ehci);
- ehci_port_power(ehci, 0);
-
- return retval;
-}
-
-static const struct hc_driver ehci_atmel_hc_driver = {
- .description = hcd_name,
- .product_desc = "Atmel EHCI UHP HS",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /* generic hardware linkage */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
-
- /* basic lifecycle operations */
- .reset = ehci_atmel_setup,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /* managing i/o requests and associated device resources */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
-
- /* scheduling support */
- .get_frame_number = ehci_get_frame,
-
- /* root hub support */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-};
-
-static int __init ehci_atmel_drv_probe(struct platform_device *pdev)
+static int ehci_atmel_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
const struct hc_driver *driver = &ehci_atmel_hc_driver;
struct resource *res;
+ struct ehci_hcd *ehci;
int irq;
int retval;
@@ -134,6 +92,14 @@ static int __init ehci_atmel_drv_probe(struct platform_device *pdev)
goto fail_create_hcd;
}
+ /* Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail_create_hcd;
+
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
retval = -ENOMEM;
@@ -151,50 +117,48 @@ static int __init ehci_atmel_drv_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
- driver->description)) {
- dev_dbg(&pdev->dev, "controller already in use\n");
- retval = -EBUSY;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
goto fail_request_resource;
}
- hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
- if (hcd->regs == NULL) {
- dev_dbg(&pdev->dev, "error mapping memory\n");
- retval = -EFAULT;
- goto fail_ioremap;
- }
-
- iclk = clk_get(&pdev->dev, "ehci_clk");
+ iclk = devm_clk_get(&pdev->dev, "ehci_clk");
if (IS_ERR(iclk)) {
dev_err(&pdev->dev, "Error getting interface clock\n");
retval = -ENOENT;
- goto fail_get_iclk;
+ goto fail_request_resource;
}
- fclk = clk_get(&pdev->dev, "uhpck");
+ fclk = devm_clk_get(&pdev->dev, "uhpck");
if (IS_ERR(fclk)) {
dev_err(&pdev->dev, "Error getting function clock\n");
retval = -ENOENT;
- goto fail_get_fclk;
+ goto fail_request_resource;
+ }
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+ uclk = devm_clk_get(&pdev->dev, "usb_clk");
+ if (IS_ERR(uclk)) {
+ dev_err(&pdev->dev, "failed to get uclk\n");
+ retval = PTR_ERR(uclk);
+ goto fail_request_resource;
+ }
}
+ ehci = hcd_to_ehci(hcd);
+ /* registers start at offset 0x0 */
+ ehci->caps = hcd->regs;
+
atmel_start_ehci(pdev);
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
goto fail_add_hcd;
+ device_wakeup_enable(hcd->self.controller);
return retval;
fail_add_hcd:
atmel_stop_ehci(pdev);
- clk_put(fclk);
-fail_get_fclk:
- clk_put(iclk);
-fail_get_iclk:
- iounmap(hcd->regs);
-fail_ioremap:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
fail_request_resource:
usb_put_hcd(hcd);
fail_create_hcd:
@@ -204,27 +168,56 @@ fail_create_hcd:
return retval;
}
-static int __exit ehci_atmel_drv_remove(struct platform_device *pdev)
+static int ehci_atmel_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- ehci_shutdown(hcd);
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
atmel_stop_ehci(pdev);
- clk_put(fclk);
- clk_put(iclk);
fclk = iclk = NULL;
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_ehci_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g45-ehci" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_ehci_dt_ids);
+#endif
+
static struct platform_driver ehci_atmel_driver = {
.probe = ehci_atmel_drv_probe,
- .remove = __exit_p(ehci_atmel_drv_remove),
+ .remove = ehci_atmel_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
- .driver.name = "atmel-ehci",
+ .driver = {
+ .name = "atmel-ehci",
+ .of_match_table = of_match_ptr(atmel_ehci_dt_ids),
+ },
};
+
+static int __init ehci_atmel_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ehci_init_driver(&ehci_atmel_hc_driver, NULL);
+ return platform_driver_register(&ehci_atmel_driver);
+}
+module_init(ehci_atmel_init);
+
+static void __exit ehci_atmel_cleanup(void)
+{
+ platform_driver_unregister(&ehci_atmel_driver);
+}
+module_exit(ehci_atmel_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:atmel-ehci");
+MODULE_AUTHOR("Nicolas Ferre");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
deleted file mode 100644
index 2baf8a84908..00000000000
--- a/drivers/usb/host/ehci-au1xxx.c
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * EHCI HCD (Host Controller Driver) for USB.
- *
- * Bus Glue for AMD Alchemy Au1xxx
- *
- * Based on "ohci-au1xxx.c" by Matt Porter <mporter@kernel.crashing.org>
- *
- * Modified for AMD Alchemy Au1200 EHC
- * by K.Boge <karsten.boge@amd.com>
- *
- * This file is licenced under the GPL.
- */
-
-#include <linux/platform_device.h>
-#include <asm/mach-au1x00/au1000.h>
-
-#define USB_HOST_CONFIG (USB_MSR_BASE + USB_MSR_MCFG)
-#define USB_MCFG_PFEN (1<<31)
-#define USB_MCFG_RDCOMB (1<<30)
-#define USB_MCFG_SSDEN (1<<23)
-#define USB_MCFG_PHYPLLEN (1<<19)
-#define USB_MCFG_UCECLKEN (1<<18)
-#define USB_MCFG_EHCCLKEN (1<<17)
-#ifdef CONFIG_DMA_COHERENT
-#define USB_MCFG_UCAM (1<<7)
-#else
-#define USB_MCFG_UCAM (0)
-#endif
-#define USB_MCFG_EBMEN (1<<3)
-#define USB_MCFG_EMEMEN (1<<2)
-
-#define USBH_ENABLE_CE (USB_MCFG_PHYPLLEN | USB_MCFG_EHCCLKEN)
-#define USBH_ENABLE_INIT (USB_MCFG_PFEN | USB_MCFG_RDCOMB | \
- USBH_ENABLE_CE | USB_MCFG_SSDEN | \
- USB_MCFG_UCAM | USB_MCFG_EBMEN | \
- USB_MCFG_EMEMEN)
-
-#define USBH_DISABLE (USB_MCFG_EBMEN | USB_MCFG_EMEMEN)
-
-extern int usb_disabled(void);
-
-static void au1xxx_start_ehc(void)
-{
- /* enable clock to EHCI block and HS PHY PLL*/
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_CE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- /* enable EHCI mmio */
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_INIT, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-}
-
-static void au1xxx_stop_ehc(void)
-{
- unsigned long c;
-
- /* Disable mem */
- au_writel(au_readl(USB_HOST_CONFIG) & ~USBH_DISABLE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- /* Disable EHC clock. If the HS PHY is unused disable it too. */
- c = au_readl(USB_HOST_CONFIG) & ~USB_MCFG_EHCCLKEN;
- if (!(c & USB_MCFG_UCECLKEN)) /* UDC disabled? */
- c &= ~USB_MCFG_PHYPLLEN; /* yes: disable HS PHY PLL */
- au_writel(c, USB_HOST_CONFIG);
- au_sync();
-}
-
-static int au1xxx_ehci_setup(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int ret = ehci_init(hcd);
-
- ehci->need_io_watchdog = 0;
- return ret;
-}
-
-static const struct hc_driver ehci_au1xxx_hc_driver = {
- .description = hcd_name,
- .product_desc = "Au1xxx EHCI",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
-
- /*
- * basic lifecycle operations
- *
- * FIXME -- ehci_init() doesn't do enough here.
- * See ehci-ppc-soc for a complete implementation.
- */
- .reset = au1xxx_ehci_setup,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
-};
-
-static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd;
- struct ehci_hcd *ehci;
- struct resource *res;
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
-#if defined(CONFIG_SOC_AU1200) && defined(CONFIG_DMA_COHERENT)
- /* Au1200 AB USB does not support coherent memory */
- if (!(read_c0_prid() & 0xff)) {
- printk(KERN_INFO "%s: this is chip revision AB!\n", pdev->name);
- printk(KERN_INFO "%s: update your board or re-configure"
- " the kernel\n", pdev->name);
- return -ENODEV;
- }
-#endif
-
- if (pdev->resource[1].flags != IORESOURCE_IRQ) {
- pr_debug("resource[1] is not IORESOURCE_IRQ");
- return -ENOMEM;
- }
- hcd = usb_create_hcd(&ehci_au1xxx_hc_driver, &pdev->dev, "Au1xxx");
- if (!hcd)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug("request_mem_region failed");
- ret = -EBUSY;
- goto err1;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- pr_debug("ioremap failed");
- ret = -ENOMEM;
- goto err2;
- }
-
- au1xxx_start_ehc();
-
- ehci = hcd_to_ehci(hcd);
- ehci->caps = hcd->regs;
- ehci->regs = hcd->regs + HC_LENGTH(readl(&ehci->caps->hc_capbase));
- /* cache this readonly data; minimize chip reads */
- ehci->hcs_params = readl(&ehci->caps->hcs_params);
-
- ret = usb_add_hcd(hcd, pdev->resource[1].start,
- IRQF_DISABLED | IRQF_SHARED);
- if (ret == 0) {
- platform_set_drvdata(pdev, hcd);
- return ret;
- }
-
- au1xxx_stop_ehc();
- iounmap(hcd->regs);
-err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-err1:
- usb_put_hcd(hcd);
- return ret;
-}
-
-static int ehci_hcd_au1xxx_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
- au1xxx_stop_ehc();
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- unsigned long flags;
- int rc;
-
- return 0;
- rc = 0;
-
- if (time_before(jiffies, ehci->next_statechange))
- msleep(10);
-
- /* Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible. The PM and USB cores make sure that
- * the root hub is either suspended or stopped.
- */
- spin_lock_irqsave(&ehci->lock, flags);
- ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
- ehci_writel(ehci, 0, &ehci->regs->intr_enable);
- (void)ehci_readl(ehci, &ehci->regs->intr_enable);
-
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
- au1xxx_stop_ehc();
- spin_unlock_irqrestore(&ehci->lock, flags);
-
- // could save FLADJ in case of Vaux power loss
- // ... we'd only use it to handle clock skew
-
- return rc;
-}
-
-static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
- au1xxx_start_ehc();
-
- // maybe restore FLADJ
-
- if (time_before(jiffies, ehci->next_statechange))
- msleep(100);
-
- /* Mark hardware accessible again as we are out of D3 state by now */
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
- /* If CF is still set, we maintained PCI Vaux power.
- * Just undo the effect of ehci_pci_suspend().
- */
- if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
- int mask = INTR_MASK;
-
- ehci_prepare_ports_for_controller_resume(ehci);
- if (!hcd->self.root_hub->do_remote_wakeup)
- mask &= ~STS_PCD;
- ehci_writel(ehci, mask, &ehci->regs->intr_enable);
- ehci_readl(ehci, &ehci->regs->intr_enable);
- return 0;
- }
-
- ehci_dbg(ehci, "lost power, restarting\n");
- usb_root_hub_lost_power(hcd->self.root_hub);
-
- /* Else reset, to cope with power loss or flush-to-storage
- * style "resume" having let BIOS kick in during reboot.
- */
- (void) ehci_halt(ehci);
- (void) ehci_reset(ehci);
-
- /* emptying the schedule aborts any urbs */
- spin_lock_irq(&ehci->lock);
- if (ehci->reclaim)
- end_unlink_async(ehci);
- ehci_work(ehci);
- spin_unlock_irq(&ehci->lock);
-
- ehci_writel(ehci, ehci->command, &ehci->regs->command);
- ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
- ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
-
- /* here we "know" root ports should always stay powered */
- ehci_port_power(ehci, 1);
-
- hcd->state = HC_STATE_SUSPENDED;
-
- return 0;
-}
-
-static const struct dev_pm_ops au1xxx_ehci_pmops = {
- .suspend = ehci_hcd_au1xxx_drv_suspend,
- .resume = ehci_hcd_au1xxx_drv_resume,
-};
-
-#define AU1XXX_EHCI_PMOPS &au1xxx_ehci_pmops
-
-#else
-#define AU1XXX_EHCI_PMOPS NULL
-#endif
-
-static struct platform_driver ehci_hcd_au1xxx_driver = {
- .probe = ehci_hcd_au1xxx_drv_probe,
- .remove = ehci_hcd_au1xxx_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
- .driver = {
- .name = "au1xxx-ehci",
- .owner = THIS_MODULE,
- .pm = AU1XXX_EHCI_PMOPS,
- }
-};
-
-MODULE_ALIAS("platform:au1xxx-ehci");
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 86afdc73322..524cbf26d99 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -18,24 +18,7 @@
/* this file is part of ehci-hcd.c */
-#define ehci_dbg(ehci, fmt, args...) \
- dev_dbg (ehci_to_hcd(ehci)->self.controller , fmt , ## args )
-#define ehci_err(ehci, fmt, args...) \
- dev_err (ehci_to_hcd(ehci)->self.controller , fmt , ## args )
-#define ehci_info(ehci, fmt, args...) \
- dev_info (ehci_to_hcd(ehci)->self.controller , fmt , ## args )
-#define ehci_warn(ehci, fmt, args...) \
- dev_warn (ehci_to_hcd(ehci)->self.controller , fmt , ## args )
-
-#ifdef VERBOSE_DEBUG
-# define vdbg dbg
-# define ehci_vdbg ehci_dbg
-#else
-# define vdbg(fmt,args...) do { } while (0)
-# define ehci_vdbg(ehci, fmt, args...) do { } while (0)
-#endif
-
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
/* check the values in the HCSPARAMS register
* (host controller _Structural_ parameters)
@@ -79,7 +62,7 @@ static inline void dbg_hcs_params (struct ehci_hcd *ehci, char *label) {}
#endif
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
/* check the values in the HCCPARAMS register
* (host controller _Capability_ parameters)
@@ -109,7 +92,7 @@ static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
HCC_PER_PORT_CHANGE_EVENT(params) ? " ppce" : "",
HCC_HW_PREFETCH(params) ? " hw prefetch" : "",
HCC_32FRAME_PERIODIC_LIST(params) ?
- " 32 peridic list" : "");
+ " 32 periodic list" : "");
}
}
#else
@@ -118,7 +101,7 @@ static inline void dbg_hcc_params (struct ehci_hcd *ehci, char *label) {}
#endif
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
static void __maybe_unused
dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
@@ -318,7 +301,7 @@ static inline int __maybe_unused
dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
{ return 0; }
-#endif /* DEBUG */
+#endif /* CONFIG_DYNAMIC_DEBUG */
/* functions have the "wrong" filename when they're output... */
#define dbg_status(ehci, label, status) { \
@@ -351,15 +334,9 @@ static inline void remove_debug_files (struct ehci_hcd *bus) { }
/* troubleshooting help: expose state in debugfs */
static int debug_async_open(struct inode *, struct file *);
+static int debug_bandwidth_open(struct inode *, struct file *);
static int debug_periodic_open(struct inode *, struct file *);
static int debug_registers_open(struct inode *, struct file *);
-static int debug_async_open(struct inode *, struct file *);
-static int debug_lpm_open(struct inode *, struct file *);
-static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-static ssize_t debug_lpm_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos);
-static int debug_lpm_close(struct inode *inode, struct file *file);
static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
static int debug_close(struct inode *, struct file *);
@@ -371,6 +348,13 @@ static const struct file_operations debug_async_fops = {
.release = debug_close,
.llseek = default_llseek,
};
+static const struct file_operations debug_bandwidth_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_bandwidth_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
static const struct file_operations debug_periodic_fops = {
.owner = THIS_MODULE,
.open = debug_periodic_open,
@@ -385,14 +369,6 @@ static const struct file_operations debug_registers_fops = {
.release = debug_close,
.llseek = default_llseek,
};
-static const struct file_operations debug_lpm_fops = {
- .owner = THIS_MODULE,
- .open = debug_lpm_open,
- .read = debug_lpm_read,
- .write = debug_lpm_write,
- .release = debug_lpm_close,
- .llseek = noop_llseek,
-};
static struct dentry *ehci_debug_root;
@@ -407,11 +383,11 @@ struct debug_buffer {
#define speed_char(info1) ({ char tmp; \
switch (info1 & (3 << 12)) { \
- case 0 << 12: tmp = 'f'; break; \
- case 1 << 12: tmp = 'l'; break; \
- case 2 << 12: tmp = 'h'; break; \
+ case QH_FULL_SPEED: tmp = 'f'; break; \
+ case QH_LOW_SPEED: tmp = 'l'; break; \
+ case QH_HIGH_SPEED: tmp = 'h'; break; \
default: tmp = '?'; break; \
- }; tmp; })
+ } tmp; })
static inline char token_mark(struct ehci_hcd *ehci, __hc32 token)
{
@@ -541,19 +517,105 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
spin_lock_irqsave (&ehci->lock, flags);
for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
qh_lines (ehci, qh, &next, &size);
- if (ehci->reclaim && size > 0) {
- temp = scnprintf (next, size, "\nreclaim =\n");
+ if (!list_empty(&ehci->async_unlink) && size > 0) {
+ temp = scnprintf(next, size, "\nunlink =\n");
size -= temp;
next += temp;
- for (qh = ehci->reclaim; size > 0 && qh; qh = qh->reclaim)
- qh_lines (ehci, qh, &next, &size);
+ list_for_each_entry(qh, &ehci->async_unlink, unlink_node) {
+ if (size <= 0)
+ break;
+ qh_lines(ehci, qh, &next, &size);
+ }
}
spin_unlock_irqrestore (&ehci->lock, flags);
return strlen(buf->output_buf);
}
+static ssize_t fill_bandwidth_buffer(struct debug_buffer *buf)
+{
+ struct ehci_hcd *ehci;
+ struct ehci_tt *tt;
+ struct ehci_per_sched *ps;
+ unsigned temp, size;
+ char *next;
+ unsigned i;
+ u8 *bw;
+ u16 *bf;
+ u8 budget[EHCI_BANDWIDTH_SIZE];
+
+ ehci = hcd_to_ehci(bus_to_hcd(buf->bus));
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ *next = 0;
+
+ spin_lock_irq(&ehci->lock);
+
+ /* Dump the HS bandwidth table */
+ temp = scnprintf(next, size,
+ "HS bandwidth allocation (us per microframe)\n");
+ size -= temp;
+ next += temp;
+ for (i = 0; i < EHCI_BANDWIDTH_SIZE; i += 8) {
+ bw = &ehci->bandwidth[i];
+ temp = scnprintf(next, size,
+ "%2u: %4u%4u%4u%4u%4u%4u%4u%4u\n",
+ i, bw[0], bw[1], bw[2], bw[3],
+ bw[4], bw[5], bw[6], bw[7]);
+ size -= temp;
+ next += temp;
+ }
+
+ /* Dump all the FS/LS tables */
+ list_for_each_entry(tt, &ehci->tt_list, tt_list) {
+ temp = scnprintf(next, size,
+ "\nTT %s port %d FS/LS bandwidth allocation (us per frame)\n",
+ dev_name(&tt->usb_tt->hub->dev),
+ tt->tt_port + !!tt->usb_tt->multi);
+ size -= temp;
+ next += temp;
+
+ bf = tt->bandwidth;
+ temp = scnprintf(next, size,
+ " %5u%5u%5u%5u%5u%5u%5u%5u\n",
+ bf[0], bf[1], bf[2], bf[3],
+ bf[4], bf[5], bf[6], bf[7]);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf(next, size,
+ "FS/LS budget (us per microframe)\n");
+ size -= temp;
+ next += temp;
+ compute_tt_budget(budget, tt);
+ for (i = 0; i < EHCI_BANDWIDTH_SIZE; i += 8) {
+ bw = &budget[i];
+ temp = scnprintf(next, size,
+ "%2u: %4u%4u%4u%4u%4u%4u%4u%4u\n",
+ i, bw[0], bw[1], bw[2], bw[3],
+ bw[4], bw[5], bw[6], bw[7]);
+ size -= temp;
+ next += temp;
+ }
+ list_for_each_entry(ps, &tt->ps_list, ps_list) {
+ temp = scnprintf(next, size,
+ "%s ep %02x: %4u @ %2u.%u+%u mask %04x\n",
+ dev_name(&ps->udev->dev),
+ ps->ep->desc.bEndpointAddress,
+ ps->tt_usecs,
+ ps->bw_phase, ps->phase_uf,
+ ps->bw_period, ps->cs_mask);
+ size -= temp;
+ next += temp;
+ }
+ }
+ spin_unlock_irq(&ehci->lock);
+
+ return next - buf->output_buf;
+}
+
#define DBG_SCHED_LIMIT 64
static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
{
@@ -600,7 +662,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
case Q_TYPE_QH:
hw = p.qh->hw;
temp = scnprintf (next, size, " qh%d-%04x/%p",
- p.qh->period,
+ p.qh->ps.period,
hc32_to_cpup(ehci,
&hw->hw_info2)
/* uframe masks */
@@ -647,7 +709,8 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
speed_char (scratch),
scratch & 0x007f,
(scratch >> 8) & 0x000f, type,
- p.qh->usecs, p.qh->c_usecs,
+ p.qh->ps.usecs,
+ p.qh->ps.c_usecs,
temp,
0x7ff & (scratch >> 16));
@@ -655,10 +718,8 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
seen [seen_count++].qh = p.qh;
} else
temp = 0;
- if (p.qh) {
- tag = Q_NEXT_TYPE(ehci, hw->hw_next);
- p = p.qh->qh_next;
- }
+ tag = Q_NEXT_TYPE(ehci, hw->hw_next);
+ p = p.qh->qh_next;
break;
case Q_TYPE_FSTN:
temp = scnprintf (next, size,
@@ -676,7 +737,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
case Q_TYPE_SITD:
temp = scnprintf (next, size,
" sitd%d-%04x/%p",
- p.sitd->stream->interval,
+ p.sitd->stream->ps.period,
hc32_to_cpup(ehci, &p.sitd->hw_uframe)
& 0x0000ffff,
p.sitd);
@@ -699,6 +760,21 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
}
#undef DBG_SCHED_LIMIT
+static const char *rh_state_string(struct ehci_hcd *ehci)
+{
+ switch (ehci->rh_state) {
+ case EHCI_RH_HALTED:
+ return "halted";
+ case EHCI_RH_SUSPENDED:
+ return "suspended";
+ case EHCI_RH_RUNNING:
+ return "running";
+ case EHCI_RH_STOPPING:
+ return "stopping";
+ }
+ return "?";
+}
+
static ssize_t fill_registers_buffer(struct debug_buffer *buf)
{
struct usb_hcd *hcd;
@@ -728,21 +804,21 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
}
/* Capability Registers */
- i = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ i = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
temp = scnprintf (next, size,
"bus %s, device %s\n"
"%s\n"
- "EHCI %x.%02x, hcd state %d\n",
+ "EHCI %x.%02x, rh state %s\n",
hcd->self.controller->bus->name,
dev_name(hcd->self.controller),
hcd->product_desc,
- i >> 8, i & 0x0ff, hcd->state);
+ i >> 8, i & 0x0ff, rh_state_string(ehci));
size -= temp;
next += temp;
#ifdef CONFIG_PCI
/* EHCI 0.96 and later may have "extended capabilities" */
- if (hcd->self.controller->bus == &pci_bus_type) {
+ if (dev_is_pci(hcd->self.controller)) {
struct pci_dev *pdev;
u32 offset, cap, cap2;
unsigned count = 256/4;
@@ -810,7 +886,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
next += temp;
temp = scnprintf (next, size, "uframe %04x\n",
- ehci_readl(ehci, &ehci->regs->frame_index));
+ ehci_read_frame_index(ehci));
size -= temp;
next += temp;
@@ -831,16 +907,18 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
}
}
- if (ehci->reclaim) {
- temp = scnprintf(next, size, "reclaim qh %p\n", ehci->reclaim);
+ if (!list_empty(&ehci->async_unlink)) {
+ temp = scnprintf(next, size, "async unlink qh %p\n",
+ list_first_entry(&ehci->async_unlink,
+ struct ehci_qh, unlink_node));
size -= temp;
next += temp;
}
#ifdef EHCI_STATS
temp = scnprintf (next, size,
- "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
- ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
+ "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+ ehci->stats.normal, ehci->stats.error, ehci->stats.iaa,
ehci->stats.lost_iaa);
size -= temp;
next += temp;
@@ -879,7 +957,7 @@ static int fill_buffer(struct debug_buffer *buf)
int ret = 0;
if (!buf->output_buf)
- buf->output_buf = (char *)vmalloc(buf->alloc_size);
+ buf->output_buf = vmalloc(buf->alloc_size);
if (!buf->output_buf) {
ret = -ENOMEM;
@@ -932,6 +1010,7 @@ static int debug_close(struct inode *inode, struct file *file)
return 0;
}
+
static int debug_async_open(struct inode *inode, struct file *file)
{
file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
@@ -939,6 +1018,14 @@ static int debug_async_open(struct inode *inode, struct file *file)
return file->private_data ? 0 : -ENOMEM;
}
+static int debug_bandwidth_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private,
+ fill_bandwidth_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
static int debug_periodic_open(struct inode *inode, struct file *file)
{
struct debug_buffer *buf;
@@ -959,94 +1046,6 @@ static int debug_registers_open(struct inode *inode, struct file *file)
return file->private_data ? 0 : -ENOMEM;
}
-static int debug_lpm_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-static int debug_lpm_close(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- /* TODO: show lpm stats */
- return 0;
-}
-
-static ssize_t debug_lpm_write(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct usb_hcd *hcd;
- struct ehci_hcd *ehci;
- char buf[50];
- size_t len;
- u32 temp;
- unsigned long port;
- u32 __iomem *portsc ;
- u32 params;
-
- hcd = bus_to_hcd(file->private_data);
- ehci = hcd_to_ehci(hcd);
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
- if (len > 0 && buf[len - 1] == '\n')
- buf[len - 1] = '\0';
-
- if (strncmp(buf, "enable", 5) == 0) {
- if (strict_strtoul(buf + 7, 10, &port))
- return -EINVAL;
- params = ehci_readl(ehci, &ehci->caps->hcs_params);
- if (port > HCS_N_PORTS(params)) {
- ehci_dbg(ehci, "ERR: LPM on bad port %lu\n", port);
- return -ENODEV;
- }
- portsc = &ehci->regs->port_status[port-1];
- temp = ehci_readl(ehci, portsc);
- if (!(temp & PORT_DEV_ADDR)) {
- ehci_dbg(ehci, "LPM: no device attached\n");
- return -ENODEV;
- }
- temp |= PORT_LPM;
- ehci_writel(ehci, temp, portsc);
- printk(KERN_INFO "force enable LPM for port %lu\n", port);
- } else if (strncmp(buf, "hird=", 5) == 0) {
- unsigned long hird;
- if (strict_strtoul(buf + 5, 16, &hird))
- return -EINVAL;
- printk(KERN_INFO "setting hird %s %lu\n", buf + 6, hird);
- temp = ehci_readl(ehci, &ehci->regs->command);
- temp &= ~CMD_HIRD;
- temp |= hird << 24;
- ehci_writel(ehci, temp, &ehci->regs->command);
- } else if (strncmp(buf, "disable", 7) == 0) {
- if (strict_strtoul(buf + 8, 10, &port))
- return -EINVAL;
- params = ehci_readl(ehci, &ehci->caps->hcs_params);
- if (port > HCS_N_PORTS(params)) {
- ehci_dbg(ehci, "ERR: LPM off bad port %lu\n", port);
- return -ENODEV;
- }
- portsc = &ehci->regs->port_status[port-1];
- temp = ehci_readl(ehci, portsc);
- if (!(temp & PORT_DEV_ADDR)) {
- ehci_dbg(ehci, "ERR: no device attached\n");
- return -ENODEV;
- }
- temp &= ~PORT_LPM;
- ehci_writel(ehci, temp, portsc);
- printk(KERN_INFO "disabled LPM for port %lu\n", port);
- } else
- return -EOPNOTSUPP;
- return count;
-}
-
static inline void create_debug_files (struct ehci_hcd *ehci)
{
struct usb_bus *bus = &ehci_to_hcd(ehci)->self;
@@ -1059,6 +1058,10 @@ static inline void create_debug_files (struct ehci_hcd *ehci)
&debug_async_fops))
goto file_error;
+ if (!debugfs_create_file("bandwidth", S_IRUGO, ehci->debug_dir, bus,
+ &debug_bandwidth_fops))
+ goto file_error;
+
if (!debugfs_create_file("periodic", S_IRUGO, ehci->debug_dir, bus,
&debug_periodic_fops))
goto file_error;
@@ -1067,10 +1070,6 @@ static inline void create_debug_files (struct ehci_hcd *ehci)
&debug_registers_fops))
goto file_error;
- if (!debugfs_create_file("lpm", S_IRUGO|S_IWUGO, ehci->debug_dir, bus,
- &debug_lpm_fops))
- goto file_error;
-
return;
file_error:
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
new file mode 100644
index 00000000000..d1c76216350
--- /dev/null
+++ b/drivers/usb/host/ehci-exynos.c
@@ -0,0 +1,390 @@
+/*
+ * SAMSUNG EXYNOS USB HOST EHCI Controller
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/samsung_usb_phy.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+
+#include "ehci.h"
+
+#define DRIVER_DESC "EHCI EXYNOS driver"
+
+#define EHCI_INSNREG00(base) (base + 0x90)
+#define EHCI_INSNREG00_ENA_INCR16 (0x1 << 25)
+#define EHCI_INSNREG00_ENA_INCR8 (0x1 << 24)
+#define EHCI_INSNREG00_ENA_INCR4 (0x1 << 23)
+#define EHCI_INSNREG00_ENA_INCRX_ALIGN (0x1 << 22)
+#define EHCI_INSNREG00_ENABLE_DMA_BURST \
+ (EHCI_INSNREG00_ENA_INCR16 | EHCI_INSNREG00_ENA_INCR8 | \
+ EHCI_INSNREG00_ENA_INCR4 | EHCI_INSNREG00_ENA_INCRX_ALIGN)
+
+static const char hcd_name[] = "ehci-exynos";
+static struct hc_driver __read_mostly exynos_ehci_hc_driver;
+
+#define PHY_NUMBER 3
+
+struct exynos_ehci_hcd {
+ struct clk *clk;
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+ struct phy *phy_g[PHY_NUMBER];
+};
+
+#define to_exynos_ehci(hcd) (struct exynos_ehci_hcd *)(hcd_to_ehci(hcd)->priv)
+
+static int exynos_ehci_get_phy(struct device *dev,
+ struct exynos_ehci_hcd *exynos_ehci)
+{
+ struct device_node *child;
+ struct phy *phy;
+ int phy_number;
+ int ret = 0;
+
+ exynos_ehci->phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(exynos_ehci->phy)) {
+ ret = PTR_ERR(exynos_ehci->phy);
+ if (ret != -ENXIO && ret != -ENODEV) {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
+ dev_dbg(dev, "Failed to get usb2 phy\n");
+ } else {
+ exynos_ehci->otg = exynos_ehci->phy->otg;
+ }
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ ret = of_property_read_u32(child, "reg", &phy_number);
+ if (ret) {
+ dev_err(dev, "Failed to parse device tree\n");
+ of_node_put(child);
+ return ret;
+ }
+
+ if (phy_number >= PHY_NUMBER) {
+ dev_err(dev, "Invalid number of PHYs\n");
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ phy = devm_of_phy_get(dev, child, 0);
+ of_node_put(child);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ if (ret != -ENOSYS && ret != -ENODEV) {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
+ dev_dbg(dev, "Failed to get usb2 phy\n");
+ }
+ exynos_ehci->phy_g[phy_number] = phy;
+ }
+
+ return ret;
+}
+
+static int exynos_ehci_phy_enable(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+ int i;
+ int ret = 0;
+
+ if (!IS_ERR(exynos_ehci->phy))
+ return usb_phy_init(exynos_ehci->phy);
+
+ for (i = 0; ret == 0 && i < PHY_NUMBER; i++)
+ if (!IS_ERR(exynos_ehci->phy_g[i]))
+ ret = phy_power_on(exynos_ehci->phy_g[i]);
+ if (ret)
+ for (i--; i >= 0; i--)
+ if (!IS_ERR(exynos_ehci->phy_g[i]))
+ phy_power_off(exynos_ehci->phy_g[i]);
+
+ return ret;
+}
+
+static void exynos_ehci_phy_disable(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+ int i;
+
+ if (!IS_ERR(exynos_ehci->phy)) {
+ usb_phy_shutdown(exynos_ehci->phy);
+ return;
+ }
+
+ for (i = 0; i < PHY_NUMBER; i++)
+ if (!IS_ERR(exynos_ehci->phy_g[i]))
+ phy_power_off(exynos_ehci->phy_g[i]);
+}
+
+static void exynos_setup_vbus_gpio(struct device *dev)
+{
+ int err;
+ int gpio;
+
+ if (!dev->of_node)
+ return;
+
+ gpio = of_get_named_gpio(dev->of_node, "samsung,vbus-gpio", 0);
+ if (!gpio_is_valid(gpio))
+ return;
+
+ err = devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_HIGH,
+ "ehci_vbus_gpio");
+ if (err)
+ dev_err(dev, "can't request ehci vbus gpio %d", gpio);
+}
+
+static int exynos_ehci_probe(struct platform_device *pdev)
+{
+ struct exynos_ehci_hcd *exynos_ehci;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct resource *res;
+ int irq;
+ int err;
+
+ /*
+ * Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we move to full device tree support this will go away.
+ */
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ exynos_setup_vbus_gpio(&pdev->dev);
+
+ hcd = usb_create_hcd(&exynos_ehci_hc_driver,
+ &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ return -ENOMEM;
+ }
+ exynos_ehci = to_exynos_ehci(hcd);
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "samsung,exynos5440-ehci"))
+ goto skip_phy;
+
+ err = exynos_ehci_get_phy(&pdev->dev, exynos_ehci);
+ if (err)
+ goto fail_clk;
+
+skip_phy:
+
+ exynos_ehci->clk = devm_clk_get(&pdev->dev, "usbhost");
+
+ if (IS_ERR(exynos_ehci->clk)) {
+ dev_err(&pdev->dev, "Failed to get usbhost clock\n");
+ err = PTR_ERR(exynos_ehci->clk);
+ goto fail_clk;
+ }
+
+ err = clk_prepare_enable(exynos_ehci->clk);
+ if (err)
+ goto fail_clk;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto fail_io;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
+ goto fail_io;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ /* platform_get_irq() returns a negative errno on failure */
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = irq;
+ goto fail_io;
+ }
+
+ if (exynos_ehci->otg)
+ exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
+
+ err = exynos_ehci_phy_enable(&pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable USB phy\n");
+ goto fail_io;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = hcd->regs;
+
+ /* DMA burst Enable */
+ writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
+
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add USB HCD\n");
+ goto fail_add_hcd;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ platform_set_drvdata(pdev, hcd);
+
+ return 0;
+
+fail_add_hcd:
+ exynos_ehci_phy_disable(&pdev->dev);
+fail_io:
+ clk_disable_unprepare(exynos_ehci->clk);
+fail_clk:
+ usb_put_hcd(hcd);
+ return err;
+}
+
+static int exynos_ehci_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+
+ usb_remove_hcd(hcd);
+
+ if (exynos_ehci->otg)
+ exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
+
+ exynos_ehci_phy_disable(&pdev->dev);
+
+ clk_disable_unprepare(exynos_ehci->clk);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int exynos_ehci_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+
+ bool do_wakeup = device_may_wakeup(dev);
+ int rc;
+
+ rc = ehci_suspend(hcd, do_wakeup);
+ if (rc)
+ return rc;
+
+ if (exynos_ehci->otg)
+ exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
+
+ exynos_ehci_phy_disable(dev);
+
+ clk_disable_unprepare(exynos_ehci->clk);
+
+ return rc;
+}
+
+static int exynos_ehci_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+ int ret;
+
+ clk_prepare_enable(exynos_ehci->clk);
+
+ if (exynos_ehci->otg)
+ exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
+
+ ret = exynos_ehci_phy_enable(dev);
+ if (ret) {
+ dev_err(dev, "Failed to enable USB phy\n");
+ clk_disable_unprepare(exynos_ehci->clk);
+ return ret;
+ }
+
+ /* DMA burst Enable */
+ writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
+
+ ehci_resume(hcd, false);
+ return 0;
+}
+#else
+#define exynos_ehci_suspend NULL
+#define exynos_ehci_resume NULL
+#endif
+
+static const struct dev_pm_ops exynos_ehci_pm_ops = {
+ .suspend = exynos_ehci_suspend,
+ .resume = exynos_ehci_resume,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id exynos_ehci_match[] = {
+ { .compatible = "samsung,exynos4210-ehci" },
+ { .compatible = "samsung,exynos5440-ehci" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_ehci_match);
+#endif
+
+static struct platform_driver exynos_ehci_driver = {
+ .probe = exynos_ehci_probe,
+ .remove = exynos_ehci_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "exynos-ehci",
+ .owner = THIS_MODULE,
+ .pm = &exynos_ehci_pm_ops,
+ .of_match_table = of_match_ptr(exynos_ehci_match),
+ }
+};
+static const struct ehci_driver_overrides exynos_overrides __initdata = {
+ .extra_priv_size = sizeof(struct exynos_ehci_hcd),
+};
+
+static int __init ehci_exynos_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ehci_init_driver(&exynos_ehci_hc_driver, &exynos_overrides);
+ return platform_driver_register(&exynos_ehci_driver);
+}
+module_init(ehci_exynos_init);
+
+static void __exit ehci_exynos_cleanup(void)
+{
+ platform_driver_unregister(&exynos_ehci_driver);
+}
+module_exit(ehci_exynos_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:exynos-ehci");
+MODULE_AUTHOR("Jingoo Han");
+MODULE_AUTHOR("Joonyoung Shim");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 86e42892016..cf2734b532a 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -1,6 +1,6 @@
/*
* Copyright 2005-2009 MontaVista Software, Inc.
- * Copyright 2008 Freescale Semiconductor, Inc.
+ * Copyright 2008,2012 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pm.h>
+#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
@@ -52,12 +53,11 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
struct resource *res;
int irq;
int retval;
- unsigned int temp;
pr_debug("initializing FSL-SOC USB Controller\n");
/* Need platform data for setup */
- pdata = (struct fsl_usb2_platform_data *)pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev,
"No platform data for %s.\n", dev_name(&pdev->dev));
@@ -101,58 +101,61 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
goto err2;
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
- driver->description)) {
- dev_dbg(&pdev->dev, "controller already in use\n");
- retval = -EBUSY;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
goto err2;
}
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
-
- if (hcd->regs == NULL) {
- dev_dbg(&pdev->dev, "error mapping memory\n");
- retval = -EFAULT;
- goto err3;
- }
pdata->regs = hcd->regs;
+ if (pdata->power_budget)
+ hcd->power_budget = pdata->power_budget;
+
/*
* do platform specific init: check the clock, grab/config pins, etc.
*/
if (pdata->init && pdata->init(pdev)) {
retval = -ENODEV;
- goto err3;
+ goto err2;
}
- /*
- * Check if it is MPC5121 SoC, otherwise set pdata->have_sysif_regs
- * flag for 83xx or 8536 system interface registers.
- */
- if (pdata->big_endian_mmio)
- temp = in_be32(hcd->regs + FSL_SOC_USB_ID);
- else
- temp = in_le32(hcd->regs + FSL_SOC_USB_ID);
-
- if ((temp & ID_MSK) != (~((temp & NID_MSK) >> 8) & ID_MSK))
- pdata->have_sysif_regs = 1;
-
/* Enable USB controller, 83xx or 8536 */
- if (pdata->have_sysif_regs)
+ if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
/* Don't need to set host mode here. It will be done by tdi_reset() */
- retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval != 0)
- goto err4;
+ goto err2;
+ device_wakeup_enable(hcd->self.controller);
+
+#ifdef CONFIG_USB_OTG
+ if (pdata->operating_mode == FSL_USB2_DR_OTG) {
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ hcd->phy = usb_get_phy(USB_PHY_TYPE_USB2);
+ dev_dbg(&pdev->dev, "hcd=0x%p ehci=0x%p, phy=0x%p\n",
+ hcd, ehci, hcd->phy);
+
+ if (!IS_ERR_OR_NULL(hcd->phy)) {
+ retval = otg_set_host(hcd->phy->otg,
+ &ehci_to_hcd(ehci)->self);
+ if (retval) {
+ usb_put_phy(hcd->phy);
+ goto err2;
+ }
+ } else {
+ dev_err(&pdev->dev, "can't find phy\n");
+ retval = -ENODEV;
+ goto err2;
+ }
+ }
+#endif
return retval;
- err4:
- iounmap(hcd->regs);
- err3:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err2:
usb_put_hcd(hcd);
err1:
@@ -176,7 +179,12 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
struct platform_device *pdev)
{
- struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ if (!IS_ERR_OR_NULL(hcd->phy)) {
+ otg_set_host(hcd->phy->otg, NULL);
+ usb_put_phy(hcd->phy);
+ }
usb_remove_hcd(hcd);
@@ -186,22 +194,35 @@ static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
*/
if (pdata->exit)
pdata->exit(pdev);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
}
-static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
+static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
enum fsl_usb2_phy_modes phy_mode,
unsigned int port_offset)
{
u32 portsc;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ void __iomem *non_ehci = hcd->regs;
+ struct device *dev = hcd->self.controller;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
+
+ if (pdata->controller_ver < 0) {
+ dev_warn(hcd->self.controller, "Could not get controller version\n");
+ return -ENODEV;
+ }
portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]);
portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW);
switch (phy_mode) {
case FSL_USB2_PHY_ULPI:
+ if (pdata->have_sysif_regs && pdata->controller_ver) {
+ /* controller version 1.6 or above */
+ clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
+ setbits32(non_ehci + FSL_SOC_USB_CTRL,
+ ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
+ }
portsc |= PORT_PTS_ULPI;
break;
case FSL_USB2_PHY_SERIAL:
@@ -211,46 +232,67 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
portsc |= PORT_PTS_PTW;
/* fall through */
case FSL_USB2_PHY_UTMI:
+ if (pdata->have_sysif_regs && pdata->controller_ver) {
+ /* controller version 1.6 or above */
+ setbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
+ /* Delay for UTMI PHY CLK to become stable - 10 ms */
+ mdelay(FSL_UTMI_PHY_DLY);
+ }
+ /* enable UTMI PHY */
+ if (pdata->have_sysif_regs)
+ setbits32(non_ehci + FSL_SOC_USB_CTRL,
+ CTRL_UTMI_PHY_EN);
portsc |= PORT_PTS_UTMI;
break;
case FSL_USB2_PHY_NONE:
break;
}
+
+ if (pdata->have_sysif_regs &&
+ pdata->controller_ver > FSL_USB_VER_1_6 &&
+ (phy_mode == FSL_USB2_PHY_ULPI)) {
+ /* check PHY_CLK_VALID to get phy clk valid */
+ if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
+ PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
+ in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
+ dev_warn(hcd->self.controller, "USB PHY clock invalid\n");
+ return -EINVAL;
+ }
+ }
+
ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
+
+ if (phy_mode != FSL_USB2_PHY_ULPI && pdata->have_sysif_regs)
+ setbits32(non_ehci + FSL_SOC_USB_CTRL, USB_CTRL_USB_EN);
+
+ return 0;
}
-static void ehci_fsl_usb_setup(struct ehci_hcd *ehci)
+static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
{
struct usb_hcd *hcd = ehci_to_hcd(ehci);
struct fsl_usb2_platform_data *pdata;
void __iomem *non_ehci = hcd->regs;
- u32 temp;
- pdata = hcd->self.controller->platform_data;
+ pdata = dev_get_platdata(hcd->self.controller);
- /* Enable PHY interface in the control reg. */
if (pdata->have_sysif_regs) {
- temp = in_be32(non_ehci + FSL_SOC_USB_CTRL);
- out_be32(non_ehci + FSL_SOC_USB_CTRL, temp | 0x00000004);
- out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0000001b);
+ /*
+ * Turn on cache snooping hardware, since some PowerPC platforms
+ * wholly rely on hardware to deal with cache coherency
+ */
+
+ /* Setup Snooping for all the 4GB space */
+ /* SNOOP1 starts from 0x0, size 2G */
+ out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0 | SNOOP_SIZE_2GB);
+ /* SNOOP2 starts from 0x80000000, size 2G */
+ out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB);
}
-#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
- /*
- * Turn on cache snooping hardware, since some PowerPC platforms
- * wholly rely on hardware to deal with cache coherent
- */
-
- /* Setup Snooping for all the 4GB space */
- /* SNOOP1 starts from 0x0, size 2G */
- out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0 | SNOOP_SIZE_2GB);
- /* SNOOP2 starts from 0x80000000, size 2G */
- out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB);
-#endif
-
if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
(pdata->operating_mode == FSL_USB2_DR_OTG))
- ehci_fsl_setup_phy(ehci, pdata->phy_mode, 0);
+ if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0))
+ return -EINVAL;
if (pdata->operating_mode == FSL_USB2_MPH_HOST) {
unsigned int chip, rev, svr;
@@ -264,13 +306,16 @@ static void ehci_fsl_usb_setup(struct ehci_hcd *ehci)
ehci->has_fsl_port_bug = 1;
if (pdata->port_enables & FSL_USB2_PORT0_ENABLED)
- ehci_fsl_setup_phy(ehci, pdata->phy_mode, 0);
+ if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0))
+ return -EINVAL;
+
if (pdata->port_enables & FSL_USB2_PORT1_ENABLED)
- ehci_fsl_setup_phy(ehci, pdata->phy_mode, 1);
+ if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 1))
+ return -EINVAL;
}
if (pdata->have_sysif_regs) {
-#ifdef CONFIG_PPC_85xx
+#ifdef CONFIG_FSL_SOC_BOOKE
out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x00000008);
out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000080);
#else
@@ -279,13 +324,15 @@ static void ehci_fsl_usb_setup(struct ehci_hcd *ehci)
#endif
out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001);
}
+
+ return 0;
}
/* called after powerup, by probe or system-pm "wakeup" */
static int ehci_fsl_reinit(struct ehci_hcd *ehci)
{
- ehci_fsl_usb_setup(ehci);
- ehci_port_power(ehci, 0);
+ if (ehci_fsl_usb_setup(ehci))
+ return -EINVAL;
return 0;
}
@@ -296,35 +343,40 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int retval;
struct fsl_usb2_platform_data *pdata;
+ struct device *dev;
- pdata = hcd->self.controller->platform_data;
+ dev = hcd->self.controller;
+ pdata = dev_get_platdata(hcd->self.controller);
ehci->big_endian_desc = pdata->big_endian_desc;
ehci->big_endian_mmio = pdata->big_endian_mmio;
/* EHCI registers start at offset 0x100 */
ehci->caps = hcd->regs + 0x100;
- ehci->regs = hcd->regs + 0x100 +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
- dbg_hcs_params(ehci, "reset");
- dbg_hcc_params(ehci, "reset");
- /* cache this readonly data; minimize chip reads */
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+#ifdef CONFIG_PPC_83xx
+ /*
+ * Deal with MPC834X that need port power to be cycled after the power
+ * fault condition is removed. Otherwise the state machine does not
+ * reflect PORTSC[CSC] correctly.
+ */
+ ehci->need_oc_pp_cycle = 1;
+#endif
hcd->has_tt = 1;
- retval = ehci_halt(ehci);
+ retval = ehci_setup(hcd);
if (retval)
return retval;
- /* data structure init */
- retval = ehci_init(hcd);
- if (retval)
- return retval;
-
- ehci->sbrn = 0x20;
-
- ehci_reset(ehci);
+ if (of_device_is_compatible(dev->parent->of_node,
+ "fsl,mpc5121-usb2-dr")) {
+ /*
+ * set SBUSCFG:AHBBRST so that control msgs don't
+ * fail when doing heavy PATA writes.
+ */
+ ehci_writel(ehci, SBUSCFG_INCR8,
+ hcd->regs + FSL_SOC_USB_SBUSCFG);
+ }
retval = ehci_fsl_reinit(ehci);
return retval;
@@ -341,6 +393,151 @@ struct ehci_fsl {
#ifdef CONFIG_PM
+#ifdef CONFIG_PPC_MPC512x
+static int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
+ u32 tmp;
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+ u32 mode = ehci_readl(ehci, hcd->regs + FSL_SOC_USB_USBMODE);
+ mode &= USBMODE_CM_MASK;
+ tmp = ehci_readl(ehci, hcd->regs + 0x140); /* usbcmd */
+
+ dev_dbg(dev, "suspend=%d already_suspended=%d "
+ "mode=%d usbcmd %08x\n", pdata->suspended,
+ pdata->already_suspended, mode, tmp);
+#endif
+
+ /*
+ * If the controller is already suspended, then this must be a
+ * PM suspend. Remember this fact, so that we will leave the
+ * controller suspended at PM resume time.
+ */
+ if (pdata->suspended) {
+ dev_dbg(dev, "already suspended, leaving early\n");
+ pdata->already_suspended = 1;
+ return 0;
+ }
+
+ dev_dbg(dev, "suspending...\n");
+
+ ehci->rh_state = EHCI_RH_SUSPENDED;
+ dev->power.power_state = PMSG_SUSPEND;
+
+ /* ignore non-host interrupts */
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ /* stop the controller */
+ tmp = ehci_readl(ehci, &ehci->regs->command);
+ tmp &= ~CMD_RUN;
+ ehci_writel(ehci, tmp, &ehci->regs->command);
+
+ /* save EHCI registers */
+ pdata->pm_command = ehci_readl(ehci, &ehci->regs->command);
+ pdata->pm_command &= ~CMD_RUN;
+ pdata->pm_status = ehci_readl(ehci, &ehci->regs->status);
+ pdata->pm_intr_enable = ehci_readl(ehci, &ehci->regs->intr_enable);
+ pdata->pm_frame_index = ehci_readl(ehci, &ehci->regs->frame_index);
+ pdata->pm_segment = ehci_readl(ehci, &ehci->regs->segment);
+ pdata->pm_frame_list = ehci_readl(ehci, &ehci->regs->frame_list);
+ pdata->pm_async_next = ehci_readl(ehci, &ehci->regs->async_next);
+ pdata->pm_configured_flag =
+ ehci_readl(ehci, &ehci->regs->configured_flag);
+ pdata->pm_portsc = ehci_readl(ehci, &ehci->regs->port_status[0]);
+ pdata->pm_usbgenctrl = ehci_readl(ehci,
+ hcd->regs + FSL_SOC_USB_USBGENCTRL);
+
+ /* clear the W1C bits */
+ pdata->pm_portsc &= cpu_to_hc32(ehci, ~PORT_RWC_BITS);
+
+ pdata->suspended = 1;
+
+ /* clear PP to cut power to the port */
+ tmp = ehci_readl(ehci, &ehci->regs->port_status[0]);
+ tmp &= ~PORT_POWER;
+ ehci_writel(ehci, tmp, &ehci->regs->port_status[0]);
+
+ return 0;
+}
+
+static int ehci_fsl_mpc512x_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
+ u32 tmp;
+
+ dev_dbg(dev, "suspend=%d already_suspended=%d\n",
+ pdata->suspended, pdata->already_suspended);
+
+ /*
+ * If the controller was already suspended at suspend time,
+ * then don't resume it now.
+ */
+ if (pdata->already_suspended) {
+ dev_dbg(dev, "already suspended, leaving early\n");
+ pdata->already_suspended = 0;
+ return 0;
+ }
+
+ if (!pdata->suspended) {
+ dev_dbg(dev, "not suspended, leaving early\n");
+ return 0;
+ }
+
+ pdata->suspended = 0;
+
+ dev_dbg(dev, "resuming...\n");
+
+ /* set host mode */
+ tmp = USBMODE_CM_HOST | (pdata->es ? USBMODE_ES : 0);
+ ehci_writel(ehci, tmp, hcd->regs + FSL_SOC_USB_USBMODE);
+
+ ehci_writel(ehci, pdata->pm_usbgenctrl,
+ hcd->regs + FSL_SOC_USB_USBGENCTRL);
+ ehci_writel(ehci, ISIPHYCTRL_PXE | ISIPHYCTRL_PHYE,
+ hcd->regs + FSL_SOC_USB_ISIPHYCTRL);
+
+ ehci_writel(ehci, SBUSCFG_INCR8, hcd->regs + FSL_SOC_USB_SBUSCFG);
+
+ /* restore EHCI registers */
+ ehci_writel(ehci, pdata->pm_command, &ehci->regs->command);
+ ehci_writel(ehci, pdata->pm_intr_enable, &ehci->regs->intr_enable);
+ ehci_writel(ehci, pdata->pm_frame_index, &ehci->regs->frame_index);
+ ehci_writel(ehci, pdata->pm_segment, &ehci->regs->segment);
+ ehci_writel(ehci, pdata->pm_frame_list, &ehci->regs->frame_list);
+ ehci_writel(ehci, pdata->pm_async_next, &ehci->regs->async_next);
+ ehci_writel(ehci, pdata->pm_configured_flag,
+ &ehci->regs->configured_flag);
+ ehci_writel(ehci, pdata->pm_portsc, &ehci->regs->port_status[0]);
+
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ ehci->rh_state = EHCI_RH_RUNNING;
+ dev->power.power_state = PMSG_ON;
+
+ tmp = ehci_readl(ehci, &ehci->regs->command);
+ tmp |= CMD_RUN;
+ ehci_writel(ehci, tmp, &ehci->regs->command);
+
+ usb_hcd_resume_root_hub(hcd);
+
+ return 0;
+}
+#else
+static inline int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static inline int ehci_fsl_mpc512x_drv_resume(struct device *dev)
+{
+ return 0;
+}
+#endif /* CONFIG_PPC_MPC512x */
+
static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
@@ -354,6 +551,11 @@ static int ehci_fsl_drv_suspend(struct device *dev)
struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
void __iomem *non_ehci = hcd->regs;
+ if (of_device_is_compatible(dev->parent->of_node,
+ "fsl,mpc5121-usb2-dr")) {
+ return ehci_fsl_mpc512x_drv_suspend(dev);
+ }
+
ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
device_may_wakeup(dev));
if (!fsl_deep_sleep())
@@ -370,6 +572,11 @@ static int ehci_fsl_drv_resume(struct device *dev)
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
void __iomem *non_ehci = hcd->regs;
+ if (of_device_is_compatible(dev->parent->of_node,
+ "fsl,mpc5121-usb2-dr")) {
+ return ehci_fsl_mpc512x_drv_resume(dev);
+ }
+
ehci_prepare_ports_for_controller_resume(ehci);
if (!fsl_deep_sleep())
return 0;
@@ -404,6 +611,38 @@ static struct dev_pm_ops ehci_fsl_pm_ops = {
#define EHCI_FSL_PM_OPS NULL
#endif /* CONFIG_PM */
+#ifdef CONFIG_USB_OTG
+static int ehci_start_port_reset(struct usb_hcd *hcd, unsigned port)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ u32 status;
+
+ if (!port)
+ return -EINVAL;
+
+ port--;
+
+ /* start port reset before HNP protocol time out */
+ status = readl(&ehci->regs->port_status[port]);
+ if (!(status & PORT_CONNECT))
+ return -ENODEV;
+
+ /* khubd will finish the reset later */
+ if (ehci_is_TDI(ehci)) {
+ writel(PORT_RESET |
+ (status & ~(PORT_CSC | PORT_PEC | PORT_OCC)),
+ &ehci->regs->port_status[port]);
+ } else {
+ writel(PORT_RESET, &ehci->regs->port_status[port]);
+ }
+
+ return 0;
+}
+#else
+#define ehci_start_port_reset NULL
+#endif /* CONFIG_USB_OTG */
+
+
static const struct hc_driver ehci_fsl_hc_driver = {
.description = hcd_name,
.product_desc = "Freescale On-Chip EHCI Host Controller",
@@ -413,7 +652,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_USB2 | HCD_MEMORY,
+ .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
/*
* basic lifecycle operations
@@ -443,6 +682,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
.hub_control = ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
+ .start_port_reset = ehci_start_port_reset,
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
@@ -475,6 +715,7 @@ static struct platform_driver ehci_fsl_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "fsl-ehci",
+ .owner = THIS_MODULE,
.pm = EHCI_FSL_PM_OPS,
},
};
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index 2c835379522..dbd292e9f0a 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2005-2010 Freescale Semiconductor, Inc.
+/* Copyright (C) 2005-2010,2012 Freescale Semiconductor, Inc.
* Copyright (c) 2005 MontaVista Software
*
* This program is free software; you can redistribute it and/or modify it
@@ -19,9 +19,8 @@
#define _EHCI_FSL_H
/* offsets for the non-ehci registers in the FSL SOC USB controller */
-#define FSL_SOC_USB_ID 0x0
-#define ID_MSK 0x3f
-#define NID_MSK 0x3f00
+#define FSL_SOC_USB_SBUSCFG 0x90
+#define SBUSCFG_INCR8 0x02 /* INCR8, specified */
#define FSL_SOC_USB_ULPIVP 0x170
#define FSL_SOC_USB_PORTSC1 0x184
#define PORT_PTS_MSK (3<<30)
@@ -30,6 +29,10 @@
#define PORT_PTS_SERIAL (3<<30)
#define PORT_PTS_PTW (1<<28)
#define FSL_SOC_USB_PORTSC2 0x188
+#define FSL_SOC_USB_USBMODE 0x1a8
+#define USBMODE_CM_MASK (3 << 0) /* controller mode mask */
+#define USBMODE_CM_HOST (3 << 0) /* controller mode: host */
+#define USBMODE_ES (1 << 2) /* (Big) Endian Select */
#define FSL_SOC_USB_USBGENCTRL 0x200
#define USBGENCTRL_PPP (1 << 3)
@@ -44,5 +47,19 @@
#define FSL_SOC_USB_PRICTRL 0x40c /* NOTE: big-endian */
#define FSL_SOC_USB_SICTRL 0x410 /* NOTE: big-endian */
#define FSL_SOC_USB_CTRL 0x500 /* NOTE: big-endian */
+#define CTRL_UTMI_PHY_EN (1<<9)
+#define CTRL_PHY_CLK_VALID (1 << 17)
#define SNOOP_SIZE_2GB 0x1e
+
+/* control Register Bit Masks */
+#define ULPI_INT_EN (1<<0)
+#define WU_INT_EN (1<<1)
+#define USB_CTRL_USB_EN (1<<2)
+#define LINE_STATE_FILTER__EN (1<<3)
+#define KEEP_OTG_ON (1<<4)
+#define OTG_PORT (1<<5)
+#define PLL_RESET (1<<8)
+#define UTMI_PHY_EN (1<<9)
+#define ULPI_PHY_CLK_SEL (1<<10)
+#define PHY_CLK_VALID (1<<17)
#endif /* _EHCI_FSL_H */
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
new file mode 100644
index 00000000000..495b6fbcbcd
--- /dev/null
+++ b/drivers/usb/host/ehci-grlib.c
@@ -0,0 +1,193 @@
+/*
+ * Driver for Aeroflex Gaisler GRLIB GRUSBHC EHCI host controller
+ *
+ * GRUSBHC is typically found on LEON/GRLIB SoCs
+ *
+ * (c) Jan Andersson <jan@gaisler.com>
+ *
+ * Based on ehci-ppc-of.c which is:
+ * (c) Valentine Barshak <vbarshak@ru.mvista.com>
+ * and in turn based on "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
+ * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/signal.h>
+
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#define GRUSBHC_HCIVERSION 0x0100 /* Known value of cap. reg. HCIVERSION */
+
+static const struct hc_driver ehci_grlib_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "GRLIB GRUSBHC EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+#endif
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+
+static int ehci_hcd_grlib_probe(struct platform_device *op)
+{
+ struct device_node *dn = op->dev.of_node;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci = NULL;
+ struct resource res;
+ u32 hc_capbase;
+ int irq;
+ int rv;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ dev_dbg(&op->dev, "initializing GRUSBHC EHCI USB Controller\n");
+
+ rv = of_address_to_resource(dn, 0, &res);
+ if (rv)
+ return rv;
+
+ /* usb_create_hcd requires dma_mask != NULL */
+ op->dev.dma_mask = &op->dev.coherent_dma_mask;
+ hcd = usb_create_hcd(&ehci_grlib_hc_driver, &op->dev,
+ "GRUSBHC EHCI USB");
+ if (!hcd)
+ return -ENOMEM;
+
+ hcd->rsrc_start = res.start;
+ hcd->rsrc_len = resource_size(&res);
+
+ irq = irq_of_parse_and_map(dn, 0);
+ if (irq == NO_IRQ) {
+ dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+ __FILE__);
+ rv = -EBUSY;
+ goto err_irq;
+ }
+
+ hcd->regs = devm_ioremap_resource(&op->dev, &res);
+ if (IS_ERR(hcd->regs)) {
+ rv = PTR_ERR(hcd->regs);
+ goto err_ioremap;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+
+ ehci->caps = hcd->regs;
+
+ /* determine endianness of this implementation */
+ hc_capbase = ehci_readl(ehci, &ehci->caps->hc_capbase);
+ if (HC_VERSION(ehci, hc_capbase) != GRUSBHC_HCIVERSION) {
+ ehci->big_endian_mmio = 1;
+ ehci->big_endian_desc = 1;
+ ehci->big_endian_capbase = 1;
+ }
+
+ rv = usb_add_hcd(hcd, irq, 0);
+ if (rv)
+ goto err_ioremap;
+
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
+
+err_ioremap:
+ irq_dispose_mapping(irq);
+err_irq:
+ usb_put_hcd(hcd);
+
+ return rv;
+}
+
+
+static int ehci_hcd_grlib_remove(struct platform_device *op)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(op);
+
+ dev_dbg(&op->dev, "stopping GRLIB GRUSBHC EHCI USB Controller\n");
+
+ usb_remove_hcd(hcd);
+
+ irq_dispose_mapping(hcd->irq);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+
+static const struct of_device_id ehci_hcd_grlib_of_match[] = {
+ {
+ .name = "GAISLER_EHCI",
+ },
+ {
+ .name = "01_026",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehci_hcd_grlib_of_match);
+
+
+static struct platform_driver ehci_grlib_driver = {
+ .probe = ehci_hcd_grlib_probe,
+ .remove = ehci_hcd_grlib_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "grlib-ehci",
+ .owner = THIS_MODULE,
+ .of_match_table = ehci_hcd_grlib_of_match,
+ },
+};
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 502a7e6fef4..81cda09b47e 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1,4 +1,8 @@
/*
+ * Enhanced Host Controller Interface (EHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
* Copyright (c) 2000-2004 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
@@ -26,8 +30,7 @@
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/ktime.h>
+#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
@@ -36,14 +39,16 @@
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
-#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include <asm/unaligned.h>
+#if defined(CONFIG_PPC_PS3)
+#include <asm/firmware.h>
+#endif
+
/*-------------------------------------------------------------------------*/
/*
@@ -66,13 +71,8 @@
static const char hcd_name [] = "ehci_hcd";
-#undef VERBOSE_DEBUG
#undef EHCI_URB_TRACE
-#ifdef DEBUG
-#define EHCI_STATS
-#endif
-
/* magic numbers that can affect system performance */
#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
@@ -87,11 +87,6 @@ static const char hcd_name [] = "ehci_hcd";
*/
#define EHCI_TUNE_FLS 1 /* (medium) 512-frame schedule */
-#define EHCI_IAA_MSECS 10 /* arbitrary */
-#define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
-#define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
-#define EHCI_SHRINK_FRAMES 5 /* async qh unlink delay */
-
/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh = 0; // 0 to 6
module_param (log2_irq_thresh, int, S_IRUGO);
@@ -103,64 +98,50 @@ module_param (park, uint, S_IRUGO);
MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
/* for flakey hardware, ignore overcurrent indicators */
-static int ignore_oc = 0;
+static bool ignore_oc = 0;
module_param (ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
-/* for link power management(LPM) feature */
-static unsigned int hird;
-module_param(hird, int, S_IRUGO);
-MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n");
-
#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
/*-------------------------------------------------------------------------*/
#include "ehci.h"
-#include "ehci-dbg.c"
+#include "pci-quirks.h"
-/*-------------------------------------------------------------------------*/
+static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
+ struct ehci_tt *tt);
-static void
-timer_action(struct ehci_hcd *ehci, enum ehci_timer_action action)
+/*
+ * The MosChip MCS9990 controller updates its microframe counter
+ * a little before the frame counter, and occasionally we will read
+ * the invalid intermediate value. Avoid problems by checking the
+ * microframe number (the low-order 3 bits); if they are 0 then
+ * re-read the register to get the correct value.
+ */
+static unsigned ehci_moschip_read_frame_index(struct ehci_hcd *ehci)
{
- /* Don't override timeouts which shrink or (later) disable
- * the async ring; just the I/O watchdog. Note that if a
- * SHRINK were pending, OFF would never be requested.
- */
- if (timer_pending(&ehci->watchdog)
- && ((BIT(TIMER_ASYNC_SHRINK) | BIT(TIMER_ASYNC_OFF))
- & ehci->actions))
- return;
+ unsigned uf;
- if (!test_and_set_bit(action, &ehci->actions)) {
- unsigned long t;
+ uf = ehci_readl(ehci, &ehci->regs->frame_index);
+ if (unlikely((uf & 7) == 0))
+ uf = ehci_readl(ehci, &ehci->regs->frame_index);
+ return uf;
+}
- switch (action) {
- case TIMER_IO_WATCHDOG:
- if (!ehci->need_io_watchdog)
- return;
- t = EHCI_IO_JIFFIES;
- break;
- case TIMER_ASYNC_OFF:
- t = EHCI_ASYNC_JIFFIES;
- break;
- /* case TIMER_ASYNC_SHRINK: */
- default:
- /* add a jiffie since we synch against the
- * 8 KHz uframe counter.
- */
- t = DIV_ROUND_UP(EHCI_SHRINK_FRAMES * HZ, 1000) + 1;
- break;
- }
- mod_timer(&ehci->watchdog, t + jiffies);
- }
+static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
+{
+ if (ehci->frame_index_bug)
+ return ehci_moschip_read_frame_index(ehci);
+ return ehci_readl(ehci, &ehci->regs->frame_index);
}
+#include "ehci-dbg.c"
+
/*-------------------------------------------------------------------------*/
/*
- * handshake - spin reading hc until handshake completes or fails
+ * ehci_handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
@@ -176,8 +157,8 @@ timer_action(struct ehci_hcd *ehci, enum ehci_timer_action action)
* before driver shutdown. But it also seems to be caused by bugs in cardbus
* bridge shutdown: shutting down the bridge before the devices using it.
*/
-static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
- u32 mask, u32 done, int usec)
+int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
{
u32 result;
@@ -193,64 +174,57 @@ static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
} while (usec > 0);
return -ETIMEDOUT;
}
+EXPORT_SYMBOL_GPL(ehci_handshake);
/* check TDI/ARC silicon is in host mode */
static int tdi_in_host_mode (struct ehci_hcd *ehci)
{
- u32 __iomem *reg_ptr;
u32 tmp;
- reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
- tmp = ehci_readl(ehci, reg_ptr);
+ tmp = ehci_readl(ehci, &ehci->regs->usbmode);
return (tmp & 3) == USBMODE_CM_HC;
}
-/* force HC to halt state from unknown (EHCI spec section 2.3) */
+/*
+ * Force HC to halt state from unknown (EHCI spec section 2.3).
+ * Must be called with interrupts enabled and the lock not held.
+ */
static int ehci_halt (struct ehci_hcd *ehci)
{
- u32 temp = ehci_readl(ehci, &ehci->regs->status);
+ u32 temp;
+
+ spin_lock_irq(&ehci->lock);
/* disable any irqs left enabled by previous code */
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
- if (ehci_is_TDI(ehci) && tdi_in_host_mode(ehci) == 0) {
+ if (ehci_is_TDI(ehci) && !tdi_in_host_mode(ehci)) {
+ spin_unlock_irq(&ehci->lock);
return 0;
}
- if ((temp & STS_HALT) != 0)
- return 0;
-
+ /*
+ * This routine gets called during probe before ehci->command
+ * has been initialized, so we can't rely on its value.
+ */
+ ehci->command &= ~CMD_RUN;
temp = ehci_readl(ehci, &ehci->regs->command);
- temp &= ~CMD_RUN;
+ temp &= ~(CMD_RUN | CMD_IAAD);
ehci_writel(ehci, temp, &ehci->regs->command);
- return handshake (ehci, &ehci->regs->status,
- STS_HALT, STS_HALT, 16 * 125);
-}
-static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
- u32 mask, u32 done, int usec)
-{
- int error;
-
- error = handshake(ehci, ptr, mask, done, usec);
- if (error) {
- ehci_halt(ehci);
- ehci_to_hcd(ehci)->state = HC_STATE_HALT;
- ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n",
- ptr, mask, done, error);
- }
+ spin_unlock_irq(&ehci->lock);
+ synchronize_irq(ehci_to_hcd(ehci)->irq);
- return error;
+ return ehci_handshake(ehci, &ehci->regs->status,
+ STS_HALT, STS_HALT, 16 * 125);
}
/* put TDI/ARC silicon into EHCI mode */
static void tdi_reset (struct ehci_hcd *ehci)
{
- u32 __iomem *reg_ptr;
u32 tmp;
- reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
- tmp = ehci_readl(ehci, reg_ptr);
+ tmp = ehci_readl(ehci, &ehci->regs->usbmode);
tmp |= USBMODE_CM_HC;
/* The default byte access to MMR space is LE after
* controller reset. Set the required endian mode
@@ -258,10 +232,13 @@ static void tdi_reset (struct ehci_hcd *ehci)
*/
if (ehci_big_endian_mmio(ehci))
tmp |= USBMODE_BE;
- ehci_writel(ehci, tmp, reg_ptr);
+ ehci_writel(ehci, tmp, &ehci->regs->usbmode);
}
-/* reset a non-running (STS_HALT == 1) controller */
+/*
+ * Reset a non-running (STS_HALT == 1) controller.
+ * Must be called with interrupts enabled and the lock not held.
+ */
static int ehci_reset (struct ehci_hcd *ehci)
{
int retval;
@@ -269,22 +246,21 @@ static int ehci_reset (struct ehci_hcd *ehci)
/* If the EHCI debug controller is active, special care must be
* taken before and after a host controller reset */
- if (ehci->debug && !dbgp_reset_prep())
+ if (ehci->debug && !dbgp_reset_prep(ehci_to_hcd(ehci)))
ehci->debug = NULL;
command |= CMD_RESET;
dbg_cmd (ehci, "reset", command);
ehci_writel(ehci, command, &ehci->regs->command);
- ehci_to_hcd(ehci)->state = HC_STATE_HALT;
+ ehci->rh_state = EHCI_RH_HALTED;
ehci->next_statechange = jiffies;
- retval = handshake (ehci, &ehci->regs->command,
+ retval = ehci_handshake(ehci, &ehci->regs->command,
CMD_RESET, 0, 250 * 1000);
if (ehci->has_hostpc) {
ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
- (u32 __iomem *)(((u8 *)ehci->regs) + USBMODE_EX));
- ehci_writel(ehci, TXFIFO_DEFAULT,
- (u32 __iomem *)(((u8 *)ehci->regs) + TXFILLTUNING));
+ &ehci->regs->usbmode_ex);
+ ehci_writel(ehci, TXFIFO_DEFAULT, &ehci->regs->txfill_tuning);
}
if (retval)
return retval;
@@ -293,116 +269,58 @@ static int ehci_reset (struct ehci_hcd *ehci)
tdi_reset (ehci);
if (ehci->debug)
- dbgp_external_startup();
+ dbgp_external_startup(ehci_to_hcd(ehci));
+ ehci->port_c_suspend = ehci->suspended_ports =
+ ehci->resuming_ports = 0;
return retval;
}
-/* idle the controller (from running) */
+/*
+ * Idle the controller (turn off the schedules).
+ * Must be called with interrupts enabled and the lock not held.
+ */
static void ehci_quiesce (struct ehci_hcd *ehci)
{
u32 temp;
-#ifdef DEBUG
- if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
- BUG ();
-#endif
+ if (ehci->rh_state != EHCI_RH_RUNNING)
+ return;
/* wait for any schedule enables/disables to take effect */
- temp = ehci_readl(ehci, &ehci->regs->command) << 10;
- temp &= STS_ASS | STS_PSS;
- if (handshake_on_error_set_halt(ehci, &ehci->regs->status,
- STS_ASS | STS_PSS, temp, 16 * 125))
- return;
+ temp = (ehci->command << 10) & (STS_ASS | STS_PSS);
+ ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, temp,
+ 16 * 125);
/* then disable anything that's still active */
- temp = ehci_readl(ehci, &ehci->regs->command);
- temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
- ehci_writel(ehci, temp, &ehci->regs->command);
+ spin_lock_irq(&ehci->lock);
+ ehci->command &= ~(CMD_ASE | CMD_PSE);
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ spin_unlock_irq(&ehci->lock);
/* hardware can take 16 microframes to turn off ... */
- handshake_on_error_set_halt(ehci, &ehci->regs->status,
- STS_ASS | STS_PSS, 0, 16 * 125);
+ ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, 0,
+ 16 * 125);
}
/*-------------------------------------------------------------------------*/
static void end_unlink_async(struct ehci_hcd *ehci);
+static void unlink_empty_async(struct ehci_hcd *ehci);
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
static void ehci_work(struct ehci_hcd *ehci);
+static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
+static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
+#include "ehci-timer.c"
#include "ehci-hub.c"
-#include "ehci-lpm.c"
#include "ehci-mem.c"
#include "ehci-q.c"
#include "ehci-sched.c"
+#include "ehci-sysfs.c"
/*-------------------------------------------------------------------------*/
-static void ehci_iaa_watchdog(unsigned long param)
-{
- struct ehci_hcd *ehci = (struct ehci_hcd *) param;
- unsigned long flags;
-
- spin_lock_irqsave (&ehci->lock, flags);
-
- /* Lost IAA irqs wedge things badly; seen first with a vt8235.
- * So we need this watchdog, but must protect it against both
- * (a) SMP races against real IAA firing and retriggering, and
- * (b) clean HC shutdown, when IAA watchdog was pending.
- */
- if (ehci->reclaim
- && !timer_pending(&ehci->iaa_watchdog)
- && HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
- u32 cmd, status;
-
- /* If we get here, IAA is *REALLY* late. It's barely
- * conceivable that the system is so busy that CMD_IAAD
- * is still legitimately set, so let's be sure it's
- * clear before we read STS_IAA. (The HC should clear
- * CMD_IAAD when it sets STS_IAA.)
- */
- cmd = ehci_readl(ehci, &ehci->regs->command);
- if (cmd & CMD_IAAD)
- ehci_writel(ehci, cmd & ~CMD_IAAD,
- &ehci->regs->command);
-
- /* If IAA is set here it either legitimately triggered
- * before we cleared IAAD above (but _way_ late, so we'll
- * still count it as lost) ... or a silicon erratum:
- * - VIA seems to set IAA without triggering the IRQ;
- * - IAAD potentially cleared without setting IAA.
- */
- status = ehci_readl(ehci, &ehci->regs->status);
- if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
- COUNT (ehci->stats.lost_iaa);
- ehci_writel(ehci, STS_IAA, &ehci->regs->status);
- }
-
- ehci_vdbg(ehci, "IAA watchdog: status %x cmd %x\n",
- status, cmd);
- end_unlink_async(ehci);
- }
-
- spin_unlock_irqrestore(&ehci->lock, flags);
-}
-
-static void ehci_watchdog(unsigned long param)
-{
- struct ehci_hcd *ehci = (struct ehci_hcd *) param;
- unsigned long flags;
-
- spin_lock_irqsave(&ehci->lock, flags);
-
- /* stop async processing after it's idled a bit */
- if (test_bit (TIMER_ASYNC_OFF, &ehci->actions))
- start_unlink_async (ehci, ehci->async);
-
- /* ehci could run by timer, without IRQs ... */
- ehci_work (ehci);
-
- spin_unlock_irqrestore (&ehci->lock, flags);
-}
-
/* On some systems, leaving remote wakeup enabled prevents system shutdown.
* The firmware seems to think that powering off is a wakeup event!
* This routine turns off remote wakeup and everything else, on all ports.
@@ -418,11 +336,14 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
/*
* Halt HC, turn off all ports, and let the BIOS use the companion controllers.
- * Should be called with ehci->lock held.
+ * Must be called with interrupts enabled and the lock not held.
*/
static void ehci_silence_controller(struct ehci_hcd *ehci)
{
ehci_halt(ehci);
+
+ spin_lock_irq(&ehci->lock);
+ ehci->rh_state = EHCI_RH_HALTED;
ehci_turn_off_all_ports(ehci);
/* make BIOS/etc use companion controller during reboot */
@@ -430,6 +351,7 @@ static void ehci_silence_controller(struct ehci_hcd *ehci)
/* unblock posted writes */
ehci_readl(ehci, &ehci->regs->configured_flag);
+ spin_unlock_irq(&ehci->lock);
}
/* ehci_shutdown kick in for silicon on any bus (not just pci, etc).
@@ -440,30 +362,15 @@ static void ehci_shutdown(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- del_timer_sync(&ehci->watchdog);
- del_timer_sync(&ehci->iaa_watchdog);
-
spin_lock_irq(&ehci->lock);
- ehci_silence_controller(ehci);
+ ehci->shutdown = true;
+ ehci->rh_state = EHCI_RH_STOPPING;
+ ehci->enabled_hrtimer_events = 0;
spin_unlock_irq(&ehci->lock);
-}
-static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
-{
- unsigned port;
-
- if (!HCS_PPC (ehci->hcs_params))
- return;
+ ehci_silence_controller(ehci);
- ehci_dbg (ehci, "...power%s ports...\n", is_on ? "up" : "down");
- for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
- (void) ehci_hub_control(ehci_to_hcd(ehci),
- is_on ? SetPortFeature : ClearPortFeature,
- USB_PORT_FEAT_POWER,
- port--, NULL, 0);
- /* Flush those writes */
- ehci_readl(ehci, &ehci->regs->command);
- msleep(20);
+ hrtimer_cancel(&ehci->hrtimer);
}
/*-------------------------------------------------------------------------*/
@@ -474,28 +381,33 @@ static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
*/
static void ehci_work (struct ehci_hcd *ehci)
{
- timer_action_done (ehci, TIMER_IO_WATCHDOG);
-
/* another CPU may drop ehci->lock during a schedule scan while
* it reports urb completions. this flag guards against bogus
* attempts at re-entrant schedule scanning.
*/
- if (ehci->scanning)
+ if (ehci->scanning) {
+ ehci->need_rescan = true;
return;
- ehci->scanning = 1;
- scan_async (ehci);
- if (ehci->next_uframe != -1)
- scan_periodic (ehci);
- ehci->scanning = 0;
+ }
+ ehci->scanning = true;
+
+ rescan:
+ ehci->need_rescan = false;
+ if (ehci->async_count)
+ scan_async(ehci);
+ if (ehci->intr_count > 0)
+ scan_intr(ehci);
+ if (ehci->isoc_count > 0)
+ scan_isoc(ehci);
+ if (ehci->need_rescan)
+ goto rescan;
+ ehci->scanning = false;
/* the IO watchdog guards against hardware or driver bugs that
* misplace IRQs, and should let us run completely without IRQs.
* such lossage has been observed on both VT6202 and VT8235.
*/
- if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) &&
- (ehci->async->qh_next.ptr != NULL ||
- ehci->periodic_sched != 0))
- timer_action (ehci, TIMER_IO_WATCHDOG);
+ turn_on_io_watchdog(ehci);
}
/*
@@ -508,34 +420,27 @@ static void ehci_stop (struct usb_hcd *hcd)
ehci_dbg (ehci, "stop\n");
/* no more interrupts ... */
- del_timer_sync (&ehci->watchdog);
- del_timer_sync(&ehci->iaa_watchdog);
spin_lock_irq(&ehci->lock);
- if (HC_IS_RUNNING (hcd->state))
- ehci_quiesce (ehci);
+ ehci->enabled_hrtimer_events = 0;
+ spin_unlock_irq(&ehci->lock);
+ ehci_quiesce(ehci);
ehci_silence_controller(ehci);
ehci_reset (ehci);
- spin_unlock_irq(&ehci->lock);
- remove_companion_file(ehci);
+ hrtimer_cancel(&ehci->hrtimer);
+ remove_sysfs_files(ehci);
remove_debug_files (ehci);
/* root hub is shut down separately (first, when possible) */
spin_lock_irq (&ehci->lock);
- if (ehci->async)
- ehci_work (ehci);
+ end_free_itds(ehci);
spin_unlock_irq (&ehci->lock);
ehci_mem_cleanup (ehci);
-#ifdef EHCI_STATS
- ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
- ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
- ehci->stats.lost_iaa);
- ehci_dbg (ehci, "complete %ld unlink %ld\n",
- ehci->stats.complete, ehci->stats.unlink);
-#endif
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_dev_put();
dbg_status (ehci, "ehci_stop completed",
ehci_readl(ehci, &ehci->regs->status));
@@ -556,35 +461,51 @@ static int ehci_init(struct usb_hcd *hcd)
* keep the I/O watchdog on by default; well-behaved HCDs can turn it off later
*/
ehci->need_io_watchdog = 1;
- init_timer(&ehci->watchdog);
- ehci->watchdog.function = ehci_watchdog;
- ehci->watchdog.data = (unsigned long) ehci;
- init_timer(&ehci->iaa_watchdog);
- ehci->iaa_watchdog.function = ehci_iaa_watchdog;
- ehci->iaa_watchdog.data = (unsigned long) ehci;
+ hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ehci->hrtimer.function = ehci_hrtimer_func;
+ ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
+
+ hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
+
+ /*
+ * by default set standard 80% (== 100 usec/uframe) max periodic
+ * bandwidth as required by USB 2.0
+ */
+ ehci->uframe_periodic_max = 100;
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
*/
ehci->periodic_size = DEFAULT_I_TDPS;
+ INIT_LIST_HEAD(&ehci->async_unlink);
+ INIT_LIST_HEAD(&ehci->async_idle);
+ INIT_LIST_HEAD(&ehci->intr_unlink_wait);
+ INIT_LIST_HEAD(&ehci->intr_unlink);
+ INIT_LIST_HEAD(&ehci->intr_qh_list);
INIT_LIST_HEAD(&ehci->cached_itd_list);
INIT_LIST_HEAD(&ehci->cached_sitd_list);
+ INIT_LIST_HEAD(&ehci->tt_list);
+
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ switch (EHCI_TUNE_FLS) {
+ case 0: ehci->periodic_size = 1024; break;
+ case 1: ehci->periodic_size = 512; break;
+ case 2: ehci->periodic_size = 256; break;
+ default: BUG();
+ }
+ }
if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
return retval;
/* controllers may cache some of the periodic schedule ... */
- hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
- ehci->i_thresh = 2 + 8;
+ ehci->i_thresh = 0;
else // N microframes cached
ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
- ehci->reclaim = NULL;
- ehci->next_uframe = -1;
- ehci->clock_frame = -1;
-
/*
* dedicate a qh for the async ring head, since we couldn't unlink
* a 'real' qh without stopping the async schedule [4.8]. use it
@@ -596,6 +517,9 @@ static int ehci_init(struct usb_hcd *hcd)
hw = ehci->async->hw;
hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+#if defined(CONFIG_PPC_PS3)
+ hw->hw_info1 |= cpu_to_hc32(ehci, QH_INACTIVATE);
+#endif
hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
ehci->async->qh_state = QH_STATE_LINKED;
@@ -629,23 +553,6 @@ static int ehci_init(struct usb_hcd *hcd)
/* periodic schedule size can be smaller than default */
temp &= ~(3 << 2);
temp |= (EHCI_TUNE_FLS << 2);
- switch (EHCI_TUNE_FLS) {
- case 0: ehci->periodic_size = 1024; break;
- case 1: ehci->periodic_size = 512; break;
- case 2: ehci->periodic_size = 256; break;
- default: BUG();
- }
- }
- if (HCC_LPM(hcc_params)) {
- /* support link power management EHCI 1.1 addendum */
- ehci_dbg(ehci, "support lpm\n");
- ehci->has_lpm = 1;
- if (hird > 0xf) {
- ehci_dbg(ehci, "hird %d invalid, use default 0",
- hird);
- hird = 0;
- }
- temp |= hird << 24;
}
ehci->command = temp;
@@ -659,17 +566,13 @@ static int ehci_init(struct usb_hcd *hcd)
static int ehci_run (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
- int retval;
u32 temp;
u32 hcc_params;
hcd->uses_new_polling = 1;
/* EHCI spec section 4.1 */
- if ((retval = ehci_reset(ehci)) != 0) {
- ehci_mem_cleanup(ehci);
- return retval;
- }
+
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
@@ -718,14 +621,14 @@ static int ehci_run (struct usb_hcd *hcd)
* be started before the port switching actions could complete.
*/
down_write(&ehci_cf_port_reset_rwsem);
- hcd->state = HC_STATE_RUNNING;
+ ehci->rh_state = EHCI_RH_RUNNING;
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
msleep(5);
up_write(&ehci_cf_port_reset_rwsem);
ehci->last_periodic_enable = ktime_get_real();
- temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
ehci_info (ehci,
"USB %x.%x started, EHCI %x.%02x%s\n",
((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
@@ -740,10 +643,40 @@ static int ehci_run (struct usb_hcd *hcd)
* since the class device isn't created that early.
*/
create_debug_files(ehci);
- create_companion_file(ehci);
+ create_sysfs_files(ehci);
+
+ return 0;
+}
+
+int ehci_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ ehci->regs = (void __iomem *)ehci->caps +
+ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+ dbg_hcs_params(ehci, "reset");
+ dbg_hcc_params(ehci, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+ ehci->sbrn = HCD_USB2;
+
+ /* data structure init */
+ retval = ehci_init(hcd);
+ if (retval)
+ return retval;
+
+ retval = ehci_halt(ehci);
+ if (retval)
+ return retval;
+
+ ehci_reset(ehci);
return 0;
}
+EXPORT_SYMBOL_GPL(ehci_setup);
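ehci_setup() now does the generic probe-time initialization (locating the
operational registers, caching hcs_params, calling ehci_init(), then halting
and resetting the controller), so it can serve as the default hc_driver
->reset hook. Glue drivers with controller quirks typically wrap it; a hedged
sketch with made-up names (it assumes ehci->caps was set during probe):

    static int my_soc_ehci_reset(struct usb_hcd *hcd)
    {
        struct ehci_hcd *ehci = hcd_to_ehci(hcd);
        int retval;

        /* generic capability parsing, ehci_init(), halt + reset */
        retval = ehci_setup(hcd);
        if (retval)
            return retval;

        /* hypothetical quirk: this controller needs no I/O watchdog */
        ehci->need_io_watchdog = 0;
        return 0;
    }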
/*-------------------------------------------------------------------------*/
@@ -752,8 +685,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 status, masked_status, pcd_status = 0, cmd;
int bh;
+ unsigned long flags;
- spin_lock (&ehci->lock);
+ /*
+ * With the threadirqs option, we must use the spin_lock_irqsave()
+ * variant to avoid deadlocking against the ehci hrtimer callback,
+ * because hrtimer callbacks still run in hard-interrupt context even
+ * when threadirqs is specified. We can go back to plain spin_lock()
+ * once hrtimer callbacks become threaded.
+ */
+ spin_lock_irqsave(&ehci->lock, flags);
status = ehci_readl(ehci, &ehci->regs->status);
@@ -763,9 +703,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
goto dead;
}
- masked_status = status & INTR_MASK;
- if (!masked_status) { /* irq sharing? */
- spin_unlock(&ehci->lock);
+ /*
+ * We don't use STS_FLR, but some controllers don't like it to
+ * remain on, so mask it out along with the other status bits.
+ */
+ masked_status = status & (INTR_MASK | STS_FLR);
+
+ /* Shared IRQ? */
+ if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
+ spin_unlock_irqrestore(&ehci->lock, flags);
return IRQ_NONE;
}
@@ -774,13 +720,6 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
cmd = ehci_readl(ehci, &ehci->regs->command);
bh = 0;
-#ifdef VERBOSE_DEBUG
- /* unrequested/ignored: Frame List Rollover */
- dbg_status (ehci, "irq", status);
-#endif
-
- /* INT, ERR, and IAA interrupt rates can be throttled */
-
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
if (likely ((status & STS_ERR) == 0))
@@ -792,29 +731,38 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* complete the unlinking of some qh [4.15.2.3] */
if (status & STS_IAA) {
+
+ /* Turn off the IAA watchdog */
+ ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_IAA_WATCHDOG);
+
+ /*
+ * Mild optimization: Allow another IAAD to reset the
+ * hrtimer, if one occurs before the next expiration.
+ * In theory we could always cancel the hrtimer, but
+ * tests show that about half the time it will be reset
+ * for some other event anyway.
+ */
+ if (ehci->next_hrtimer_event == EHCI_HRTIMER_IAA_WATCHDOG)
+ ++ehci->next_hrtimer_event;
+
/* guard against (alleged) silicon errata */
- if (cmd & CMD_IAAD) {
- ehci_writel(ehci, cmd & ~CMD_IAAD,
- &ehci->regs->command);
+ if (cmd & CMD_IAAD)
ehci_dbg(ehci, "IAA with IAAD still set?\n");
- }
- if (ehci->reclaim) {
- COUNT(ehci->stats.reclaim);
- end_unlink_async(ehci);
- } else
- ehci_dbg(ehci, "IAA with nothing to reclaim?\n");
+ if (ehci->iaa_in_progress)
+ COUNT(ehci->stats.iaa);
+ end_unlink_async(ehci);
}
/* remote wakeup [4.3.1] */
if (status & STS_PCD) {
unsigned i = HCS_N_PORTS (ehci->hcs_params);
- u32 ppcd = 0;
+ u32 ppcd = ~0;
/* kick root hub later */
pcd_status = status;
/* resume root hub? */
- if (!(cmd & CMD_RUN))
+ if (ehci->rh_state == EHCI_RH_SUSPENDED)
usb_hcd_resume_root_hub(hcd);
/* get per-port change detect bits */
@@ -825,7 +773,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
int pstatus;
/* leverage per-port change bits feature */
- if (ehci->has_ppcd && !(ppcd & (1 << i)))
+ if (!(ppcd & (1 << i)))
continue;
pstatus = ehci_readl(ehci,
&ehci->regs->port_status[i]);
@@ -845,7 +793,9 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
* like usb_port_resume() does.
*/
ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
+ set_bit(i, &ehci->resuming_ports);
ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
+ usb_hcd_start_port_resume(&hcd->self, i);
mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
}
}
@@ -855,19 +805,24 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
ehci_err(ehci, "fatal error\n");
dbg_cmd(ehci, "fatal", cmd);
dbg_status(ehci, "fatal", status);
- ehci_halt(ehci);
dead:
- ehci_reset(ehci);
- ehci_writel(ehci, 0, &ehci->regs->configured_flag);
- /* generic layer kills/unlinks all urbs, then
- * uses ehci_stop to clean up the rest
- */
- bh = 1;
+ usb_hc_died(hcd);
+
+ /* Don't let the controller do anything more */
+ ehci->shutdown = true;
+ ehci->rh_state = EHCI_RH_STOPPING;
+ ehci->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+ ehci_handle_controller_death(ehci);
+
+ /* Handle completions when the controller stops */
+ bh = 0;
}
if (bh)
ehci_work (ehci);
- spin_unlock (&ehci->lock);
+ spin_unlock_irqrestore(&ehci->lock, flags);
if (pcd_status)
usb_hcd_poll_rh_status(hcd);
return IRQ_HANDLED;
@@ -924,38 +879,6 @@ static int ehci_urb_enqueue (
}
}
-static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
-{
- /* failfast */
- if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim)
- end_unlink_async(ehci);
-
- /* If the QH isn't linked then there's nothing we can do
- * unless we were called during a giveback, in which case
- * qh_completions() has to deal with it.
- */
- if (qh->qh_state != QH_STATE_LINKED) {
- if (qh->qh_state == QH_STATE_COMPLETING)
- qh->needs_rescan = 1;
- return;
- }
-
- /* defer till later if busy */
- if (ehci->reclaim) {
- struct ehci_qh *last;
-
- for (last = ehci->reclaim;
- last->reclaim;
- last = last->reclaim)
- continue;
- qh->qh_state = QH_STATE_UNLINK_WAIT;
- last->reclaim = qh;
-
- /* start IAA cycle */
- } else
- start_unlink_async (ehci, qh);
-}
-
/* remove from hardware lists
* completions normally happen asynchronously
*/
@@ -972,17 +895,24 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
if (rc)
goto done;
- switch (usb_pipetype (urb->pipe)) {
- // case PIPE_CONTROL:
- // case PIPE_BULK:
- default:
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ /*
+ * We don't expedite dequeue for isochronous URBs.
+ * Just wait until they complete normally or their
+ * time slot expires.
+ */
+ } else {
qh = (struct ehci_qh *) urb->hcpriv;
- if (!qh)
- break;
+ qh->exception = 1;
switch (qh->qh_state) {
case QH_STATE_LINKED:
+ if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)
+ start_unlink_intr(ehci, qh);
+ else
+ start_unlink_async(ehci, qh);
+ break;
case QH_STATE_COMPLETING:
- unlink_async(ehci, qh);
+ qh->dequeue_during_giveback = 1;
break;
case QH_STATE_UNLINK:
case QH_STATE_UNLINK_WAIT:
@@ -993,33 +923,6 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
qh_completions(ehci, qh);
break;
}
- break;
-
- case PIPE_INTERRUPT:
- qh = (struct ehci_qh *) urb->hcpriv;
- if (!qh)
- break;
- switch (qh->qh_state) {
- case QH_STATE_LINKED:
- case QH_STATE_COMPLETING:
- intr_deschedule (ehci, qh);
- break;
- case QH_STATE_IDLE:
- qh_completions (ehci, qh);
- break;
- default:
- ehci_dbg (ehci, "bogus qh %p state %d\n",
- qh, qh->qh_state);
- goto done;
- }
- break;
-
- case PIPE_ISOCHRONOUS:
- // itd or sitd ...
-
- // wait till next completion, do it then.
- // completion irqs can wait up to 1024 msec,
- break;
}
done:
spin_unlock_irqrestore (&ehci->lock, flags);
@@ -1035,7 +938,7 @@ ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
unsigned long flags;
- struct ehci_qh *qh, *tmp;
+ struct ehci_qh *qh;
/* ASSERT: any requests/urbs are being unlinked */
/* ASSERT: nobody can be submitting urbs for this any more */
@@ -1050,24 +953,29 @@ rescan:
* accelerate iso completions ... so spin a while.
*/
if (qh->hw == NULL) {
- ehci_vdbg (ehci, "iso delay\n");
- goto idle_timeout;
+ struct ehci_iso_stream *stream = ep->hcpriv;
+
+ if (!list_empty(&stream->td_list))
+ goto idle_timeout;
+
+ /* BUG_ON(!list_empty(&stream->free_list)); */
+ reserve_release_iso_bandwidth(ehci, stream, -1);
+ kfree(stream);
+ goto done;
}
- if (!HC_IS_RUNNING (hcd->state))
+ qh->exception = 1;
+ if (ehci->rh_state < EHCI_RH_RUNNING)
qh->qh_state = QH_STATE_IDLE;
switch (qh->qh_state) {
case QH_STATE_LINKED:
- case QH_STATE_COMPLETING:
- for (tmp = ehci->async->qh_next.qh;
- tmp && tmp != qh;
- tmp = tmp->qh_next.qh)
- continue;
- /* periodic qh self-unlinks on empty */
- if (!tmp)
- goto nogood;
- unlink_async (ehci, qh);
+ WARN_ON(!list_empty(&qh->qtd_list));
+ if (usb_endpoint_type(&ep->desc) != USB_ENDPOINT_XFER_INT)
+ start_unlink_async(ehci, qh);
+ else
+ start_unlink_intr(ehci, qh);
/* FALL THROUGH */
+ case QH_STATE_COMPLETING: /* already in unlinking */
case QH_STATE_UNLINK: /* wait for hw to finish? */
case QH_STATE_UNLINK_WAIT:
idle_timeout:
@@ -1078,12 +986,13 @@ idle_timeout:
if (qh->clearing_tt)
goto idle_timeout;
if (list_empty (&qh->qtd_list)) {
- qh_put (qh);
+ if (qh->ps.bw_uperiod)
+ reserve_release_intr_bandwidth(ehci, qh, -1);
+ qh_destroy(ehci, qh);
break;
}
/* else FALL THROUGH */
default:
-nogood:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
*/
@@ -1092,8 +1001,8 @@ nogood:
list_empty (&qh->qtd_list) ? "" : "(has tds)");
break;
}
+ done:
ep->hcpriv = NULL;
-done:
spin_unlock_irqrestore (&ehci->lock, flags);
}
@@ -1119,20 +1028,19 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
* the toggle bit in the QH.
*/
if (qh) {
- usb_settoggle(qh->dev, epnum, is_out, 0);
if (!list_empty(&qh->qtd_list)) {
WARN_ONCE(1, "clear_halt for a busy endpoint\n");
- } else if (qh->qh_state == QH_STATE_LINKED ||
- qh->qh_state == QH_STATE_COMPLETING) {
-
+ } else {
/* The toggle value in the QH can't be updated
* while the QH is active. Unlink it now;
* re-linking will call qh_refresh().
*/
+ usb_settoggle(qh->ps.udev, epnum, is_out, 0);
+ qh->exception = 1;
if (eptype == USB_ENDPOINT_XFER_BULK)
- unlink_async(ehci, qh);
+ start_unlink_async(ehci, qh);
else
- intr_deschedule(ehci, qh);
+ start_unlink_intr(ehci, qh);
}
}
spin_unlock_irqrestore(&ehci->lock, flags);
@@ -1141,9 +1049,195 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
static int ehci_get_frame (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
- return (ehci_readl(ehci, &ehci->regs->frame_index) >> 3) %
- ehci->periodic_size;
+ return (ehci_read_frame_index(ehci) >> 3) % ehci->periodic_size;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Device addition and removal */
+
+static void ehci_remove_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ spin_lock_irq(&ehci->lock);
+ drop_tt(udev);
+ spin_unlock_irq(&ehci->lock);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_PM
+
+/* suspend/resume, section 4.3 */
+
+/* These routines handle the generic parts of controller suspend/resume */
+
+int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ if (time_before(jiffies, ehci->next_statechange))
+ msleep(10);
+
+ /*
+ * The root hub was already suspended. Disable IRQ emission and
+ * mark the hardware inaccessible. The PM and USB cores make sure
+ * that the root hub is either suspended or stopped.
+ */
+ ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
+
+ spin_lock_irq(&ehci->lock);
+ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+ (void) ehci_readl(ehci, &ehci->regs->intr_enable);
+
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ spin_unlock_irq(&ehci->lock);
+
+ synchronize_irq(hcd->irq);
+
+ /* Check for race with a wakeup request */
+ if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
+ ehci_resume(hcd, false);
+ return -EBUSY;
+ }
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(ehci_suspend);
+
+/* Returns 0 if power was preserved, 1 if power was lost */
+int ehci_resume(struct usb_hcd *hcd, bool hibernated)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ if (time_before(jiffies, ehci->next_statechange))
+ msleep(100);
+
+ /* Mark hardware accessible again as we are back to full power by now */
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ if (ehci->shutdown)
+ return 0; /* Controller is dead */
+
+ /*
+ * If CF is still set and we aren't resuming from hibernation
+ * then we maintained suspend power.
+ * Just undo the effect of ehci_suspend().
+ */
+ if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
+ !hibernated) {
+ int mask = INTR_MASK;
+
+ ehci_prepare_ports_for_controller_resume(ehci);
+
+ spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto skip;
+
+ if (!hcd->self.root_hub->do_remote_wakeup)
+ mask &= ~STS_PCD;
+ ehci_writel(ehci, mask, &ehci->regs->intr_enable);
+ ehci_readl(ehci, &ehci->regs->intr_enable);
+ skip:
+ spin_unlock_irq(&ehci->lock);
+ return 0;
+ }
+
+ /*
+ * Else reset, to cope with power loss or resume from hibernation
+ * having let the firmware kick in during reboot.
+ */
+ usb_root_hub_lost_power(hcd->self.root_hub);
+ (void) ehci_halt(ehci);
+ (void) ehci_reset(ehci);
+
+ spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto skip;
+
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+ ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
+
+ ehci->rh_state = EHCI_RH_SUSPENDED;
+ spin_unlock_irq(&ehci->lock);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(ehci_resume);
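ehci_suspend() and ehci_resume() are exported so that platform glue only has
to manage clocks, PHYs and regulators while the generic register and root-hub
handling stays here. A minimal sketch of wiring them into dev_pm_ops (the
driver and function names are hypothetical):

    static int my_ehci_drv_suspend(struct device *dev)
    {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
        bool do_wakeup = device_may_wakeup(dev);

        return ehci_suspend(hcd, do_wakeup);
    }

    static int my_ehci_drv_resume(struct device *dev)
    {
        struct usb_hcd *hcd = dev_get_drvdata(dev);

        /* false: power was kept, not resuming from hibernation */
        ehci_resume(hcd, false);
        return 0;
    }

    static SIMPLE_DEV_PM_OPS(my_ehci_pm_ops,
                             my_ehci_drv_suspend, my_ehci_drv_resume);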
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Generic structure: This gets copied for platform drivers so that
+ * individual entries can be overridden as needed.
+ */
+
+static const struct hc_driver ehci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "EHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ /*
+ * device support
+ */
+ .free_dev = ehci_remove_device,
+};
+
+void ehci_init_driver(struct hc_driver *drv,
+ const struct ehci_driver_overrides *over)
+{
+ /* Copy the generic table to drv and then apply the overrides */
+ *drv = ehci_hc_driver;
+
+ if (over) {
+ drv->hcd_priv_size += over->extra_priv_size;
+ if (over->reset)
+ drv->reset = over->reset;
+ }
+}
+EXPORT_SYMBOL_GPL(ehci_init_driver);
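ehci_init_driver() lets each bus glue start from the generic hc_driver above
and override only what it must (currently the private-data size and the
->reset routine). A usage sketch with made-up names, following the pattern
the converted glue drivers use:

    static struct hc_driver __read_mostly my_ehci_hc_driver;

    static const struct ehci_driver_overrides my_overrides __initconst = {
        .extra_priv_size = sizeof(struct my_ehci_priv),
        .reset = my_soc_ehci_reset,
    };

    static int __init my_ehci_driver_init(void)
    {
        ehci_init_driver(&my_ehci_hc_driver, &my_overrides);
        return platform_driver_register(&my_ehci_platform_driver);
    }
    module_init(my_ehci_driver_init);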
/*-------------------------------------------------------------------------*/
@@ -1151,29 +1245,14 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_LICENSE ("GPL");
-#ifdef CONFIG_PCI
-#include "ehci-pci.c"
-#define PCI_DRIVER ehci_pci_driver
-#endif
-
#ifdef CONFIG_USB_EHCI_FSL
#include "ehci-fsl.c"
#define PLATFORM_DRIVER ehci_fsl_driver
#endif
-#ifdef CONFIG_USB_EHCI_MXC
-#include "ehci-mxc.c"
-#define PLATFORM_DRIVER ehci_mxc_driver
-#endif
-
-#ifdef CONFIG_SOC_AU1200
-#include "ehci-au1xxx.c"
-#define PLATFORM_DRIVER ehci_hcd_au1xxx_driver
-#endif
-
-#ifdef CONFIG_ARCH_OMAP3
-#include "ehci-omap.c"
-#define PLATFORM_DRIVER ehci_hcd_omap_driver
+#ifdef CONFIG_USB_EHCI_SH
+#include "ehci-sh.c"
+#define PLATFORM_DRIVER ehci_hcd_sh_driver
#endif
#ifdef CONFIG_PPC_PS3
@@ -1191,35 +1270,34 @@ MODULE_LICENSE ("GPL");
#define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver
#endif
-#ifdef CONFIG_PLAT_ORION
-#include "ehci-orion.c"
-#define PLATFORM_DRIVER ehci_orion_driver
+#ifdef CONFIG_USB_OCTEON_EHCI
+#include "ehci-octeon.c"
+#define PLATFORM_DRIVER ehci_octeon_driver
#endif
-#ifdef CONFIG_ARCH_IXP4XX
-#include "ehci-ixp4xx.c"
-#define PLATFORM_DRIVER ixp4xx_ehci_driver
+#ifdef CONFIG_TILE_USB
+#include "ehci-tilegx.c"
+#define PLATFORM_DRIVER ehci_hcd_tilegx_driver
#endif
-#ifdef CONFIG_USB_W90X900_EHCI
-#include "ehci-w90x900.c"
-#define PLATFORM_DRIVER ehci_hcd_w90x900_driver
+#ifdef CONFIG_USB_EHCI_HCD_PMC_MSP
+#include "ehci-pmcmsp.c"
+#define PLATFORM_DRIVER ehci_hcd_msp_driver
#endif
-#ifdef CONFIG_ARCH_AT91
-#include "ehci-atmel.c"
-#define PLATFORM_DRIVER ehci_atmel_driver
+#ifdef CONFIG_SPARC_LEON
+#include "ehci-grlib.c"
+#define PLATFORM_DRIVER ehci_grlib_driver
#endif
-#ifdef CONFIG_USB_OCTEON_EHCI
-#include "ehci-octeon.c"
-#define PLATFORM_DRIVER ehci_octeon_driver
+#ifdef CONFIG_USB_EHCI_MV
+#include "ehci-mv.c"
+#define PLATFORM_DRIVER ehci_mv_driver
#endif
-#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
- !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
- !defined(XILINX_OF_PLATFORM_DRIVER)
-#error "missing bus glue for ehci-hcd"
+#ifdef CONFIG_MIPS_SEAD3
+#include "ehci-sead3.c"
+#define PLATFORM_DRIVER ehci_hcd_sead3_driver
#endif
static int __init ehci_hcd_init(void)
@@ -1241,7 +1319,7 @@ static int __init ehci_hcd_init(void)
sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
if (!ehci_debug_root) {
retval = -ENOENT;
@@ -1255,12 +1333,6 @@ static int __init ehci_hcd_init(void)
goto clean0;
#endif
-#ifdef PCI_DRIVER
- retval = pci_register_driver(&PCI_DRIVER);
- if (retval < 0)
- goto clean1;
-#endif
-
#ifdef PS3_SYSTEM_BUS_DRIVER
retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
if (retval < 0)
@@ -1268,39 +1340,35 @@ static int __init ehci_hcd_init(void)
#endif
#ifdef OF_PLATFORM_DRIVER
- retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
+ retval = platform_driver_register(&OF_PLATFORM_DRIVER);
if (retval < 0)
goto clean3;
#endif
#ifdef XILINX_OF_PLATFORM_DRIVER
- retval = of_register_platform_driver(&XILINX_OF_PLATFORM_DRIVER);
+ retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER);
if (retval < 0)
goto clean4;
#endif
return retval;
#ifdef XILINX_OF_PLATFORM_DRIVER
- /* of_unregister_platform_driver(&XILINX_OF_PLATFORM_DRIVER); */
+ /* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */
clean4:
#endif
#ifdef OF_PLATFORM_DRIVER
- of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
+ platform_driver_unregister(&OF_PLATFORM_DRIVER);
clean3:
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
clean2:
#endif
-#ifdef PCI_DRIVER
- pci_unregister_driver(&PCI_DRIVER);
-clean1:
-#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
clean0:
#endif
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
ehci_debug_root = NULL;
err_debug:
@@ -1313,24 +1381,20 @@ module_init(ehci_hcd_init);
static void __exit ehci_hcd_cleanup(void)
{
#ifdef XILINX_OF_PLATFORM_DRIVER
- of_unregister_platform_driver(&XILINX_OF_PLATFORM_DRIVER);
+ platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER);
#endif
#ifdef OF_PLATFORM_DRIVER
- of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
+ platform_driver_unregister(&OF_PLATFORM_DRIVER);
#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
#endif
-#ifdef PCI_DRIVER
- pci_unregister_driver(&PCI_DRIVER);
-#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
#endif
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
}
module_exit(ehci_hcd_cleanup);
-
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 796ea0c8900..cc305c71ac3 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -27,19 +27,17 @@
*/
/*-------------------------------------------------------------------------*/
+#include <linux/usb/otg.h>
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
#ifdef CONFIG_PM
-static int ehci_hub_control(
- struct usb_hcd *hcd,
- u16 typeReq,
- u16 wValue,
- u16 wIndex,
- char *buf,
- u16 wLength
-);
+static int persist_enabled_on_companion(struct usb_device *udev, void *unused)
+{
+ return !udev->maxchild && udev->persist_enabled &&
+ udev->bus->root_hub->speed < USB_SPEED_HIGH;
+}
/* After a power loss, ports that were owned by the companion must be
* reset so that the companion can still own them.
@@ -55,9 +53,33 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
if (!ehci->owned_ports)
return;
+ /*
+ * USB 1.1 devices are mostly HIDs, which don't need to persist across
+ * suspends. If we ensure that none of our companion's devices have
+ * persist_enabled (by looking through all USB 1.1 buses in the system),
+ * we can skip this and avoid slowing resume down. Devices without
+ * persist will just get reenumerated shortly after resume anyway.
+ */
+ if (!usb_for_each_dev(NULL, persist_enabled_on_companion))
+ return;
+
+ /* Make sure the ports are powered */
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ if (test_bit(port, &ehci->owned_ports)) {
+ reg = &ehci->regs->port_status[port];
+ status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+ if (!(status & PORT_POWER)) {
+ status |= PORT_POWER;
+ ehci_writel(ehci, status, reg);
+ }
+ }
+ }
+
/* Give the connections some time to appear */
msleep(20);
+ spin_lock_irq(&ehci->lock);
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
if (test_bit(port, &ehci->owned_ports)) {
@@ -69,23 +91,30 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
clear_bit(port, &ehci->owned_ports);
else if (test_bit(port, &ehci->companion_ports))
ehci_writel(ehci, status & ~PORT_PE, reg);
- else
+ else {
+ spin_unlock_irq(&ehci->lock);
ehci_hub_control(hcd, SetPortFeature,
USB_PORT_FEAT_RESET, port + 1,
NULL, 0);
+ spin_lock_irq(&ehci->lock);
+ }
}
}
+ spin_unlock_irq(&ehci->lock);
if (!ehci->owned_ports)
return;
msleep(90); /* Wait for resets to complete */
+ spin_lock_irq(&ehci->lock);
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
if (test_bit(port, &ehci->owned_ports)) {
+ spin_unlock_irq(&ehci->lock);
ehci_hub_control(hcd, GetPortStatus,
0, port + 1,
(char *) &buf, sizeof(buf));
+ spin_lock_irq(&ehci->lock);
/* The companion should now own the port,
* but if something went wrong the port must not
@@ -104,6 +133,28 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
}
ehci->owned_ports = 0;
+ spin_unlock_irq(&ehci->lock);
+}
+
+static int ehci_port_change(struct ehci_hcd *ehci)
+{
+ int i = HCS_N_PORTS(ehci->hcs_params);
+
+ /* First check if the controller indicates a change event */
+
+ if (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)
+ return 1;
+
+ /*
+ * Not all controllers appear to update this while going from D3 to D0,
+ * so check the individual port status registers as well
+ */
+
+ while (i--)
+ if (ehci_readl(ehci, &ehci->regs->port_status[i]) & PORT_CSC)
+ return 1;
+
+ return 0;
}
static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
@@ -120,18 +171,20 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
return;
+ spin_lock_irq(&ehci->lock);
+
/* clear phy low-power mode before changing wakeup flags */
- if (ehci->has_hostpc) {
+ if (ehci->has_tdi_phy_lpm) {
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
- u32 __iomem *hostpc_reg;
+ u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
- hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
- + HOSTPC0 + 4 * port);
temp = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
}
+ spin_unlock_irq(&ehci->lock);
msleep(5);
+ spin_lock_irq(&ehci->lock);
}
port = HCS_N_PORTS(ehci->hcs_params);
@@ -149,27 +202,25 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
else
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
}
- ehci_vdbg(ehci, "port %d, %08x -> %08x\n",
- port + 1, t1, t2);
ehci_writel(ehci, t2, reg);
}
/* enter phy low-power mode again */
- if (ehci->has_hostpc) {
+ if (ehci->has_tdi_phy_lpm) {
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
- u32 __iomem *hostpc_reg;
+ u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
- hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
- + HOSTPC0 + 4 * port);
temp = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
}
}
/* Does the root hub have a port wakeup pending? */
- if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD))
+ if (!suspending && ehci_port_change(ehci))
usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
+
+ spin_unlock_irq(&ehci->lock);
}
static int ehci_bus_suspend (struct usb_hcd *hcd)
@@ -178,15 +229,19 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
int port;
int mask;
int changed;
+ bool fs_idle_delay;
ehci_dbg(ehci, "suspend root hub\n");
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
- del_timer_sync(&ehci->watchdog);
- del_timer_sync(&ehci->iaa_watchdog);
+
+ /* stop the schedules */
+ ehci_quiesce(ehci);
spin_lock_irq (&ehci->lock);
+ if (ehci->rh_state < EHCI_RH_RUNNING)
+ goto done;
/* Once the controller is stopped, port resumes that are already
* in progress won't complete. Hence if remote wakeup is enabled
@@ -194,26 +249,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
* remote wakeup, we must fail the suspend.
*/
if (hcd->self.root_hub->do_remote_wakeup) {
- port = HCS_N_PORTS(ehci->hcs_params);
- while (port--) {
- if (ehci->reset_done[port] != 0) {
- spin_unlock_irq(&ehci->lock);
- ehci_dbg(ehci, "suspend failed because "
- "port %d is resuming\n",
- port + 1);
- return -EBUSY;
- }
+ if (ehci->resuming_ports) {
+ spin_unlock_irq(&ehci->lock);
+ ehci_dbg(ehci, "suspend failed because a port is resuming\n");
+ return -EBUSY;
}
}
- /* stop schedules, clean any completed work */
- if (HC_IS_RUNNING(hcd->state)) {
- ehci_quiesce (ehci);
- hcd->state = HC_STATE_QUIESCING;
- }
- ehci->command = ehci_readl(ehci, &ehci->regs->command);
- ehci_work(ehci);
-
/* Unlike other USB host controller types, EHCI doesn't have
* any notion of "global" or bus-wide suspend. The driver has
* to manually suspend all the active unsuspended ports, and
@@ -222,6 +264,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
ehci->bus_suspended = 0;
ehci->owned_ports = 0;
changed = 0;
+ fs_idle_delay = false;
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
@@ -250,25 +293,37 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
}
if (t1 != t2) {
- ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
- port + 1, t1, t2);
+ /*
+ * On some controllers, Wake-On-Disconnect will
+ * generate false wakeup signals until the bus
+ * switches over to full-speed idle. For their
+ * sake, add a delay if we need one.
+ */
+ if ((t2 & PORT_WKDISC_E) &&
+ ehci_port_speed(ehci, t2) ==
+ USB_PORT_STAT_HIGH_SPEED)
+ fs_idle_delay = true;
ehci_writel(ehci, t2, reg);
changed = 1;
}
}
+ spin_unlock_irq(&ehci->lock);
- if (changed && ehci->has_hostpc) {
- spin_unlock_irq(&ehci->lock);
- msleep(5); /* 5 ms for HCD to enter low-power mode */
- spin_lock_irq(&ehci->lock);
+ if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
+ /*
+ * Wait for HCD to enter low-power mode or for the bus
+ * to switch to full-speed idle.
+ */
+ usleep_range(5000, 5500);
+ }
+ if (changed && ehci->has_tdi_phy_lpm) {
+ spin_lock_irq(&ehci->lock);
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
- u32 __iomem *hostpc_reg;
+ u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
u32 t3;
- hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
- + HOSTPC0 + 4 * port);
t3 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
t3 = ehci_readl(ehci, hostpc_reg);
@@ -276,6 +331,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
port, (t3 & HOSTPC_PHCD) ?
"succeeded" : "failed");
}
+ spin_unlock_irq(&ehci->lock);
}
/* Apparently some devices need a >= 1-uframe delay here */
@@ -284,10 +340,19 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
/* turn off now-idle HC */
ehci_halt (ehci);
- hcd->state = HC_STATE_SUSPENDED;
- if (ehci->reclaim)
- end_unlink_async(ehci);
+ spin_lock_irq(&ehci->lock);
+ if (ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_POLL_DEAD))
+ ehci_handle_controller_death(ehci);
+ if (ehci->rh_state != EHCI_RH_RUNNING)
+ goto done;
+ ehci->rh_state = EHCI_RH_SUSPENDED;
+
+ end_unlink_async(ehci);
+ unlink_empty_async_suspended(ehci);
+ ehci_handle_start_intr_unlinks(ehci);
+ ehci_handle_intr_unlinks(ehci);
+ end_free_itds(ehci);
/* allow remote wakeup */
mask = INTR_MASK;
@@ -296,13 +361,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
ehci_readl(ehci, &ehci->regs->intr_enable);
+ done:
ehci->next_statechange = jiffies + msecs_to_jiffies(10);
+ ehci->enabled_hrtimer_events = 0;
+ ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
spin_unlock_irq (&ehci->lock);
- /* ehci_work() may have re-enabled the watchdog timer, which we do not
- * want, and so we must delete any pending watchdog timer events.
- */
- del_timer_sync(&ehci->watchdog);
+ hrtimer_cancel(&ehci->hrtimer);
return 0;
}
@@ -314,21 +379,19 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
u32 temp;
u32 power_okay;
int i;
- u8 resume_needed = 0;
+ unsigned long resume_needed = 0;
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
spin_lock_irq (&ehci->lock);
- if (!HCD_HW_ACCESSIBLE(hcd)) {
- spin_unlock_irq(&ehci->lock);
- return -ESHUTDOWN;
- }
+ if (!HCD_HW_ACCESSIBLE(hcd) || ehci->shutdown)
+ goto shutdown;
if (unlikely(ehci->debug)) {
- if (!dbgp_reset_prep())
+ if (!dbgp_reset_prep(hcd))
ehci->debug = NULL;
else
- dbgp_external_startup();
+ dbgp_external_startup(hcd);
}
/* Ideally and we've got a real resume here, and no port's power
@@ -352,23 +415,39 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);
/* restore CMD_RUN, framelist size, and irq threshold */
+ ehci->command |= CMD_RUN;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ ehci->rh_state = EHCI_RH_RUNNING;
- /* Some controller/firmware combinations need a delay during which
- * they set up the port statuses. See Bugzilla #8190. */
- spin_unlock_irq(&ehci->lock);
- msleep(8);
- spin_lock_irq(&ehci->lock);
+ /*
+ * According to Bugzilla #8190, the port status of some controllers
+ * reads back wrong unless we insert a delay: the port shows up as
+ * enabled but neither suspended nor resumed.
+ */
+ i = HCS_N_PORTS(ehci->hcs_params);
+ while (i--) {
+ temp = ehci_readl(ehci, &ehci->regs->port_status[i]);
+ if ((temp & PORT_PE) &&
+ !(temp & (PORT_SUSPEND | PORT_RESUME))) {
+ ehci_dbg(ehci, "Port status(0x%x) is wrong\n", temp);
+ spin_unlock_irq(&ehci->lock);
+ msleep(8);
+ spin_lock_irq(&ehci->lock);
+ break;
+ }
+ }
+
+ if (ehci->shutdown)
+ goto shutdown;
/* clear phy low-power mode before resume */
- if (ehci->bus_suspended && ehci->has_hostpc) {
+ if (ehci->bus_suspended && ehci->has_tdi_phy_lpm) {
i = HCS_N_PORTS(ehci->hcs_params);
while (i--) {
if (test_bit(i, &ehci->bus_suspended)) {
- u32 __iomem *hostpc_reg;
+ u32 __iomem *hostpc_reg =
+ &ehci->regs->hostpc[i];
- hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
- + HOSTPC0 + 4 * i);
temp = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp & ~HOSTPC_PHCD,
hostpc_reg);
@@ -377,6 +456,8 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
spin_unlock_irq(&ehci->lock);
msleep(5);
spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto shutdown;
}
/* manually resume the ports we suspended during bus_suspend() */
@@ -387,7 +468,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
if (test_bit(i, &ehci->bus_suspended) &&
(temp & PORT_SUSPEND)) {
temp |= PORT_RESUME;
- resume_needed = 1;
+ set_bit(i, &resume_needed);
}
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
}
@@ -397,40 +478,37 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
spin_unlock_irq(&ehci->lock);
msleep(20);
spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto shutdown;
}
i = HCS_N_PORTS (ehci->hcs_params);
while (i--) {
temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
- if (test_bit(i, &ehci->bus_suspended) &&
- (temp & PORT_SUSPEND)) {
- temp &= ~(PORT_RWC_BITS | PORT_RESUME);
+ if (test_bit(i, &resume_needed)) {
+ temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME);
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
- ehci_vdbg (ehci, "resumed port %d\n", i + 1);
}
}
- (void) ehci_readl(ehci, &ehci->regs->command);
-
- /* maybe re-activate the schedule(s) */
- temp = 0;
- if (ehci->async->qh_next.qh)
- temp |= CMD_ASE;
- if (ehci->periodic_sched)
- temp |= CMD_PSE;
- if (temp) {
- ehci->command |= temp;
- ehci_writel(ehci, ehci->command, &ehci->regs->command);
- }
ehci->next_statechange = jiffies + msecs_to_jiffies(5);
- hcd->state = HC_STATE_RUNNING;
+ spin_unlock_irq(&ehci->lock);
+
+ ehci_handover_companion_ports(ehci);
/* Now we can safely re-enable irqs */
+ spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto shutdown;
ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
+ (void) ehci_readl(ehci, &ehci->regs->intr_enable);
+ spin_unlock_irq(&ehci->lock);
- spin_unlock_irq (&ehci->lock);
- ehci_handover_companion_ports(ehci);
return 0;
+
+ shutdown:
+ spin_unlock_irq(&ehci->lock);
+ return -ESHUTDOWN;
}
#else
@@ -442,29 +520,6 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-/* Display the ports dedicated to the companion controller */
-static ssize_t show_companion(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ehci_hcd *ehci;
- int nports, index, n;
- int count = PAGE_SIZE;
- char *ptr = buf;
-
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
- nports = HCS_N_PORTS(ehci->hcs_params);
-
- for (index = 0; index < nports; ++index) {
- if (test_bit(index, &ehci->companion_ports)) {
- n = scnprintf(ptr, count, "%d\n", index + 1);
- ptr += n;
- count -= n;
- }
- }
- return ptr - buf;
-}
-
/*
* Sets the owner of a port
*/
@@ -499,57 +554,6 @@ static void set_owner(struct ehci_hcd *ehci, int portnum, int new_owner)
}
}
-/*
- * Dedicate or undedicate a port to the companion controller.
- * Syntax is "[-]portnum", where a leading '-' sign means
- * return control of the port to the EHCI controller.
- */
-static ssize_t store_companion(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ehci_hcd *ehci;
- int portnum, new_owner;
-
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
- new_owner = PORT_OWNER; /* Owned by companion */
- if (sscanf(buf, "%d", &portnum) != 1)
- return -EINVAL;
- if (portnum < 0) {
- portnum = - portnum;
- new_owner = 0; /* Owned by EHCI */
- }
- if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params))
- return -ENOENT;
- portnum--;
- if (new_owner)
- set_bit(portnum, &ehci->companion_ports);
- else
- clear_bit(portnum, &ehci->companion_ports);
- set_owner(ehci, portnum, new_owner);
- return count;
-}
-static DEVICE_ATTR(companion, 0644, show_companion, store_companion);
-
-static inline void create_companion_file(struct ehci_hcd *ehci)
-{
- int i;
-
- /* with integrated TT there is no companion! */
- if (!ehci_is_TDI(ehci))
- i = device_create_file(ehci_to_hcd(ehci)->self.controller,
- &dev_attr_companion);
-}
-
-static inline void remove_companion_file(struct ehci_hcd *ehci)
-{
- /* with integrated TT there is no companion! */
- if (!ehci_is_TDI(ehci))
- device_remove_file(ehci_to_hcd(ehci)->self.controller,
- &dev_attr_companion);
-}
-
-
/*-------------------------------------------------------------------------*/
static int check_reset_complete (
@@ -584,7 +588,8 @@ static int check_reset_complete (
if (ehci->has_amcc_usb23)
set_ohci_hcfs(ehci, 1);
} else {
- ehci_dbg (ehci, "port %d high speed\n", index + 1);
+ ehci_dbg(ehci, "port %d reset complete, port enabled\n",
+ index + 1);
/* ensure 440EPx ohci controller state is suspended */
if (ehci->has_amcc_usb23)
set_ohci_hcfs(ehci, 0);
@@ -602,15 +607,11 @@ static int
ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
- u32 temp, status = 0;
+ u32 temp, status;
u32 mask;
int ports, i, retval = 1;
unsigned long flags;
- u32 ppcd = 0;
-
- /* if !USB_SUSPEND, root hub timers won't get shut down ... */
- if (!HC_IS_RUNNING(hcd->state))
- return 0;
+ u32 ppcd = ~0;
/* init status to no-changes */
buf [0] = 0;
@@ -620,6 +621,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
retval++;
}
+ /* Inform the core about resumes-in-progress by returning
+ * a non-zero value even if there are no status changes.
+ */
+ status = ehci->resuming_ports;
+
/* Some boards (mostly VIA?) report bogus overcurrent indications,
* causing massive log spam unless we completely ignore them. It
* may be relevant that VIA VT8235 controllers, where PORT_POWER is
@@ -643,9 +649,10 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
for (i = 0; i < ports; i++) {
/* leverage per-port change bits feature */
- if (ehci->has_ppcd && !(ppcd & (1 << i)))
- continue;
- temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
+ if (ppcd & (1 << i))
+ temp = ehci_readl(ehci, &ehci->regs->port_status[i]);
+ else
+ temp = 0;
/*
* Return status information even for ports with OWNER set.
@@ -664,7 +671,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
status = STS_PCD;
}
}
- /* FIXME autosuspend idle root hubs */
+
+ /* If a resume is in progress, make sure it can finish */
+ if (ehci->resuming_ports)
+ mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25));
+
spin_unlock_irqrestore (&ehci->lock, flags);
return status ? retval : 0;
}
@@ -688,8 +699,8 @@ ehci_hub_descriptor (
desc->bDescLength = 7 + 2 * temp;
/* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
- memset (&desc->bitmap [0], 0, temp);
- memset (&desc->bitmap [temp], 0xff, temp);
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
temp = 0x0008; /* per-port overcurrent reporting */
if (HCS_PPC (ehci->hcs_params))
@@ -705,8 +716,147 @@ ehci_hub_descriptor (
}
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_HCD_TEST_MODE
+
+#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06
-static int ehci_hub_control (
+static void usb_ehset_completion(struct urb *urb)
+{
+ struct completion *done = urb->context;
+
+ complete(done);
+}
+static int submit_single_step_set_feature(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ int is_setup
+);
+
+/*
+ * Allocate and initialize a control URB. This request will be used by the
+ * EHSET SINGLE_STEP_SET_FEATURE test in which the DATA and STATUS stages
+ * of the GetDescriptor request are sent 15 seconds after the SETUP stage.
+ * Returns NULL on failure.
+ */
+static struct urb *request_single_step_set_feature_urb(
+ struct usb_device *udev,
+ void *dr,
+ void *buf,
+ struct completion *done
+) {
+ struct urb *urb;
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ struct usb_host_endpoint *ep;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return NULL;
+
+ urb->pipe = usb_rcvctrlpipe(udev, 0);
+ ep = (usb_pipein(urb->pipe) ? udev->ep_in : udev->ep_out)
+ [usb_pipeendpoint(urb->pipe)];
+ if (!ep) {
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ urb->ep = ep;
+ urb->dev = udev;
+ urb->setup_packet = (void *)dr;
+ urb->transfer_buffer = buf;
+ urb->transfer_buffer_length = USB_DT_DEVICE_SIZE;
+ urb->complete = usb_ehset_completion;
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->transfer_flags = URB_DIR_IN;
+ usb_get_urb(urb);
+ atomic_inc(&urb->use_count);
+ atomic_inc(&urb->dev->urbnum);
+ urb->setup_dma = dma_map_single(
+ hcd->self.controller,
+ urb->setup_packet,
+ sizeof(struct usb_ctrlrequest),
+ DMA_TO_DEVICE);
+ urb->transfer_dma = dma_map_single(
+ hcd->self.controller,
+ urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ DMA_FROM_DEVICE);
+ urb->context = done;
+ return urb;
+}
+
+static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+{
+ int retval = -ENOMEM;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb;
+ struct usb_device *udev;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct usb_device_descriptor *buf;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ /* Obtain udev of the rhub's child port */
+ udev = usb_hub_find_child(hcd->self.root_hub, port);
+ if (!udev) {
+ ehci_err(ehci, "No device attached to the RootHub\n");
+ return -ENODEV;
+ }
+ buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!dr) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ /* Fill Setup packet for GetDescriptor */
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8);
+ dr->wIndex = 0;
+ dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+ urb = request_single_step_set_feature_urb(udev, dr, buf, &done);
+ if (!urb)
+ goto cleanup;
+
+ /* Submit just the SETUP stage */
+ retval = submit_single_step_set_feature(hcd, urb, 1);
+ if (retval)
+ goto out1;
+ if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
+ usb_kill_urb(urb);
+ retval = -ETIMEDOUT;
+ ehci_err(ehci, "%s SETUP stage timed out on ep0\n", __func__);
+ goto out1;
+ }
+ msleep(15 * 1000);
+
+ /* Complete remaining DATA and STATUS stages using the same URB */
+ urb->status = -EINPROGRESS;
+ usb_get_urb(urb);
+ atomic_inc(&urb->use_count);
+ atomic_inc(&urb->dev->urbnum);
+ retval = submit_single_step_set_feature(hcd, urb, 0);
+ if (!retval && !wait_for_completion_timeout(&done,
+ msecs_to_jiffies(2000))) {
+ usb_kill_urb(urb);
+ retval = -ETIMEDOUT;
+ ehci_err(ehci, "%s IN stage timed out on ep0\n", __func__);
+ }
+out1:
+ usb_free_urb(urb);
+cleanup:
+ kfree(dr);
+ kfree(buf);
+ return retval;
+}
+#endif /* CONFIG_USB_HCD_TEST_MODE */
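ehset_single_step_set_feature() is reached from a SetPortFeature(PORT_TEST)
request carrying the EHSET_TEST_SINGLE_STEP_SET_FEATURE selector (0x06), as
issued by USB-IF embedded-host compliance testers. The dispatch sits in the
SetPortFeature handling of ehci_hub_control() and is not shown in this hunk;
an illustrative sketch only, where the port argument is the 1-based root-hub
port number that usb_hub_find_child() expects and the lock is dropped because
the routine sleeps:

    case USB_PORT_FEAT_TEST:
        /* the test selector lives in the upper byte of wIndex */
        if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
            spin_unlock_irqrestore(&ehci->lock, flags);
            retval = ehset_single_step_set_feature(hcd, wIndex + 1);
            spin_lock_irqsave(&ehci->lock, flags);
            break;
        }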
+/*-------------------------------------------------------------------------*/
+
+int ehci_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
@@ -718,7 +868,7 @@ static int ehci_hub_control (
int ports = HCS_N_PORTS (ehci->hcs_params);
u32 __iomem *status_reg = &ehci->regs->port_status[
(wIndex & 0xff) - 1];
- u32 __iomem *hostpc_reg = NULL;
+ u32 __iomem *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1];
u32 temp, temp1, status;
unsigned long flags;
int retval = 0;
@@ -731,9 +881,6 @@ static int ehci_hub_control (
* power, "this is the one", etc. EHCI spec supports this.
*/
- if (ehci->has_hostpc)
- hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs
- + HOSTPC0 + 4 * ((wIndex & 0xff) - 1));
spin_lock_irqsave (&ehci->lock, flags);
switch (typeReq) {
case ClearHubFeature:
@@ -751,6 +898,7 @@ static int ehci_hub_control (
goto error;
wIndex--;
temp = ehci_readl(ehci, status_reg);
+ temp &= ~PORT_RWC_BITS;
/*
* Even if OWNER is set, so the port is owned by the
@@ -764,21 +912,27 @@ static int ehci_hub_control (
ehci_writel(ehci, temp & ~PORT_PE, status_reg);
break;
case USB_PORT_FEAT_C_ENABLE:
- ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_PEC,
- status_reg);
+ ehci_writel(ehci, temp | PORT_PEC, status_reg);
break;
case USB_PORT_FEAT_SUSPEND:
if (temp & PORT_RESET)
goto error;
if (ehci->no_selective_suspend)
break;
+#ifdef CONFIG_USB_OTG
+ if ((hcd->self.otg_port == (wIndex + 1))
+ && hcd->self.b_hnp_enable) {
+ otg_start_hnp(hcd->phy->otg);
+ break;
+ }
+#endif
if (!(temp & PORT_SUSPEND))
break;
if ((temp & PORT_PE) == 0)
goto error;
/* clear phy low-power mode before resume */
- if (hostpc_reg) {
+ if (ehci->has_tdi_phy_lpm) {
temp1 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
hostpc_reg);
@@ -787,32 +941,26 @@ static int ehci_hub_control (
spin_lock_irqsave(&ehci->lock, flags);
}
/* resume signaling for 20 msec */
- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+ temp &= ~PORT_WAKE_BITS;
ehci_writel(ehci, temp | PORT_RESUME, status_reg);
ehci->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
+ set_bit(wIndex, &ehci->resuming_ports);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
break;
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(wIndex, &ehci->port_c_suspend);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC (ehci->hcs_params))
- ehci_writel(ehci,
- temp & ~(PORT_RWC_BITS | PORT_POWER),
- status_reg);
+ ehci_writel(ehci, temp & ~PORT_POWER,
+ status_reg);
break;
case USB_PORT_FEAT_C_CONNECTION:
- if (ehci->has_lpm) {
- /* clear PORTSC bits on disconnect */
- temp &= ~PORT_LPM;
- temp &= ~PORT_DEV_ADDR;
- }
- ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_CSC,
- status_reg);
+ ehci_writel(ehci, temp | PORT_CSC, status_reg);
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
- ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_OCC,
- status_reg);
+ ehci_writel(ehci, temp | PORT_OCC, status_reg);
break;
case USB_PORT_FEAT_C_RESET:
/* GetPortStatus clears reset */
@@ -854,54 +1002,56 @@ static int ehci_hub_control (
* power switching; they're allowed to just limit the
* current. khubd will turn the power back on.
*/
- if (HCS_PPC (ehci->hcs_params)){
+ if (((temp & PORT_OC) || (ehci->need_oc_pp_cycle))
+ && HCS_PPC(ehci->hcs_params)) {
ehci_writel(ehci,
temp & ~(PORT_RWC_BITS | PORT_POWER),
status_reg);
+ temp = ehci_readl(ehci, status_reg);
}
}
- /* whoever resumes must GetPortStatus to complete it!! */
- if (temp & PORT_RESUME) {
+ /* no reset or resume pending */
+ if (!ehci->reset_done[wIndex]) {
/* Remote Wakeup received? */
- if (!ehci->reset_done[wIndex]) {
+ if (temp & PORT_RESUME) {
/* resume signaling for 20 msec */
ehci->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
+ set_bit(wIndex, &ehci->resuming_ports);
/* check the port again */
mod_timer(&ehci_to_hcd(ehci)->rh_timer,
ehci->reset_done[wIndex]);
}
- /* resume completed? */
- else if (time_after_eq(jiffies,
- ehci->reset_done[wIndex])) {
- clear_bit(wIndex, &ehci->suspended_ports);
- set_bit(wIndex, &ehci->port_c_suspend);
- ehci->reset_done[wIndex] = 0;
+ /* reset or resume not yet complete */
+ } else if (!time_after_eq(jiffies, ehci->reset_done[wIndex])) {
+ ; /* wait until it is complete */
- /* stop resume signaling */
- temp = ehci_readl(ehci, status_reg);
- ehci_writel(ehci,
- temp & ~(PORT_RWC_BITS | PORT_RESUME),
- status_reg);
- retval = handshake(ehci, status_reg,
- PORT_RESUME, 0, 2000 /* 2msec */);
- if (retval != 0) {
- ehci_err(ehci,
- "port %d resume error %d\n",
+ /* resume completed */
+ } else if (test_bit(wIndex, &ehci->resuming_ports)) {
+ clear_bit(wIndex, &ehci->suspended_ports);
+ set_bit(wIndex, &ehci->port_c_suspend);
+ ehci->reset_done[wIndex] = 0;
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
+
+ /* stop resume signaling */
+ temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME);
+ ehci_writel(ehci, temp, status_reg);
+ clear_bit(wIndex, &ehci->resuming_ports);
+ retval = ehci_handshake(ehci, status_reg,
+ PORT_RESUME, 0, 2000 /* 2msec */);
+ if (retval != 0) {
+ ehci_err(ehci, "port %d resume error %d\n",
wIndex + 1, retval);
- goto error;
- }
- temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
+ goto error;
}
- }
+ temp = ehci_readl(ehci, status_reg);
/* whoever resets must GetPortStatus to complete it!! */
- if ((temp & PORT_RESET)
- && time_after_eq(jiffies,
- ehci->reset_done[wIndex])) {
+ } else {
status |= USB_PORT_STAT_C_RESET << 16;
ehci->reset_done [wIndex] = 0;
@@ -911,7 +1061,7 @@ static int ehci_hub_control (
/* REVISIT: some hardware needs 550+ usec to clear
* this bit; seems too long to spin routinely...
*/
- retval = handshake(ehci, status_reg,
+ retval = ehci_handshake(ehci, status_reg,
PORT_RESET, 0, 1000);
if (retval != 0) {
ehci_err (ehci, "port %d reset error %d\n",
@@ -924,9 +1074,6 @@ static int ehci_hub_control (
ehci_readl(ehci, status_reg));
}
- if (!(temp & (PORT_RESUME|PORT_RESET)))
- ehci->reset_done[wIndex] = 0;
-
/* transfer dedicated ports to the companion hc */
if ((temp & PORT_CONNECT) &&
test_bit(wIndex, &ehci->companion_ports)) {
@@ -960,9 +1107,11 @@ static int ehci_hub_control (
status |= USB_PORT_STAT_SUSPEND;
} else if (test_bit(wIndex, &ehci->suspended_ports)) {
clear_bit(wIndex, &ehci->suspended_ports);
+ clear_bit(wIndex, &ehci->resuming_ports);
ehci->reset_done[wIndex] = 0;
if (temp & PORT_PE)
set_bit(wIndex, &ehci->port_c_suspend);
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
}
if (temp & PORT_OC)
@@ -974,10 +1123,8 @@ static int ehci_hub_control (
if (test_bit(wIndex, &ehci->port_c_suspend))
status |= USB_PORT_STAT_C_SUSPEND << 16;
-#ifndef VERBOSE_DEBUG
- if (status & ~0xffff) /* only if wPortChange is interesting */
-#endif
- dbg_port (ehci, "GetStatus", wIndex + 1, temp);
+ if (status & ~0xffff) /* only if wPortChange is interesting */
+ dbg_port(ehci, "GetStatus", wIndex + 1, temp);
put_unaligned_le32(status, buf);
break;
case SetHubFeature:
@@ -1020,12 +1167,12 @@ static int ehci_hub_control (
/* After above check the port must be connected.
* Set appropriate bit thus could put phy into low power
- * mode if we have hostpc feature
+ * mode if we have tdi_phy_lpm feature
*/
temp &= ~PORT_WKCONN_E;
temp |= PORT_WKDISC_E | PORT_WKOC_E;
ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
- if (hostpc_reg) {
+ if (ehci->has_tdi_phy_lpm) {
spin_unlock_irqrestore(&ehci->lock, flags);
msleep(5);/* 5ms for HCD enter low pwr mode */
spin_lock_irqsave(&ehci->lock, flags);
@@ -1045,7 +1192,7 @@ static int ehci_hub_control (
status_reg);
break;
case USB_PORT_FEAT_RESET:
- if (temp & PORT_RESUME)
+ if (temp & (PORT_SUSPEND|PORT_RESUME))
goto error;
/* line status bits may report this as low speed,
* which can be fine if this root hub has a
@@ -1059,7 +1206,6 @@ static int ehci_hub_control (
wIndex + 1);
temp |= PORT_OWNER;
} else {
- ehci_vdbg (ehci, "port %d reset\n", wIndex + 1);
temp |= PORT_RESET;
temp &= ~PORT_PE;
@@ -1080,10 +1226,37 @@ static int ehci_hub_control (
* about the EHCI-specific stuff.
*/
case USB_PORT_FEAT_TEST:
+#ifdef CONFIG_USB_HCD_TEST_MODE
+ if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ retval = ehset_single_step_set_feature(hcd,
+ wIndex);
+ spin_lock_irqsave(&ehci->lock, flags);
+ break;
+ }
+#endif
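+ /*
+  * Selectors 1-5 are the standard USB 2.0 port test modes:
+  * Test_J, Test_K, Test_SE0_NAK, Test_Packet and Test_Force_Enable.
+  */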
if (!selector || selector > 5)
goto error;
+ spin_unlock_irqrestore(&ehci->lock, flags);
ehci_quiesce(ehci);
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ /* Put all enabled ports into suspend */
+ while (ports--) {
+ u32 __iomem *sreg =
+ &ehci->regs->port_status[ports];
+
+ temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS;
+ if (temp & PORT_PE)
+ ehci_writel(ehci, temp | PORT_SUSPEND,
+ sreg);
+ }
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
ehci_halt(ehci);
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ temp = ehci_readl(ehci, status_reg);
temp |= selector << 16;
ehci_writel(ehci, temp, status_reg);
break;
@@ -1103,6 +1276,7 @@ error_exit:
spin_unlock_irqrestore (&ehci->lock, flags);
return retval;
}
+EXPORT_SYMBOL_GPL(ehci_hub_control);
static void ehci_relinquish_port(struct usb_hcd *hcd, int portnum)
{
diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
deleted file mode 100644
index 89b7c70c6ed..00000000000
--- a/drivers/usb/host/ehci-ixp4xx.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * IXP4XX EHCI Host Controller Driver
- *
- * Author: Vladimir Barinov <vbarinov@embeddedalley.com>
- *
- * Based on "ehci-fsl.c" by Randy Vinson <rvinson@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#include <linux/platform_device.h>
-
-static int ixp4xx_ehci_init(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int retval = 0;
-
- ehci->big_endian_desc = 1;
- ehci->big_endian_mmio = 1;
-
- ehci->caps = hcd->regs + 0x100;
- ehci->regs = hcd->regs + 0x100
- + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
-
- hcd->has_tt = 1;
- ehci_reset(ehci);
-
- retval = ehci_init(hcd);
- if (retval)
- return retval;
-
- ehci_port_power(ehci, 0);
-
- return retval;
-}
-
-static const struct hc_driver ixp4xx_ehci_hc_driver = {
- .description = hcd_name,
- .product_desc = "IXP4XX EHCI Host Controller",
- .hcd_priv_size = sizeof(struct ehci_hcd),
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
- .reset = ixp4xx_ehci_init,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
- .get_frame_number = ehci_get_frame,
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
-#if defined(CONFIG_PM)
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
-#endif
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
-};
-
-static int ixp4xx_ehci_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd;
- const struct hc_driver *driver = &ixp4xx_ehci_hc_driver;
- struct resource *res;
- int irq;
- int retval;
-
- if (usb_disabled())
- return -ENODEV;
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "Found HC with no IRQ. Check %s setup!\n",
- dev_name(&pdev->dev));
- return -ENODEV;
- }
- irq = res->start;
-
- hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
- if (!hcd) {
- retval = -ENOMEM;
- goto fail_create_hcd;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "Found HC with no register addr. Check %s setup!\n",
- dev_name(&pdev->dev));
- retval = -ENODEV;
- goto fail_request_resource;
- }
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
- driver->description)) {
- dev_dbg(&pdev->dev, "controller already in use\n");
- retval = -EBUSY;
- goto fail_request_resource;
- }
-
- hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
- if (hcd->regs == NULL) {
- dev_dbg(&pdev->dev, "error mapping memory\n");
- retval = -EFAULT;
- goto fail_ioremap;
- }
-
- retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
- if (retval)
- goto fail_add_hcd;
-
- return retval;
-
-fail_add_hcd:
- iounmap(hcd->regs);
-fail_ioremap:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-fail_request_resource:
- usb_put_hcd(hcd);
-fail_create_hcd:
- dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
- return retval;
-}
-
-static int ixp4xx_ehci_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-MODULE_ALIAS("platform:ixp4xx-ehci");
-
-static struct platform_driver ixp4xx_ehci_driver = {
- .probe = ixp4xx_ehci_probe,
- .remove = ixp4xx_ehci_remove,
- .driver = {
- .name = "ixp4xx-ehci",
- },
-};
diff --git a/drivers/usb/host/ehci-lpm.c b/drivers/usb/host/ehci-lpm.c
deleted file mode 100644
index b4d4d63c13e..00000000000
--- a/drivers/usb/host/ehci-lpm.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/* ehci-lpm.c EHCI HCD LPM support code
- * Copyright (c) 2008 - 2010, Intel Corporation.
- * Author: Jacob Pan <jacob.jun.pan@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-/* this file is part of ehci-hcd.c */
-static int ehci_lpm_set_da(struct ehci_hcd *ehci, int dev_addr, int port_num)
-{
- u32 __iomem portsc;
-
- ehci_dbg(ehci, "set dev address %d for port %d\n", dev_addr, port_num);
- if (port_num > HCS_N_PORTS(ehci->hcs_params)) {
- ehci_dbg(ehci, "invalid port number %d\n", port_num);
- return -ENODEV;
- }
- portsc = ehci_readl(ehci, &ehci->regs->port_status[port_num-1]);
- portsc &= ~PORT_DEV_ADDR;
- portsc |= dev_addr<<25;
- ehci_writel(ehci, portsc, &ehci->regs->port_status[port_num-1]);
- return 0;
-}
-
-/*
- * this function is used to check if the device support LPM
- * if yes, mark the PORTSC register with PORT_LPM bit
- */
-static int ehci_lpm_check(struct ehci_hcd *ehci, int port)
-{
- u32 __iomem *portsc ;
- u32 val32;
- int retval;
-
- portsc = &ehci->regs->port_status[port-1];
- val32 = ehci_readl(ehci, portsc);
- if (!(val32 & PORT_DEV_ADDR)) {
- ehci_dbg(ehci, "LPM: no device attached\n");
- return -ENODEV;
- }
- val32 |= PORT_LPM;
- ehci_writel(ehci, val32, portsc);
- msleep(5);
- val32 |= PORT_SUSPEND;
- ehci_dbg(ehci, "Sending LPM 0x%08x to port %d\n", val32, port);
- ehci_writel(ehci, val32, portsc);
- /* wait for ACK */
- msleep(10);
- retval = handshake(ehci, &ehci->regs->port_status[port-1], PORT_SSTS,
- PORTSC_SUSPEND_STS_ACK, 125);
- dbg_port(ehci, "LPM", port, val32);
- if (retval != -ETIMEDOUT) {
- ehci_dbg(ehci, "LPM: device ACK for LPM\n");
- val32 |= PORT_LPM;
- /*
- * now device should be in L1 sleep, let's wake up the device
- * so that we can complete enumeration.
- */
- ehci_writel(ehci, val32, portsc);
- msleep(10);
- val32 |= PORT_RESUME;
- ehci_writel(ehci, val32, portsc);
- } else {
- ehci_dbg(ehci, "LPM: device does not ACK, disable LPM %d\n",
- retval);
- val32 &= ~PORT_LPM;
- retval = -ETIMEDOUT;
- ehci_writel(ehci, val32, portsc);
- }
-
- return retval;
-}
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index d36e4e75e08..c0fb6a8ae6a 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -64,10 +64,8 @@ static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
}
-static void qh_destroy(struct ehci_qh *qh)
+static void qh_destroy(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- struct ehci_hcd *ehci = qh->ehci;
-
/* clean qtds first, and know this is not linked */
if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
ehci_dbg (ehci, "unused qh not empty!\n");
@@ -92,11 +90,10 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
if (!qh->hw)
goto fail;
memset(qh->hw, 0, sizeof *qh->hw);
- qh->refcount = 1;
- qh->ehci = ehci;
qh->qh_dma = dma;
// INIT_LIST_HEAD (&qh->qh_list);
INIT_LIST_HEAD (&qh->qtd_list);
+ INIT_LIST_HEAD(&qh->unlink_node);
/* dummy td enables safe urb queuing */
qh->dummy = ehci_qtd_alloc (ehci, flags);
@@ -113,20 +110,6 @@ fail:
return NULL;
}
-/* to share a qh (cpu threads, or hc) */
-static inline struct ehci_qh *qh_get (struct ehci_qh *qh)
-{
- WARN_ON(!qh->refcount);
- qh->refcount++;
- return qh;
-}
-
-static inline void qh_put (struct ehci_qh *qh)
-{
- if (!--qh->refcount)
- qh_destroy(qh);
-}
-
/*-------------------------------------------------------------------------*/
/* The queue heads and transfer descriptors are managed from pools tied
@@ -136,11 +119,14 @@ static inline void qh_put (struct ehci_qh *qh)
static void ehci_mem_cleanup (struct ehci_hcd *ehci)
{
- free_cached_lists(ehci);
if (ehci->async)
- qh_put (ehci->async);
+ qh_destroy(ehci, ehci->async);
ehci->async = NULL;
+ if (ehci->dummy)
+ qh_destroy(ehci, ehci->dummy);
+ ehci->dummy = NULL;
+
/* DMA consistent memory and pools */
if (ehci->qtd_pool)
dma_pool_destroy (ehci->qtd_pool);
@@ -227,8 +213,26 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
if (ehci->periodic == NULL) {
goto fail;
}
- for (i = 0; i < ehci->periodic_size; i++)
- ehci->periodic [i] = EHCI_LIST_END(ehci);
+
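+ /*
+  * When the use_dummy_qh quirk is set, every periodic frame-list entry
+  * is pointed at an otherwise unused QH instead of being marked
+  * end-of-list, so the controller always fetches a valid link.
+  */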
+ if (ehci->use_dummy_qh) {
+ struct ehci_qh_hw *hw;
+ ehci->dummy = ehci_qh_alloc(ehci, flags);
+ if (!ehci->dummy)
+ goto fail;
+
+ hw = ehci->dummy->hw;
+ hw->hw_next = EHCI_LIST_END(ehci);
+ hw->hw_qtd_next = EHCI_LIST_END(ehci);
+ hw->hw_alt_next = EHCI_LIST_END(ehci);
+ ehci->dummy->hw = hw;
+
+ for (i = 0; i < ehci->periodic_size; i++)
+ ehci->periodic[i] = cpu_to_hc32(ehci,
+ ehci->dummy->qh_dma);
+ } else {
+ for (i = 0; i < ehci->periodic_size; i++)
+ ehci->periodic[i] = EHCI_LIST_END(ehci);
+ }
/* software shadow of hardware table */
ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
new file mode 100644
index 00000000000..982c09bebe0
--- /dev/null
+++ b/drivers/usb/host/ehci-msm.c
@@ -0,0 +1,232 @@
+/* ehci-msm.c - HSUSB Host Controller Driver Implementation
+ *
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * Partly derived from ehci-fsl.c and ehci-hcd.c
+ * Copyright (c) 2000-2004 by David Brownell
+ * Copyright (c) 2005 MontaVista Software
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/msm_hsusb_hw.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
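+/* The USB_* register macros in msm_hsusb_hw.h are offsets from MSM_USB_BASE */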
+#define MSM_USB_BASE (hcd->regs)
+
+#define DRIVER_DESC "Qualcomm On-Chip EHCI Host Controller"
+
+static const char hcd_name[] = "ehci-msm";
+static struct hc_driver __read_mostly msm_hc_driver;
+
+static int ehci_msm_reset(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ ehci->caps = USB_CAPLENGTH;
+ hcd->has_tt = 1;
+
+ retval = ehci_setup(hcd);
+ if (retval)
+ return retval;
+
+ /* bursts of unspecified length. */
+ writel(0, USB_AHBBURST);
+ /* Use the AHB transactor */
+ writel(0, USB_AHBMODE);
+ /* Disable streaming mode and select host mode */
+ writel(0x13, USB_USBMODE);
+
+ return 0;
+}
+
+static int ehci_msm_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct resource *res;
+ struct usb_phy *phy;
+ int ret;
+
+ dev_dbg(&pdev->dev, "ehci_msm probe\n");
+
+ hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ return -ENOMEM;
+ }
+
+ hcd->irq = platform_get_irq(pdev, 0);
+ if (hcd->irq < 0) {
+ dev_err(&pdev->dev, "Unable to get IRQ resource\n");
+ ret = hcd->irq;
+ goto put_hcd;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get memory resource\n");
+ ret = -ENODEV;
+ goto put_hcd;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto put_hcd;
+ }
+
+ /*
+ * OTG driver takes care of PHY initialization, clock management,
+ * powering up VBUS, mapping of registers address space and power
+ * management.
+ */
+ if (pdev->dev.of_node)
+ phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
+ else
+ phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "unable to find transceiver\n");
+ ret = -EPROBE_DEFER;
+ goto put_hcd;
+ }
+
+ ret = otg_set_host(phy->otg, &hcd->self);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register with transceiver\n");
+ goto put_hcd;
+ }
+
+ hcd->phy = phy;
+ device_init_wakeup(&pdev->dev, 1);
+ /*
+ * OTG device parent of HCD takes care of putting
+ * hardware into low power mode.
+ */
+ pm_runtime_no_callbacks(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /* FIXME: need to call usb_add_hcd() here? */
+
+ return 0;
+
+put_hcd:
+ usb_put_hcd(hcd);
+
+ return ret;
+}
+
+static int ehci_msm_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ device_init_wakeup(&pdev->dev, 0);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ otg_set_host(hcd->phy->otg, NULL);
+
+ /* FIXME: need to call usb_remove_hcd() here? */
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ehci_msm_pm_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ bool do_wakeup = device_may_wakeup(dev);
+
+ dev_dbg(dev, "ehci-msm PM suspend\n");
+
+ return ehci_suspend(hcd, do_wakeup);
+}
+
+static int ehci_msm_pm_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "ehci-msm PM resume\n");
+ ehci_resume(hcd, false);
+
+ return 0;
+}
+#else
+#define ehci_msm_pm_suspend NULL
+#define ehci_msm_pm_resume NULL
+#endif
+
+static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
+ .suspend = ehci_msm_pm_suspend,
+ .resume = ehci_msm_pm_resume,
+};
+
+static struct of_device_id msm_ehci_dt_match[] = {
+ { .compatible = "qcom,ehci-host", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_ehci_dt_match);
+
+static struct platform_driver ehci_msm_driver = {
+ .probe = ehci_msm_probe,
+ .remove = ehci_msm_remove,
+ .driver = {
+ .name = "msm_hsusb_host",
+ .pm = &ehci_msm_dev_pm_ops,
+ .of_match_table = msm_ehci_dt_match,
+ },
+};
+
+static const struct ehci_driver_overrides msm_overrides __initdata = {
+ .reset = ehci_msm_reset,
+};
+
+static int __init ehci_msm_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ehci_init_driver(&msm_hc_driver, &msm_overrides);
+ return platform_driver_register(&ehci_msm_driver);
+}
+module_init(ehci_msm_init);
+
+static void __exit ehci_msm_cleanup(void)
+{
+ platform_driver_unregister(&ehci_msm_driver);
+}
+module_exit(ehci_msm_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:msm-ehci");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
new file mode 100644
index 00000000000..08147c35f83
--- /dev/null
+++ b/drivers/usb/host/ehci-mv.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Author: Chao Xie <chao.xie@marvell.com>
+ * Neil Zhang <zhangwm@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/usb/otg.h>
+#include <linux/platform_data/mv_usb.h>
+
+#define CAPLENGTH_MASK (0xff)
+
+struct ehci_hcd_mv {
+ struct usb_hcd *hcd;
+
+ /* Which mode is this ehci running in: OTG or Host? */
+ int mode;
+
+ void __iomem *phy_regs;
+ void __iomem *cap_regs;
+ void __iomem *op_regs;
+
+ struct usb_phy *otg;
+
+ struct mv_usb_platform_data *pdata;
+
+ struct clk *clk;
+};
+
+static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
+{
+ clk_prepare_enable(ehci_mv->clk);
+}
+
+static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
+{
+ clk_disable_unprepare(ehci_mv->clk);
+}
+
+static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
+{
+ int retval;
+
+ ehci_clock_enable(ehci_mv);
+ if (ehci_mv->pdata->phy_init) {
+ retval = ehci_mv->pdata->phy_init(ehci_mv->phy_regs);
+ if (retval)
+ return retval;
+ }
+
+ return 0;
+}
+
+static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
+{
+ if (ehci_mv->pdata->phy_deinit)
+ ehci_mv->pdata->phy_deinit(ehci_mv->phy_regs);
+ ehci_clock_disable(ehci_mv);
+}
+
+static int mv_ehci_reset(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct ehci_hcd_mv *ehci_mv = dev_get_drvdata(dev);
+ int retval;
+
+ if (ehci_mv == NULL) {
+ dev_err(dev, "Can not find private ehci data\n");
+ return -ENODEV;
+ }
+
+ hcd->has_tt = 1;
+
+ retval = ehci_setup(hcd);
+ if (retval)
+ dev_err(dev, "ehci_setup failed %d\n", retval);
+
+ return retval;
+}
+
+static const struct hc_driver mv_ehci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Marvell EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = mv_ehci_reset,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+};
+
+static int mv_ehci_probe(struct platform_device *pdev)
+{
+ struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct ehci_hcd_mv *ehci_mv;
+ struct resource *r;
+ int retval = -ENODEV;
+ u32 offset;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "missing platform_data\n");
+ return -ENODEV;
+ }
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ hcd = usb_create_hcd(&mv_ehci_hc_driver, &pdev->dev, "mv ehci");
+ if (!hcd)
+ return -ENOMEM;
+
+ ehci_mv = devm_kzalloc(&pdev->dev, sizeof(*ehci_mv), GFP_KERNEL);
+ if (ehci_mv == NULL) {
+ dev_err(&pdev->dev, "cannot allocate ehci_hcd_mv\n");
+ retval = -ENOMEM;
+ goto err_put_hcd;
+ }
+
+ platform_set_drvdata(pdev, ehci_mv);
+ ehci_mv->pdata = pdata;
+ ehci_mv->hcd = hcd;
+
+ ehci_mv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ehci_mv->clk)) {
+ dev_err(&pdev->dev, "error getting clock\n");
+ retval = PTR_ERR(ehci_mv->clk);
+ goto err_put_hcd;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phyregs");
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_put_hcd;
+ }
+
+ ehci_mv->phy_regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(ehci_mv->phy_regs)) {
+ retval = PTR_ERR(ehci_mv->phy_regs);
+ goto err_put_hcd;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "capregs");
+ if (!r) {
+ dev_err(&pdev->dev, "no I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_put_hcd;
+ }
+
+ ehci_mv->cap_regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(ehci_mv->cap_regs)) {
+ retval = PTR_ERR(ehci_mv->cap_regs);
+ goto err_put_hcd;
+ }
+
+ retval = mv_ehci_enable(ehci_mv);
+ if (retval) {
+ dev_err(&pdev->dev, "init phy error %d\n", retval);
+ goto err_put_hcd;
+ }
+
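+ /*
+  * The low byte of the first capability register is CAPLENGTH, the
+  * offset from the capability registers to the operational registers.
+  */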
+ offset = readl(ehci_mv->cap_regs) & CAPLENGTH_MASK;
+ ehci_mv->op_regs =
+ (void __iomem *) ((unsigned long) ehci_mv->cap_regs + offset);
+
+ hcd->rsrc_start = r->start;
+ hcd->rsrc_len = resource_size(r);
+ hcd->regs = ehci_mv->op_regs;
+
+ hcd->irq = platform_get_irq(pdev, 0);
+ if (!hcd->irq) {
+ dev_err(&pdev->dev, "Cannot get irq.");
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
+
+ ehci_mv->mode = pdata->mode;
+ if (ehci_mv->mode == MV_USB_MODE_OTG) {
+ ehci_mv->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(ehci_mv->otg)) {
+ retval = PTR_ERR(ehci_mv->otg);
+
+ if (retval == -ENXIO)
+ dev_info(&pdev->dev, "MV_USB_MODE_OTG "
+ "must have CONFIG_USB_PHY enabled\n");
+ else
+ dev_err(&pdev->dev,
+ "unable to find transceiver\n");
+ goto err_disable_clk;
+ }
+
+ retval = otg_set_host(ehci_mv->otg->otg, &hcd->self);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "unable to register with transceiver\n");
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+ /* otg will enable clock before use as host */
+ mv_ehci_disable(ehci_mv);
+ } else {
+ if (pdata->set_vbus)
+ pdata->set_vbus(1);
+
+ retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+ if (retval) {
+ dev_err(&pdev->dev,
+ "failed to add hcd with err %d\n", retval);
+ goto err_set_vbus;
+ }
+ device_wakeup_enable(hcd->self.controller);
+ }
+
+ if (pdata->private_init)
+ pdata->private_init(ehci_mv->op_regs, ehci_mv->phy_regs);
+
+ dev_info(&pdev->dev,
+ "successful find EHCI device with regs 0x%p irq %d"
+ " working in %s mode\n", hcd->regs, hcd->irq,
+ ehci_mv->mode == MV_USB_MODE_OTG ? "OTG" : "Host");
+
+ return 0;
+
+err_set_vbus:
+ if (pdata->set_vbus)
+ pdata->set_vbus(0);
+err_disable_clk:
+ mv_ehci_disable(ehci_mv);
+err_put_hcd:
+ usb_put_hcd(hcd);
+
+ return retval;
+}
+
+static int mv_ehci_remove(struct platform_device *pdev)
+{
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_mv->hcd;
+
+ if (hcd->rh_registered)
+ usb_remove_hcd(hcd);
+
+ if (!IS_ERR_OR_NULL(ehci_mv->otg))
+ otg_set_host(ehci_mv->otg->otg, NULL);
+
+ if (ehci_mv->mode == MV_USB_MODE_HOST) {
+ if (ehci_mv->pdata->set_vbus)
+ ehci_mv->pdata->set_vbus(0);
+
+ mv_ehci_disable(ehci_mv);
+ }
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+MODULE_ALIAS("mv-ehci");
+
+static const struct platform_device_id ehci_id_table[] = {
+ {"pxa-u2oehci", PXA_U2OEHCI},
+ {"pxa-sph", PXA_SPH},
+ {"mmp3-hsic", MMP3_HSIC},
+ {"mmp3-fsic", MMP3_FSIC},
+ {},
+};
+
+static void mv_ehci_shutdown(struct platform_device *pdev)
+{
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_mv->hcd;
+
+ if (!hcd->rh_registered)
+ return;
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ehci_mv_driver = {
+ .probe = mv_ehci_probe,
+ .remove = mv_ehci_remove,
+ .shutdown = mv_ehci_shutdown,
+ .driver = {
+ .name = "mv-ehci",
+ .bus = &platform_bus_type,
+ },
+ .id_table = ehci_id_table,
+};
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index bce85055019..dbe5e4eea08 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -17,116 +17,45 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_data/usb-ehci-mxc.h>
+#include "ehci.h"
-#include <mach/mxc_ehci.h>
+#define DRIVER_DESC "Freescale On-Chip EHCI Host driver"
+
+static const char hcd_name[] = "ehci-mxc";
#define ULPI_VIEWPORT_OFFSET 0x170
struct ehci_mxc_priv {
- struct clk *usbclk, *ahbclk;
- struct usb_hcd *hcd;
+ struct clk *usbclk, *ahbclk, *phyclk;
};
-/* called during probe() after chip reset completes */
-static int ehci_mxc_setup(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- struct device *dev = hcd->self.controller;
- struct mxc_usbh_platform_data *pdata = dev_get_platdata(dev);
- int retval;
-
- /* EHCI registers start at offset 0x100 */
- ehci->caps = hcd->regs + 0x100;
- ehci->regs = hcd->regs + 0x100 +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
- dbg_hcs_params(ehci, "reset");
- dbg_hcc_params(ehci, "reset");
-
- /* cache this readonly data; minimize chip reads */
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
-
- hcd->has_tt = 1;
-
- retval = ehci_halt(ehci);
- if (retval)
- return retval;
-
- /* data structure init */
- retval = ehci_init(hcd);
- if (retval)
- return retval;
-
- ehci->sbrn = 0x20;
-
- ehci_reset(ehci);
-
- /* set up the PORTSCx register */
- ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
-
- /* is this really needed? */
- msleep(10);
-
- ehci_port_power(ehci, 0);
- return 0;
-}
+static struct hc_driver __read_mostly ehci_mxc_hc_driver;
-static const struct hc_driver ehci_mxc_hc_driver = {
- .description = hcd_name,
- .product_desc = "Freescale On-Chip EHCI Host Controller",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_USB2 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .reset = ehci_mxc_setup,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
+static const struct ehci_driver_overrides ehci_mxc_overrides __initconst = {
+ .extra_priv_size = sizeof(struct ehci_mxc_priv),
};
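+/*
+ * extra_priv_size above reserves room for struct ehci_mxc_priv inside
+ * struct ehci_hcd; probe and remove reach it through ehci->priv.
+ */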
static int ehci_mxc_drv_probe(struct platform_device *pdev)
{
- struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
+ struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct usb_hcd *hcd;
struct resource *res;
int irq, ret;
struct ehci_mxc_priv *priv;
struct device *dev = &pdev->dev;
-
- dev_info(&pdev->dev, "initializing i.MX USB Controller\n");
+ struct ehci_hcd *ehci;
if (!pdata) {
dev_err(dev, "No platform data given, bailing out.\n");
@@ -139,34 +68,48 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
if (!hcd)
return -ENOMEM;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto err_alloc;
- }
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "Found HC with no register addr. Check setup!\n");
ret = -ENODEV;
- goto err_get_resource;
+ goto err_alloc;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- dev_dbg(dev, "controller already in use\n");
- ret = -EBUSY;
- goto err_request_mem;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto err_alloc;
+ }
+
+ hcd->has_tt = 1;
+ ehci = hcd_to_ehci(hcd);
+ priv = (struct ehci_mxc_priv *) ehci->priv;
+
+ /* enable clocks */
+ priv->usbclk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(priv->usbclk)) {
+ ret = PTR_ERR(priv->usbclk);
+ goto err_alloc;
}
+ clk_prepare_enable(priv->usbclk);
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(dev, "error mapping memory\n");
- ret = -EFAULT;
- goto err_ioremap;
+ priv->ahbclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(priv->ahbclk)) {
+ ret = PTR_ERR(priv->ahbclk);
+ goto err_clk_ahb;
}
+ clk_prepare_enable(priv->ahbclk);
+
+ /* "dr" device has its own clock on i.MX51 */
+ priv->phyclk = devm_clk_get(&pdev->dev, "phy");
+ if (IS_ERR(priv->phyclk))
+ priv->phyclk = NULL;
+ if (priv->phyclk)
+ clk_prepare_enable(priv->phyclk);
+
/* call platform specific init function */
if (pdata->init) {
@@ -179,122 +122,111 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
mdelay(10);
}
- /* enable clocks */
- priv->usbclk = clk_get(dev, "usb");
- if (IS_ERR(priv->usbclk)) {
- ret = PTR_ERR(priv->usbclk);
- goto err_clk;
- }
- clk_enable(priv->usbclk);
+ /* EHCI registers start at offset 0x100 */
+ ehci->caps = hcd->regs + 0x100;
+ ehci->regs = hcd->regs + 0x100 +
+ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
- if (!cpu_is_mx35() && !cpu_is_mx25()) {
- priv->ahbclk = clk_get(dev, "usb_ahb");
- if (IS_ERR(priv->ahbclk)) {
- ret = PTR_ERR(priv->ahbclk);
- goto err_clk_ahb;
- }
- clk_enable(priv->ahbclk);
- }
+ /* set up the PORTSCx register */
+ ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
- /* setup specific usb hw */
- ret = mxc_initialize_usb_hw(pdev->id, pdata->flags);
- if (ret < 0)
- goto err_init;
+ /* is this really needed? */
+ msleep(10);
/* Initialize the transceiver */
if (pdata->otg) {
pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
- ret = otg_init(pdata->otg);
+ ret = usb_phy_init(pdata->otg);
if (ret) {
dev_err(dev, "unable to init transceiver, probably missing\n");
ret = -ENODEV;
goto err_add;
}
- ret = otg_set_vbus(pdata->otg, 1);
+ ret = otg_set_vbus(pdata->otg->otg, 1);
if (ret) {
dev_err(dev, "unable to enable vbus on transceiver\n");
goto err_add;
}
}
- priv->hcd = hcd;
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, hcd);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto err_add;
+ device_wakeup_enable(hcd->self.controller);
return 0;
err_add:
if (pdata && pdata->exit)
pdata->exit(pdev);
err_init:
- if (priv->ahbclk) {
- clk_disable(priv->ahbclk);
- clk_put(priv->ahbclk);
- }
+ if (priv->phyclk)
+ clk_disable_unprepare(priv->phyclk);
+
+ clk_disable_unprepare(priv->ahbclk);
err_clk_ahb:
- clk_disable(priv->usbclk);
- clk_put(priv->usbclk);
-err_clk:
- iounmap(hcd->regs);
-err_ioremap:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-err_request_mem:
-err_get_resource:
- kfree(priv);
+ clk_disable_unprepare(priv->usbclk);
err_alloc:
usb_put_hcd(hcd);
return ret;
}
-static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
+static int ehci_mxc_drv_remove(struct platform_device *pdev)
{
- struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
- struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = priv->hcd;
+ struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct ehci_mxc_priv *priv = (struct ehci_mxc_priv *) ehci->priv;
+
+ usb_remove_hcd(hcd);
if (pdata && pdata->exit)
pdata->exit(pdev);
- if (pdata->otg)
- otg_shutdown(pdata->otg);
+ if (pdata && pdata->otg)
+ usb_phy_shutdown(pdata->otg);
- usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
- platform_set_drvdata(pdev, NULL);
-
- clk_disable(priv->usbclk);
- clk_put(priv->usbclk);
- if (priv->ahbclk) {
- clk_disable(priv->ahbclk);
- clk_put(priv->ahbclk);
- }
+ clk_disable_unprepare(priv->usbclk);
+ clk_disable_unprepare(priv->ahbclk);
- kfree(priv);
+ if (priv->phyclk)
+ clk_disable_unprepare(priv->phyclk);
+ usb_put_hcd(hcd);
return 0;
}
-static void ehci_mxc_drv_shutdown(struct platform_device *pdev)
-{
- struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = priv->hcd;
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-}
-
MODULE_ALIAS("platform:mxc-ehci");
static struct platform_driver ehci_mxc_driver = {
.probe = ehci_mxc_drv_probe,
- .remove = __exit_p(ehci_mxc_drv_remove),
- .shutdown = ehci_mxc_drv_shutdown,
+ .remove = ehci_mxc_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "mxc-ehci",
},
};
+
+static int __init ehci_mxc_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_mxc_hc_driver, &ehci_mxc_overrides);
+ return platform_driver_register(&ehci_mxc_driver);
+}
+module_init(ehci_mxc_init);
+
+static void __exit ehci_mxc_cleanup(void)
+{
+ platform_driver_unregister(&ehci_mxc_driver);
+}
+module_exit(ehci_mxc_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Sascha Hauer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index a31a031178a..9051439039a 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -51,12 +51,12 @@ static const struct hc_driver ehci_octeon_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
*/
- .reset = ehci_init,
+ .reset = ehci_setup,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
@@ -116,30 +116,24 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
* We can DMA from anywhere. But the descriptors must be in
* the lower 4GB.
*/
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &ehci_octeon_dma_mask;
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon");
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = res_mem->start;
- hcd->rsrc_len = res_mem->end - res_mem->start + 1;
+ hcd->rsrc_len = resource_size(res_mem);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
- OCTEON_EHCI_HCD_NAME)) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- ret = -EBUSY;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
goto err1;
}
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err2;
- }
-
ehci_octeon_start();
ehci = hcd_to_ehci(hcd);
@@ -150,29 +144,20 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
#endif
ehci->caps = hcd->regs;
- ehci->regs = hcd->regs +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
- /* cache this readonly data; minimize chip reads */
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
- goto err3;
+ goto err2;
}
+ device_wakeup_enable(hcd->self.controller);
platform_set_drvdata(pdev, hcd);
- /* root ports should always stay powered */
- ehci_port_power(ehci, 1);
-
return 0;
-err3:
+err2:
ehci_octeon_stop();
- iounmap(hcd->regs);
-err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
usb_put_hcd(hcd);
return ret;
@@ -185,12 +170,8 @@ static int ehci_octeon_drv_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
ehci_octeon_stop();
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 116ae280053..a24720beb39 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -1,11 +1,14 @@
/*
- * ehci-omap.c - driver for USBHOST on OMAP 34xx processor
+ * ehci-omap.c - driver for USBHOST on OMAP3/4 processors
*
- * Bus Glue for OMAP34xx USBHOST 3 port EHCI controller
- * Tested on OMAP3430 ES2.0 SDP
+ * Bus Glue for the EHCI controllers in OMAP3/4
+ * Tested on several OMAP3 boards, and OMAP4 Pandaboard
*
- * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ * Copyright (C) 2007-2013 Texas Instruments, Inc.
* Author: Vikram Pandita <vikram.pandita@ti.com>
+ * Author: Anand Gadiyar <gadiyar@ti.com>
+ * Author: Keshava Munegowda <keshava_mgowda@ti.com>
+ * Author: Roger Quadros <rogerq@ti.com>
*
* Copyright (C) 2009 Nokia Corporation
* Contact: Felipe Balbi <felipe.balbi@nokia.com>
@@ -26,95 +29,25 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
- * TODO (last updated Feb 12, 2010):
- * - add kernel-doc
- * - enable AUTOIDLE
- * - add suspend/resume
- * - move workarounds to board-files
*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/usb/ulpi.h>
-#include <plat/usb.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
-/*
- * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
- * Use ehci_omap_readl()/ehci_omap_writel() functions
- */
+#include "ehci.h"
-/* TLL Register Set */
-#define OMAP_USBTLL_REVISION (0x00)
-#define OMAP_USBTLL_SYSCONFIG (0x10)
-#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8)
-#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3)
-#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2)
-#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1)
-#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0)
-
-#define OMAP_USBTLL_SYSSTATUS (0x14)
-#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0)
-
-#define OMAP_USBTLL_IRQSTATUS (0x18)
-#define OMAP_USBTLL_IRQENABLE (0x1C)
-
-#define OMAP_TLL_SHARED_CONF (0x30)
-#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6)
-#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5)
-#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2)
-#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1)
-#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0)
-
-#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num)
-#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11)
-#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10)
-#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9)
-#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8)
-#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0)
-
-#define OMAP_TLL_ULPI_FUNCTION_CTRL(num) (0x804 + 0x100 * num)
-#define OMAP_TLL_ULPI_INTERFACE_CTRL(num) (0x807 + 0x100 * num)
-#define OMAP_TLL_ULPI_OTG_CTRL(num) (0x80A + 0x100 * num)
-#define OMAP_TLL_ULPI_INT_EN_RISE(num) (0x80D + 0x100 * num)
-#define OMAP_TLL_ULPI_INT_EN_FALL(num) (0x810 + 0x100 * num)
-#define OMAP_TLL_ULPI_INT_STATUS(num) (0x813 + 0x100 * num)
-#define OMAP_TLL_ULPI_INT_LATCH(num) (0x814 + 0x100 * num)
-#define OMAP_TLL_ULPI_DEBUG(num) (0x815 + 0x100 * num)
-#define OMAP_TLL_ULPI_SCRATCH_REGISTER(num) (0x816 + 0x100 * num)
-
-#define OMAP_TLL_CHANNEL_COUNT 3
-#define OMAP_TLL_CHANNEL_1_EN_MASK (1 << 1)
-#define OMAP_TLL_CHANNEL_2_EN_MASK (1 << 2)
-#define OMAP_TLL_CHANNEL_3_EN_MASK (1 << 4)
-
-/* UHH Register Set */
-#define OMAP_UHH_REVISION (0x00)
-#define OMAP_UHH_SYSCONFIG (0x10)
-#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12)
-#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8)
-#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3)
-#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2)
-#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1)
-#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0)
-
-#define OMAP_UHH_SYSSTATUS (0x14)
-#define OMAP_UHH_HOSTCONFIG (0x40)
-#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0)
-#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0)
-#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11)
-#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12)
-#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2)
-#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3)
-#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4)
-#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5)
-#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8)
-#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9)
-#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10)
-
-#define OMAP_UHH_DEBUG_CSR (0x44)
+#include <linux/platform_data/usb-omap.h>
/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)
@@ -127,292 +60,143 @@
#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
+#define DRIVER_DESC "OMAP-EHCI Host Controller driver"
+
+static const char hcd_name[] = "ehci-omap";
+
/*-------------------------------------------------------------------------*/
-static inline void ehci_omap_writel(void __iomem *base, u32 reg, u32 val)
+struct omap_hcd {
+ struct usb_phy *phy[OMAP3_HS_USB_PORTS]; /* one PHY for each port */
+ int nports;
+};
+
+static inline void ehci_write(void __iomem *base, u32 reg, u32 val)
{
__raw_writel(val, base + reg);
}
-static inline u32 ehci_omap_readl(void __iomem *base, u32 reg)
+static inline u32 ehci_read(void __iomem *base, u32 reg)
{
return __raw_readl(base + reg);
}
-static inline void ehci_omap_writeb(void __iomem *base, u8 reg, u8 val)
-{
- __raw_writeb(val, base + reg);
-}
-
-static inline u8 ehci_omap_readb(void __iomem *base, u8 reg)
-{
- return __raw_readb(base + reg);
-}
-
-/*-------------------------------------------------------------------------*/
-
-struct ehci_hcd_omap {
- struct ehci_hcd *ehci;
- struct device *dev;
-
- struct clk *usbhost_ick;
- struct clk *usbhost2_120m_fck;
- struct clk *usbhost1_48m_fck;
- struct clk *usbtll_fck;
- struct clk *usbtll_ick;
-
- /* FIXME the following two workarounds are
- * board specific not silicon-specific so these
- * should be moved to board-file instead.
- *
- * Maybe someone from TI will know better which
- * board is affected and needs the workarounds
- * to be applied
- */
-
- /* gpio for resetting phy */
- int reset_gpio_port[OMAP3_HS_USB_PORTS];
-
- /* phy reset workaround */
- int phy_reset;
-
- /* desired phy_mode: TLL, PHY */
- enum ehci_hcd_omap_mode port_mode[OMAP3_HS_USB_PORTS];
+/* configure so an HC device and id are always provided */
+/* always called with process context; sleeping is OK */
- void __iomem *uhh_base;
- void __iomem *tll_base;
- void __iomem *ehci_base;
+static struct hc_driver __read_mostly ehci_omap_hc_driver;
- /* Regulators for USB PHYs.
- * Each PHY can have a separate regulator.
- */
- struct regulator *regulator[OMAP3_HS_USB_PORTS];
+static const struct ehci_driver_overrides ehci_omap_overrides __initdata = {
+ .extra_priv_size = sizeof(struct omap_hcd),
};
-/*-------------------------------------------------------------------------*/
-
-static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask)
+/**
+ * ehci_hcd_omap_probe - initialize TI-based HCDs
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int ehci_hcd_omap_probe(struct platform_device *pdev)
{
- unsigned reg;
+ struct device *dev = &pdev->dev;
+ struct usbhs_omap_platform_data *pdata = dev_get_platdata(dev);
+ struct resource *res;
+ struct usb_hcd *hcd;
+ void __iomem *regs;
+ int ret;
+ int irq;
int i;
+ struct omap_hcd *omap;
- /* Program the 3 TLL channels upfront */
- for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
- reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
-
- /* Disable AutoIdle, BitStuffing and use SDR Mode */
- reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
- | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
- | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
- ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
- }
-
- /* Program Common TLL register */
- reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF);
- reg |= (OMAP_TLL_SHARED_CONF_FCLK_IS_ON
- | OMAP_TLL_SHARED_CONF_USB_DIVRATION
- | OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN);
- reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
-
- ehci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
-
- /* Enable channels now */
- for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
- reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
-
- /* Enable only the reg that is needed */
- if (!(tll_channel_mask & 1<<i))
- continue;
-
- reg |= OMAP_TLL_CHANNEL_CONF_CHANEN;
- ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
+ if (usb_disabled())
+ return -ENODEV;
- ehci_omap_writeb(omap->tll_base,
- OMAP_TLL_ULPI_SCRATCH_REGISTER(i), 0xbe);
- dev_dbg(omap->dev, "ULPI_SCRATCH_REG[ch=%d]= 0x%02x\n",
- i+1, ehci_omap_readb(omap->tll_base,
- OMAP_TLL_ULPI_SCRATCH_REGISTER(i)));
+ if (!dev->parent) {
+ dev_err(dev, "Missing parent device\n");
+ return -ENODEV;
}
-}
-
-/*-------------------------------------------------------------------------*/
-static void omap_ehci_soft_phy_reset(struct ehci_hcd_omap *omap, u8 port)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
- unsigned reg = 0;
-
- reg = ULPI_FUNC_CTRL_RESET
- /* FUNCTION_CTRL_SET register */
- | (ULPI_SET(ULPI_FUNC_CTRL) << EHCI_INSNREG05_ULPI_REGADD_SHIFT)
- /* Write */
- | (2 << EHCI_INSNREG05_ULPI_OPSEL_SHIFT)
- /* PORTn */
- | ((port + 1) << EHCI_INSNREG05_ULPI_PORTSEL_SHIFT)
- /* start ULPI access*/
- | (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT);
-
- ehci_omap_writel(omap->ehci_base, EHCI_INSNREG05_ULPI, reg);
-
- /* Wait for ULPI access completion */
- while ((ehci_omap_readl(omap->ehci_base, EHCI_INSNREG05_ULPI)
- & (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout)) {
- dev_dbg(omap->dev, "phy reset operation timed out\n");
- break;
- }
+ /* For DT boot, get platform data from parent. i.e. usbhshost */
+ if (dev->of_node) {
+ pdata = dev_get_platdata(dev->parent);
+ dev->platform_data = pdata;
}
-}
-/* omap_start_ehc
- * - Start the TI USBHOST controller
- */
-static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
- u8 tll_ch_mask = 0;
- unsigned reg = 0;
- int ret = 0;
-
- dev_dbg(omap->dev, "starting TI EHCI USB Controller\n");
-
- /* Enable Clocks for USBHOST */
- omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick");
- if (IS_ERR(omap->usbhost_ick)) {
- ret = PTR_ERR(omap->usbhost_ick);
- goto err_host_ick;
+ if (!pdata) {
+ dev_err(dev, "Missing platform data\n");
+ return -ENODEV;
}
- clk_enable(omap->usbhost_ick);
- omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck");
- if (IS_ERR(omap->usbhost2_120m_fck)) {
- ret = PTR_ERR(omap->usbhost2_120m_fck);
- goto err_host_120m_fck;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "EHCI irq failed\n");
+ return -ENODEV;
}
- clk_enable(omap->usbhost2_120m_fck);
- omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck");
- if (IS_ERR(omap->usbhost1_48m_fck)) {
- ret = PTR_ERR(omap->usbhost1_48m_fck);
- goto err_host_48m_fck;
- }
- clk_enable(omap->usbhost1_48m_fck);
-
- if (omap->phy_reset) {
- /* Refer: ISSUE1 */
- if (gpio_is_valid(omap->reset_gpio_port[0])) {
- gpio_request(omap->reset_gpio_port[0],
- "USB1 PHY reset");
- gpio_direction_output(omap->reset_gpio_port[0], 0);
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
- if (gpio_is_valid(omap->reset_gpio_port[1])) {
- gpio_request(omap->reset_gpio_port[1],
- "USB2 PHY reset");
- gpio_direction_output(omap->reset_gpio_port[1], 0);
- }
+ /*
+ * Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
- /* Hold the PHY in RESET for enough time till DIR is high */
- udelay(10);
+ ret = -ENODEV;
+ hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
+ dev_name(dev));
+ if (!hcd) {
+ dev_err(dev, "Failed to create HCD\n");
+ return -ENOMEM;
}
- /* Configure TLL for 60Mhz clk for ULPI */
- omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck");
- if (IS_ERR(omap->usbtll_fck)) {
- ret = PTR_ERR(omap->usbtll_fck);
- goto err_tll_fck;
- }
- clk_enable(omap->usbtll_fck);
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = regs;
+ hcd_to_ehci(hcd)->caps = regs;
- omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick");
- if (IS_ERR(omap->usbtll_ick)) {
- ret = PTR_ERR(omap->usbtll_ick);
- goto err_tll_ick;
- }
- clk_enable(omap->usbtll_ick);
+ omap = (struct omap_hcd *)hcd_to_ehci(hcd)->priv;
+ omap->nports = pdata->nports;
- /* perform TLL soft reset, and wait until reset is complete */
- ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+ platform_set_drvdata(pdev, hcd);
- /* Wait for TLL reset to complete */
- while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
- cpu_relax();
+ /* get the PHY devices if needed */
+ for (i = 0 ; i < omap->nports ; i++) {
+ struct usb_phy *phy;
- if (time_after(jiffies, timeout)) {
- dev_dbg(omap->dev, "operation timed out\n");
- ret = -EINVAL;
- goto err_sys_status;
+ /* get the PHY device */
+ if (dev->of_node)
+ phy = devm_usb_get_phy_by_phandle(dev, "phys", i);
+ else
+ phy = devm_usb_get_phy_dev(dev, i);
+ if (IS_ERR(phy)) {
+ /* Don't bail out if PHY is not absolutely necessary */
+ if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY)
+ continue;
+
+ ret = PTR_ERR(phy);
+ dev_err(dev, "Can't get PHY device for port %d: %d\n",
+ i, ret);
+ goto err_phy;
}
- }
- dev_dbg(omap->dev, "TLL RESET DONE\n");
-
- /* (1<<3) = no idle mode only for initial debugging */
- ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
- OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
- OMAP_USBTLL_SYSCONFIG_CACTIVITY);
-
-
- /* Put UHH in NoIdle/NoStandby mode */
- reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
- reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
- | OMAP_UHH_SYSCONFIG_SIDLEMODE
- | OMAP_UHH_SYSCONFIG_CACTIVITY
- | OMAP_UHH_SYSCONFIG_MIDLEMODE);
- reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
-
- ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
-
- reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
-
- /* setup ULPI bypass and burst configurations */
- reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
- | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
- | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
- reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
-
- if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN)
- reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
- if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN)
- reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
- if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN)
- reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
-
- /* Bypass the TLL module for PHY mode operation */
- if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
- dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
- if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) ||
- (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) ||
- (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY))
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
- } else {
- dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
- if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY)
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
- else if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
-
- if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY)
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
- else if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
-
- if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY)
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
- else if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+ omap->phy[i] = phy;
+ if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY) {
+ usb_phy_init(omap->phy[i]);
+ /* bring PHY out of suspend */
+ usb_phy_set_suspend(omap->phy[i], 0);
+ }
}
- ehci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
- dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
/*
* An undocumented "feature" in the OMAP3 EHCI controller,
@@ -423,327 +207,48 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
* to suspend. Writing 1 to this undocumented register bit
* disables this feature and restores normal behavior.
*/
- ehci_omap_writel(omap->ehci_base, EHCI_INSNREG04,
+ ehci_write(regs, EHCI_INSNREG04,
EHCI_INSNREG04_DISABLE_UNSUSPEND);
- if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) ||
- (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) ||
- (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)) {
-
- if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
- tll_ch_mask |= OMAP_TLL_CHANNEL_1_EN_MASK;
- if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
- tll_ch_mask |= OMAP_TLL_CHANNEL_2_EN_MASK;
- if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
- tll_ch_mask |= OMAP_TLL_CHANNEL_3_EN_MASK;
-
- /* Enable UTMI mode for required TLL channels */
- omap_usb_utmi_init(omap, tll_ch_mask);
- }
-
- if (omap->phy_reset) {
- /* Refer ISSUE1:
- * Hold the PHY in RESET for enough time till
- * PHY is settled and ready
- */
- udelay(10);
-
- if (gpio_is_valid(omap->reset_gpio_port[0]))
- gpio_set_value(omap->reset_gpio_port[0], 1);
-
- if (gpio_is_valid(omap->reset_gpio_port[1]))
- gpio_set_value(omap->reset_gpio_port[1], 1);
- }
-
- /* Soft reset the PHY using PHY reset command over ULPI */
- if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY)
- omap_ehci_soft_phy_reset(omap, 0);
- if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY)
- omap_ehci_soft_phy_reset(omap, 1);
-
- return 0;
-
-err_sys_status:
- clk_disable(omap->usbtll_ick);
- clk_put(omap->usbtll_ick);
-
-err_tll_ick:
- clk_disable(omap->usbtll_fck);
- clk_put(omap->usbtll_fck);
-
-err_tll_fck:
- clk_disable(omap->usbhost1_48m_fck);
- clk_put(omap->usbhost1_48m_fck);
-
- if (omap->phy_reset) {
- if (gpio_is_valid(omap->reset_gpio_port[0]))
- gpio_free(omap->reset_gpio_port[0]);
-
- if (gpio_is_valid(omap->reset_gpio_port[1]))
- gpio_free(omap->reset_gpio_port[1]);
- }
-
-err_host_48m_fck:
- clk_disable(omap->usbhost2_120m_fck);
- clk_put(omap->usbhost2_120m_fck);
-
-err_host_120m_fck:
- clk_disable(omap->usbhost_ick);
- clk_put(omap->usbhost_ick);
-
-err_host_ick:
- return ret;
-}
-
-static void omap_stop_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
-
- dev_dbg(omap->dev, "stopping TI EHCI USB Controller\n");
-
- /* Reset OMAP modules for insmod/rmmod to work */
- ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
- OMAP_UHH_SYSCONFIG_SOFTRESET);
- while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 1))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 2))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
-
- while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- if (omap->usbtll_fck != NULL) {
- clk_disable(omap->usbtll_fck);
- clk_put(omap->usbtll_fck);
- omap->usbtll_fck = NULL;
- }
-
- if (omap->usbhost_ick != NULL) {
- clk_disable(omap->usbhost_ick);
- clk_put(omap->usbhost_ick);
- omap->usbhost_ick = NULL;
- }
-
- if (omap->usbhost1_48m_fck != NULL) {
- clk_disable(omap->usbhost1_48m_fck);
- clk_put(omap->usbhost1_48m_fck);
- omap->usbhost1_48m_fck = NULL;
- }
-
- if (omap->usbhost2_120m_fck != NULL) {
- clk_disable(omap->usbhost2_120m_fck);
- clk_put(omap->usbhost2_120m_fck);
- omap->usbhost2_120m_fck = NULL;
- }
-
- if (omap->usbtll_ick != NULL) {
- clk_disable(omap->usbtll_ick);
- clk_put(omap->usbtll_ick);
- omap->usbtll_ick = NULL;
- }
-
- if (omap->phy_reset) {
- if (gpio_is_valid(omap->reset_gpio_port[0]))
- gpio_free(omap->reset_gpio_port[0]);
-
- if (gpio_is_valid(omap->reset_gpio_port[1]))
- gpio_free(omap->reset_gpio_port[1]);
- }
-
- dev_dbg(omap->dev, "Clock to USB host has been disabled\n");
-}
-
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ehci_omap_hc_driver;
-
-/* configure so an HC device and id are always provided */
-/* always called with process context; sleeping is OK */
-
-/**
- * ehci_hcd_omap_probe - initialize TI-based HCDs
- *
- * Allocates basic resources for this USB host controller, and
- * then invokes the start() method for the HCD associated with it
- * through the hotplug entry's driver_data.
- */
-static int ehci_hcd_omap_probe(struct platform_device *pdev)
-{
- struct ehci_hcd_omap_platform_data *pdata = pdev->dev.platform_data;
- struct ehci_hcd_omap *omap;
- struct resource *res;
- struct usb_hcd *hcd;
-
- int irq = platform_get_irq(pdev, 0);
- int ret = -ENODEV;
- int i;
- char supply[7];
-
- if (!pdata) {
- dev_dbg(&pdev->dev, "missing platform_data\n");
- goto err_pdata;
- }
-
- if (usb_disabled())
- goto err_disabled;
-
- omap = kzalloc(sizeof(*omap), GFP_KERNEL);
- if (!omap) {
- ret = -ENOMEM;
- goto err_disabled;
- }
-
- hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
- dev_name(&pdev->dev));
- if (!hcd) {
- dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret);
- ret = -ENOMEM;
- goto err_create_hcd;
- }
-
- platform_set_drvdata(pdev, omap);
- omap->dev = &pdev->dev;
- omap->phy_reset = pdata->phy_reset;
- omap->reset_gpio_port[0] = pdata->reset_gpio_port[0];
- omap->reset_gpio_port[1] = pdata->reset_gpio_port[1];
- omap->reset_gpio_port[2] = pdata->reset_gpio_port[2];
- omap->port_mode[0] = pdata->port_mode[0];
- omap->port_mode[1] = pdata->port_mode[1];
- omap->port_mode[2] = pdata->port_mode[2];
- omap->ehci = hcd_to_ehci(hcd);
- omap->ehci->sbrn = 0x20;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "EHCI ioremap failed\n");
- ret = -ENOMEM;
- goto err_ioremap;
- }
-
- /* we know this is the memory we want, no need to ioremap again */
- omap->ehci->caps = hcd->regs;
- omap->ehci_base = hcd->regs;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- omap->uhh_base = ioremap(res->start, resource_size(res));
- if (!omap->uhh_base) {
- dev_err(&pdev->dev, "UHH ioremap failed\n");
- ret = -ENOMEM;
- goto err_uhh_ioremap;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- omap->tll_base = ioremap(res->start, resource_size(res));
- if (!omap->tll_base) {
- dev_err(&pdev->dev, "TLL ioremap failed\n");
- ret = -ENOMEM;
- goto err_tll_ioremap;
- }
-
- /* get ehci regulator and enable */
- for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
- if (omap->port_mode[i] != EHCI_HCD_OMAP_MODE_PHY) {
- omap->regulator[i] = NULL;
- continue;
- }
- snprintf(supply, sizeof(supply), "hsusb%d", i);
- omap->regulator[i] = regulator_get(omap->dev, supply);
- if (IS_ERR(omap->regulator[i])) {
- omap->regulator[i] = NULL;
- dev_dbg(&pdev->dev,
- "failed to get ehci port%d regulator\n", i);
- } else {
- regulator_enable(omap->regulator[i]);
- }
- }
-
- ret = omap_start_ehc(omap, hcd);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
- dev_dbg(&pdev->dev, "failed to start ehci\n");
- goto err_start;
+ dev_err(dev, "failed to add hcd with err %d\n", ret);
+ goto err_pm_runtime;
}
+ device_wakeup_enable(hcd->self.controller);
- omap->ehci->regs = hcd->regs
- + HC_LENGTH(readl(&omap->ehci->caps->hc_capbase));
-
- dbg_hcs_params(omap->ehci, "reset");
- dbg_hcc_params(omap->ehci, "reset");
-
- /* cache this readonly data; minimize chip reads */
- omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params);
+ /*
+ * Bring PHYs out of reset for non-PHY modes.
+ * Even though HSIC mode is a PHY-less mode, the reset
+ * line exists between the chips and can be modelled
+ * as a PHY device for reset control.
+ */
+ for (i = 0; i < omap->nports; i++) {
+ if (!omap->phy[i] ||
+ pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY)
+ continue;
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
- if (ret) {
- dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
- goto err_add_hcd;
+ usb_phy_init(omap->phy[i]);
+ /* bring PHY out of suspend */
+ usb_phy_set_suspend(omap->phy[i], 0);
}
- /* root ports should always stay powered */
- ehci_port_power(omap->ehci, 1);
-
return 0;
-err_add_hcd:
- omap_stop_ehc(omap, hcd);
+err_pm_runtime:
+ pm_runtime_put_sync(dev);
-err_start:
- for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
- if (omap->regulator[i]) {
- regulator_disable(omap->regulator[i]);
- regulator_put(omap->regulator[i]);
- }
+err_phy:
+ for (i = 0; i < omap->nports; i++) {
+ if (omap->phy[i])
+ usb_phy_shutdown(omap->phy[i]);
}
- iounmap(omap->tll_base);
-
-err_tll_ioremap:
- iounmap(omap->uhh_base);
-
-err_uhh_ioremap:
- iounmap(hcd->regs);
-err_ioremap:
usb_put_hcd(hcd);
-err_create_hcd:
- kfree(omap);
-err_disabled:
-err_pdata:
return ret;
}
-/* may be called without controller electrically present */
-/* may be called with controller, bus, and devices active */
/**
* ehci_hcd_omap_remove - shutdown processing for EHCI HCDs
@@ -755,93 +260,68 @@ err_pdata:
*/
static int ehci_hcd_omap_remove(struct platform_device *pdev)
{
- struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct omap_hcd *omap = (struct omap_hcd *)hcd_to_ehci(hcd)->priv;
int i;
usb_remove_hcd(hcd);
- omap_stop_ehc(omap, hcd);
- iounmap(hcd->regs);
- for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
- if (omap->regulator[i]) {
- regulator_disable(omap->regulator[i]);
- regulator_put(omap->regulator[i]);
- }
+
+ for (i = 0; i < omap->nports; i++) {
+ if (omap->phy[i])
+ usb_phy_shutdown(omap->phy[i]);
}
- iounmap(omap->tll_base);
- iounmap(omap->uhh_base);
+
usb_put_hcd(hcd);
- kfree(omap);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return 0;
}
-static void ehci_hcd_omap_shutdown(struct platform_device *pdev)
-{
- struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
+static const struct of_device_id omap_ehci_dt_ids[] = {
+ { .compatible = "ti,ehci-omap" },
+ { }
+};
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-}
+MODULE_DEVICE_TABLE(of, omap_ehci_dt_ids);
static struct platform_driver ehci_hcd_omap_driver = {
.probe = ehci_hcd_omap_probe,
.remove = ehci_hcd_omap_remove,
- .shutdown = ehci_hcd_omap_shutdown,
+ .shutdown = usb_hcd_platform_shutdown,
/*.suspend = ehci_hcd_omap_suspend, */
/*.resume = ehci_hcd_omap_resume, */
.driver = {
- .name = "ehci-omap",
+ .name = hcd_name,
+ .of_match_table = omap_ehci_dt_ids,
}
};
/*-------------------------------------------------------------------------*/
-static const struct hc_driver ehci_omap_hc_driver = {
- .description = hcd_name,
- .product_desc = "OMAP-EHCI Host Controller",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
-
- /*
- * basic lifecycle operations
- */
- .reset = ehci_init,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
+static int __init ehci_omap_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
+ ehci_init_driver(&ehci_omap_hc_driver, &ehci_omap_overrides);
+ return platform_driver_register(&ehci_hcd_omap_driver);
+}
+module_init(ehci_omap_init);
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
-};
+static void __exit ehci_omap_cleanup(void)
+{
+ platform_driver_unregister(&ehci_hcd_omap_driver);
+}
+module_exit(ehci_omap_cleanup);
-MODULE_ALIAS("platform:omap-ehci");
+MODULE_ALIAS("platform:ehci-omap");
MODULE_AUTHOR("Texas Instruments, Inc.");
MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 0f87dc72820..22e15cab8ea 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -12,7 +12,18 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
-#include <plat/ehci-orion.h>
+#include <linux/clk.h>
+#include <linux/platform_data/usb-ehci-orion.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+
+#include "ehci.h"
#define rdl(off) __raw_readl(hcd->regs + (off))
#define wrl(off, val) __raw_writel((val), hcd->regs + (off))
@@ -30,6 +41,19 @@
#define USB_PHY_IVREF_CTRL 0x440
#define USB_PHY_TST_GRP_CTRL 0x450
+#define DRIVER_DESC "EHCI orion driver"
+
+#define hcd_to_orion_priv(h) ((struct orion_ehci_hcd *)hcd_to_ehci(h)->priv)
+
+struct orion_ehci_hcd {
+ struct clk *clk;
+ struct phy *phy;
+};
+
+static const char hcd_name[] = "ehci-orion";
+
+static struct hc_driver __read_mostly ehci_orion_hc_driver;
+
/*
* Implement Orion USB controller specification guidelines
*/
@@ -100,78 +124,9 @@ static void orion_usb_phy_v1_setup(struct usb_hcd *hcd)
wrl(USB_MODE, 0x13);
}
-static int ehci_orion_setup(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int retval;
-
- ehci_reset(ehci);
- retval = ehci_halt(ehci);
- if (retval)
- return retval;
-
- /*
- * data structure init
- */
- retval = ehci_init(hcd);
- if (retval)
- return retval;
-
- hcd->has_tt = 1;
-
- ehci_port_power(ehci, 0);
-
- return retval;
-}
-
-static const struct hc_driver ehci_orion_hc_driver = {
- .description = hcd_name,
- .product_desc = "Marvell Orion EHCI",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
-
- /*
- * basic lifecycle operations
- */
- .reset = ehci_orion_setup,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
-};
-
-static void __init
+static void
ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
int i;
@@ -181,7 +136,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
wrl(USB_WINDOW_CTRL(i), ((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -190,14 +145,21 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
}
}
-static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
+static const struct ehci_driver_overrides orion_overrides __initconst = {
+ .extra_priv_size = sizeof(struct orion_ehci_hcd),
+};
+
+static int ehci_orion_drv_probe(struct platform_device *pdev)
{
- struct orion_ehci_data *pd = pdev->dev.platform_data;
+ struct orion_ehci_data *pd = dev_get_platdata(&pdev->dev);
+ const struct mbus_dram_target_info *dram;
struct resource *res;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
void __iomem *regs;
int irq, err;
+ enum orion_ehci_phy_ver phy_version;
+ struct orion_ehci_hcd *priv;
if (usb_disabled())
return -ENODEV;
@@ -210,7 +172,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
"Found HC with no IRQ. Check %s setup!\n",
dev_name(&pdev->dev));
err = -ENODEV;
- goto err1;
+ goto err;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -219,28 +181,29 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
"Found HC with no register addr. Check %s setup!\n",
dev_name(&pdev->dev));
err = -ENODEV;
- goto err1;
+ goto err;
}
- if (!request_mem_region(res->start, resource_size(res),
- ehci_orion_hc_driver.description)) {
- dev_dbg(&pdev->dev, "controller already in use\n");
- err = -EBUSY;
- goto err1;
- }
+ /*
+ * Right now device-tree probed devices don't get dma_mask
+ * set. Since shared usb code relies on it, set it here for
+ * now. Once we have dma capability bindings this can go away.
+ */
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto err;
- regs = ioremap(res->start, resource_size(res));
- if (regs == NULL) {
- dev_dbg(&pdev->dev, "error mapping memory\n");
- err = -EFAULT;
- goto err2;
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs)) {
+ err = PTR_ERR(regs);
+ goto err;
}
hcd = usb_create_hcd(&ehci_orion_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
err = -ENOMEM;
- goto err3;
+ goto err;
}
hcd->rsrc_start = res->start;
@@ -249,22 +212,47 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs + 0x100;
- ehci->regs = hcd->regs + 0x100 +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
hcd->has_tt = 1;
- ehci->sbrn = 0x20;
+
+ priv = hcd_to_orion_priv(hcd);
+ /*
+ * Not all platforms can gate the clock, so it is not an error if
+ * the clock does not exists.
+ */
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
+ priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
+ if (IS_ERR(priv->phy)) {
+ err = PTR_ERR(priv->phy);
+ goto err_phy_get;
+ } else {
+ err = phy_init(priv->phy);
+ if (err)
+ goto err_phy_init;
+
+ err = phy_power_on(priv->phy);
+ if (err)
+ goto err_phy_power_on;
+ }
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (pd != NULL && pd->dram != NULL)
- ehci_orion_conf_mbus_windows(hcd, pd->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ ehci_orion_conf_mbus_windows(hcd, dram);
/*
* setup Orion USB controller.
*/
- switch (pd->phy_version) {
+ if (pdev->dev.of_node)
+ phy_version = EHCI_PHY_NA;
+ else
+ phy_version = pd->phy_version;
+
+ switch (phy_version) {
 case EHCI_PHY_NA: /* don't change USB phy settings */
break;
case EHCI_PHY_ORION:
@@ -273,45 +261,90 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
case EHCI_PHY_DD:
case EHCI_PHY_KW:
default:
- printk(KERN_WARNING "Orion ehci -USB phy version isn't supported.\n");
+ dev_warn(&pdev->dev, "USB phy version isn't supported.\n");
}
- err = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
- goto err4;
+ goto err_add_hcd;
+ device_wakeup_enable(hcd->self.controller);
return 0;
-err4:
+err_add_hcd:
+ if (!IS_ERR(priv->phy))
+ phy_power_off(priv->phy);
+err_phy_power_on:
+ if (!IS_ERR(priv->phy))
+ phy_exit(priv->phy);
+err_phy_init:
+err_phy_get:
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
usb_put_hcd(hcd);
-err3:
- iounmap(regs);
-err2:
- release_mem_region(res->start, resource_size(res));
-err1:
+err:
dev_err(&pdev->dev, "init %s fail, %d\n",
dev_name(&pdev->dev), err);
return err;
}
-static int __exit ehci_orion_drv_remove(struct platform_device *pdev)
+static int ehci_orion_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct orion_ehci_hcd *priv = hcd_to_orion_priv(hcd);
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+
+ if (!IS_ERR(priv->phy)) {
+ phy_power_off(priv->phy);
+ phy_exit(priv->phy);
+ }
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
usb_put_hcd(hcd);
return 0;
}
-MODULE_ALIAS("platform:orion-ehci");
+static const struct of_device_id ehci_orion_dt_ids[] = {
+ { .compatible = "marvell,orion-ehci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehci_orion_dt_ids);
static struct platform_driver ehci_orion_driver = {
.probe = ehci_orion_drv_probe,
- .remove = __exit_p(ehci_orion_drv_remove),
+ .remove = ehci_orion_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
- .driver.name = "orion-ehci",
+ .driver = {
+ .name = "orion-ehci",
+ .owner = THIS_MODULE,
+ .of_match_table = ehci_orion_dt_ids,
+ },
};
+
+static int __init ehci_orion_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_orion_hc_driver, &orion_overrides);
+ return platform_driver_register(&ehci_orion_driver);
+}
+module_init(ehci_orion_init);
+
+static void __exit ehci_orion_cleanup(void)
+{
+ platform_driver_unregister(&ehci_orion_driver);
+}
+module_exit(ehci_orion_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:orion-ehci");
+MODULE_AUTHOR("Tzachi Perelstein");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index a1e8d273103..3e86bf4371b 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -18,9 +18,21 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#ifndef CONFIG_PCI
-#error "This file is PCI bus glue. CONFIG_PCI must be defined."
-#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+#include "pci-quirks.h"
+
+#define DRIVER_DESC "EHCI PCI platform driver"
+
+static const char hcd_name[] = "ehci-pci";
+
+/* defined here to avoid adding to pci_ids.h for single instance use */
+#define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70
/*-------------------------------------------------------------------------*/
@@ -46,11 +58,20 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- struct pci_dev *p_smbus;
- u8 rev;
u32 temp;
int retval;
+ ehci->caps = hcd->regs;
+
+ /*
+ * ehci_init() causes memory for DMA transfers to be
+ * allocated. Thus, any vendor-specific workarounds based on
+ * limiting the type of memory used for DMA transfers must
+ * happen before ehci_setup() is called.
+ *
+ * Most other workarounds can be done either before or after
+ * init and reset; they are located here too.
+ */
switch (pdev->vendor) {
case PCI_VENDOR_ID_TOSHIBA_2:
/* celleb's companion chip */
@@ -63,20 +84,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
#endif
}
break;
- }
-
- ehci->caps = hcd->regs;
- ehci->regs = hcd->regs +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
-
- dbg_hcs_params(ehci, "reset");
- dbg_hcc_params(ehci, "reset");
-
- /* ehci_init() causes memory for DMA transfers to be
- * allocated. Thus, any vendor-specific workarounds based on
- * limiting the type of memory used for DMA transfers must
- * happen before ehci_init() is called. */
- switch (pdev->vendor) {
case PCI_VENDOR_ID_NVIDIA:
/* NVidia reports that certain chips don't handle
* QH, ITD, or SITD addresses above 2GB. (But TD,
@@ -92,62 +99,45 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
ehci_warn(ehci, "can't enable NVidia "
"workaround for >2GB RAM\n");
break;
- }
- break;
- }
-
- /* cache this readonly data; minimize chip reads */
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
-
- retval = ehci_halt(ehci);
- if (retval)
- return retval;
-
- /* data structure init */
- retval = ehci_init(hcd);
- if (retval)
- return retval;
- switch (pdev->vendor) {
- case PCI_VENDOR_ID_NEC:
- ehci->need_io_watchdog = 0;
+ /* Some NForce2 chips have problems with selective suspend;
+ * fixed in newer silicon.
+ */
+ case 0x0068:
+ if (pdev->revision < 0xa4)
+ ehci->no_selective_suspend = 1;
+ break;
+ }
break;
case PCI_VENDOR_ID_INTEL:
- ehci->need_io_watchdog = 0;
- ehci->fs_i_thresh = 1;
- if (pdev->device == 0x27cc) {
- ehci->broken_periodic = 1;
- ehci_info(ehci, "using broken periodic workaround\n");
- }
- if (pdev->device == 0x0806 || pdev->device == 0x0811
- || pdev->device == 0x0829) {
- ehci_info(ehci, "disable lpm for langwell/penwell\n");
- ehci->has_lpm = 0;
- }
+ if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB)
+ hcd->has_tt = 1;
break;
case PCI_VENDOR_ID_TDI:
- if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
+ if (pdev->device == PCI_DEVICE_ID_TDI_EHCI)
hcd->has_tt = 1;
- tdi_reset(ehci);
- }
break;
case PCI_VENDOR_ID_AMD:
+ /* AMD PLL quirk */
+ if (usb_amd_find_chipset_info())
+ ehci->amd_pll_fix = 1;
/* AMD8111 EHCI doesn't work, according to AMD errata */
if (pdev->device == 0x7463) {
ehci_info(ehci, "ignoring AMD8111 (errata)\n");
retval = -EIO;
goto done;
}
- break;
- case PCI_VENDOR_ID_NVIDIA:
- switch (pdev->device) {
- /* Some NForce2 chips have problems with selective suspend;
- * fixed in newer silicon.
+
+ /*
+ * EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
+ * read/write memory space which does not belong to it when
+ * there is NULL pointer with T-bit set to 1 in the frame list
+ * table. To avoid the issue, the frame list link pointer
+ * should always contain a valid pointer to an inactive qh.
*/
- case 0x0068:
- if (pdev->revision < 0xa4)
- ehci->no_selective_suspend = 1;
- break;
+ if (pdev->device == 0x7808) {
+ ehci->use_dummy_qh = 1;
+ ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n");
}
break;
case PCI_VENDOR_ID_VIA:
@@ -165,49 +155,86 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
}
break;
case PCI_VENDOR_ID_ATI:
+ /* AMD PLL quirk */
+ if (usb_amd_find_chipset_info())
+ ehci->amd_pll_fix = 1;
+
+ /*
+ * EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
+ * read/write memory space which does not belong to it when
+ * there is NULL pointer with T-bit set to 1 in the frame list
+ * table. To avoid the issue, the frame list link pointer
+ * should always contain a valid pointer to an inactive qh.
+ */
+ if (pdev->device == 0x4396) {
+ ehci->use_dummy_qh = 1;
+ ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n");
+ }
/* SB600 and old version of SB700 have a bug in EHCI controller,
 * which causes usb devices to lose response in some cases.
*/
- if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
- p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
- PCI_DEVICE_ID_ATI_SBX00_SMBUS,
- NULL);
- if (!p_smbus)
- break;
- rev = p_smbus->revision;
- if ((pdev->device == 0x4386) || (rev == 0x3a)
- || (rev == 0x3b)) {
- u8 tmp;
- ehci_info(ehci, "applying AMD SB600/SB700 USB "
- "freeze workaround\n");
- pci_read_config_byte(pdev, 0x53, &tmp);
- pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
- }
- pci_dev_put(p_smbus);
+ if ((pdev->device == 0x4386 || pdev->device == 0x4396) &&
+ usb_amd_hang_symptom_quirk()) {
+ u8 tmp;
+ ehci_info(ehci, "applying AMD SB600/SB700 USB freeze workaround\n");
+ pci_read_config_byte(pdev, 0x53, &tmp);
+ pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
}
break;
+ case PCI_VENDOR_ID_NETMOS:
+ /* MosChip frame-index-register bug */
+ ehci_info(ehci, "applying MosChip frame-index workaround\n");
+ ehci->frame_index_bug = 1;
+ break;
}
/* optional debug port, normally in the first BAR */
- temp = pci_find_capability(pdev, 0x0a);
+ temp = pci_find_capability(pdev, PCI_CAP_ID_DBG);
if (temp) {
pci_read_config_dword(pdev, temp, &temp);
temp >>= 16;
- if ((temp & (3 << 13)) == (1 << 13)) {
+ if (((temp >> 13) & 7) == 1) {
+ u32 hcs_params = ehci_readl(ehci,
+ &ehci->caps->hcs_params);
+
temp &= 0x1fff;
- ehci->debug = ehci_to_hcd(ehci)->regs + temp;
+ ehci->debug = hcd->regs + temp;
temp = ehci_readl(ehci, &ehci->debug->control);
ehci_info(ehci, "debug port %d%s\n",
- HCS_DEBUG_PORT(ehci->hcs_params),
- (temp & DBGP_ENABLED)
- ? " IN USE"
- : "");
+ HCS_DEBUG_PORT(hcs_params),
+ (temp & DBGP_ENABLED) ? " IN USE" : "");
if (!(temp & DBGP_ENABLED))
ehci->debug = NULL;
}
}
- ehci_reset(ehci);
+ retval = ehci_setup(hcd);
+ if (retval)
+ return retval;
+
+ /* These workarounds need to be applied after ehci_setup() */
+ switch (pdev->vendor) {
+ case PCI_VENDOR_ID_NEC:
+ ehci->need_io_watchdog = 0;
+ break;
+ case PCI_VENDOR_ID_INTEL:
+ ehci->need_io_watchdog = 0;
+ break;
+ case PCI_VENDOR_ID_NVIDIA:
+ switch (pdev->device) {
+ /* MCP89 chips on the MacBookAir3,1 give EPROTO when
+ * fetching device descriptors unless LPM is disabled.
+ * There are also intermittent problems enumerating
+ * devices with PPCD enabled.
+ */
+ case 0x0d9d:
+ ehci_info(ehci, "disable ppcd for nvidia mcp89\n");
+ ehci->has_ppcd = 0;
+ ehci->command &= ~CMD_PPCEE;
+ break;
+ }
+ break;
+ }
/* at least the Genesys GL880S needs fixup here */
temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
@@ -232,7 +259,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
}
/* Serial Bus Release Number is at PCI 0x60 offset */
- pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
+ if (pdev->vendor == PCI_VENDOR_ID_STMICRO
+ && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST)
+ ; /* ConneXT has no sbrn register */
+ else
+ pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
/* Keep this around for a while just in case some EHCI
* implementation uses legacy PCI PM support. This test
@@ -249,22 +280,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
}
}
-#ifdef CONFIG_USB_SUSPEND
- /* REVISIT: the controller works fine for wakeup iff the root hub
- * itself is "globally" suspended, but usbcore currently doesn't
- * understand such things.
- *
- * System suspend currently expects to be able to suspend the entire
- * device tree, device-at-a-time. If we failed selective suspend
- * reports, system suspend would fail; so the root hub code must claim
- * success. That's lying to usbcore, and it matters for runtime
- * PM scenarios with selective suspend and remote wakeup...
- */
+#ifdef CONFIG_PM_RUNTIME
if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
ehci_warn(ehci, "selective suspend/wakeup unavailable\n");
#endif
- ehci_port_power(ehci, 1);
retval = ehci_pci_reinit(ehci, pdev);
done:
return retval;
@@ -283,158 +303,26 @@ done:
* Also they depend on separate root hub suspend/resume.
*/
-static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- unsigned long flags;
- int rc = 0;
-
- if (time_before(jiffies, ehci->next_statechange))
- msleep(10);
-
- /* Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible. The PM and USB cores make sure that
- * the root hub is either suspended or stopped.
- */
- spin_lock_irqsave (&ehci->lock, flags);
- ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
- ehci_writel(ehci, 0, &ehci->regs->intr_enable);
- (void)ehci_readl(ehci, &ehci->regs->intr_enable);
-
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- spin_unlock_irqrestore (&ehci->lock, flags);
-
- // could save FLADJ in case of Vaux power loss
- // ... we'd only use it to handle clock skew
-
- return rc;
-}
-
static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- // maybe restore FLADJ
-
- if (time_before(jiffies, ehci->next_statechange))
- msleep(100);
-
- /* Mark hardware accessible again as we are out of D3 state by now */
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
- /* If CF is still set and we aren't resuming from hibernation
- * then we maintained PCI Vaux power.
- * Just undo the effect of ehci_pci_suspend().
- */
- if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
- !hibernated) {
- int mask = INTR_MASK;
-
- ehci_prepare_ports_for_controller_resume(ehci);
- if (!hcd->self.root_hub->do_remote_wakeup)
- mask &= ~STS_PCD;
- ehci_writel(ehci, mask, &ehci->regs->intr_enable);
- ehci_readl(ehci, &ehci->regs->intr_enable);
- return 0;
- }
-
- usb_root_hub_lost_power(hcd->self.root_hub);
-
- /* Else reset, to cope with power loss or flush-to-storage
- * style "resume" having let BIOS kick in during reboot.
- */
- (void) ehci_halt(ehci);
- (void) ehci_reset(ehci);
- (void) ehci_pci_reinit(ehci, pdev);
-
- /* emptying the schedule aborts any urbs */
- spin_lock_irq(&ehci->lock);
- if (ehci->reclaim)
- end_unlink_async(ehci);
- ehci_work(ehci);
- spin_unlock_irq(&ehci->lock);
-
- ehci_writel(ehci, ehci->command, &ehci->regs->command);
- ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
- ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
-
- /* here we "know" root ports should always stay powered */
- ehci_port_power(ehci, 1);
-
- hcd->state = HC_STATE_SUSPENDED;
+ if (ehci_resume(hcd, hibernated) != 0)
+ (void) ehci_pci_reinit(ehci, pdev);
return 0;
}
-#endif
-static int ehci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int rc = 0;
-
- if (!udev->parent) /* udev is root hub itself, impossible */
- rc = -1;
- /* we only support lpm device connected to root hub yet */
- if (ehci->has_lpm && !udev->parent->parent) {
- rc = ehci_lpm_set_da(ehci, udev->devnum, udev->portnum);
- if (!rc)
- rc = ehci_lpm_check(ehci, udev->portnum);
- }
- return rc;
-}
+#else
-static const struct hc_driver ehci_pci_hc_driver = {
- .description = hcd_name,
- .product_desc = "EHCI Host Controller",
- .hcd_priv_size = sizeof(struct ehci_hcd),
+#define ehci_suspend NULL
+#define ehci_pci_resume NULL
+#endif /* CONFIG_PM */
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+static struct hc_driver __read_mostly ehci_pci_hc_driver;
- /*
- * basic lifecycle operations
- */
+static const struct ehci_driver_overrides pci_overrides __initconst = {
.reset = ehci_pci_setup,
- .start = ehci_run,
-#ifdef CONFIG_PM
- .pci_suspend = ehci_pci_suspend,
- .pci_resume = ehci_pci_resume,
-#endif
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-
- /*
- * call back when device connected and addressed
- */
- .update_device = ehci_update_device,
-
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
/*-------------------------------------------------------------------------*/
@@ -444,6 +332,9 @@ static const struct pci_device_id pci_ids [] = { {
/* handle any USB 2.0 EHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
.driver_data = (unsigned long) &ehci_pci_hc_driver,
+ }, {
+ PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_HOST),
+ .driver_data = (unsigned long) &ehci_pci_hc_driver,
},
{ /* end: all zeroes */ }
};
@@ -458,9 +349,37 @@ static struct pci_driver ehci_pci_driver = {
.remove = usb_hcd_pci_remove,
.shutdown = usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
.driver = {
.pm = &usb_hcd_pci_pm_ops
},
#endif
};
+
+static int __init ehci_pci_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_pci_hc_driver, &pci_overrides);
+
+ /* Entries for the PCI suspend/resume callbacks are special */
+ ehci_pci_hc_driver.pci_suspend = ehci_suspend;
+ ehci_pci_hc_driver.pci_resume = ehci_pci_resume;
+
+ return pci_register_driver(&ehci_pci_driver);
+}
+module_init(ehci_pci_init);
+
+static void __exit ehci_pci_cleanup(void)
+{
+ pci_unregister_driver(&ehci_pci_driver);
+}
+module_exit(ehci_pci_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("David Brownell");
+MODULE_AUTHOR("Alan Stern");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
new file mode 100644
index 00000000000..2f5b9ce3e04
--- /dev/null
+++ b/drivers/usb/host/ehci-platform.c
@@ -0,0 +1,410 @@
+/*
+ * Generic platform ehci driver
+ *
+ * Copyright 2007 Steven Brown <sbrown@cortland.com>
+ * Copyright 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Derived from the ohci-ssb driver
+ * Copyright 2007 Michael Buesch <m@bues.ch>
+ *
+ * Derived from the EHCI-PCI driver
+ * Copyright (c) 2000-2004 by David Brownell
+ *
+ * Derived from the ohci-pci driver
+ * Copyright 1999 Roman Weissgaerber
+ * Copyright 2000-2002 David Brownell
+ * Copyright 1999 Linus Torvalds
+ * Copyright 1999 Gregory P. Smith
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hrtimer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/ehci_pdriver.h>
+
+#include "ehci.h"
+
+#define DRIVER_DESC "EHCI generic platform driver"
+#define EHCI_MAX_CLKS 3
+#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
+
+struct ehci_platform_priv {
+ struct clk *clks[EHCI_MAX_CLKS];
+ struct reset_control *rst;
+ struct phy *phy;
+};
+
+static const char hcd_name[] = "ehci-platform";
+
+static int ehci_platform_reset(struct usb_hcd *hcd)
+{
+ struct platform_device *pdev = to_platform_device(hcd->self.controller);
+ struct usb_ehci_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ hcd->has_tt = pdata->has_tt;
+ ehci->has_synopsys_hc_bug = pdata->has_synopsys_hc_bug;
+
+ if (pdata->pre_setup) {
+ retval = pdata->pre_setup(hcd);
+ if (retval < 0)
+ return retval;
+ }
+
+ ehci->caps = hcd->regs + pdata->caps_offset;
+ retval = ehci_setup(hcd);
+ if (retval)
+ return retval;
+
+ if (pdata->no_io_watchdog)
+ ehci->need_io_watchdog = 0;
+ return 0;
+}
+
+static int ehci_platform_power_on(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+ int clk, ret;
+
+ for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++) {
+ ret = clk_prepare_enable(priv->clks[clk]);
+ if (ret)
+ goto err_disable_clks;
+ }
+
+ if (priv->phy) {
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto err_disable_clks;
+
+ ret = phy_power_on(priv->phy);
+ if (ret)
+ goto err_exit_phy;
+ }
+
+ return 0;
+
+err_exit_phy:
+ phy_exit(priv->phy);
+err_disable_clks:
+ while (--clk >= 0)
+ clk_disable_unprepare(priv->clks[clk]);
+
+ return ret;
+}
+
+static void ehci_platform_power_off(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+ int clk;
+
+ if (priv->phy) {
+ phy_power_off(priv->phy);
+ phy_exit(priv->phy);
+ }
+
+ for (clk = EHCI_MAX_CLKS - 1; clk >= 0; clk--)
+ if (priv->clks[clk])
+ clk_disable_unprepare(priv->clks[clk]);
+}
+
+static struct hc_driver __read_mostly ehci_platform_hc_driver;
+
+static const struct ehci_driver_overrides platform_overrides __initconst = {
+ .reset = ehci_platform_reset,
+ .extra_priv_size = sizeof(struct ehci_platform_priv),
+};
+
+static struct usb_ehci_pdata ehci_platform_defaults = {
+ .power_on = ehci_platform_power_on,
+ .power_suspend = ehci_platform_power_off,
+ .power_off = ehci_platform_power_off,
+};
+
+static int ehci_platform_probe(struct platform_device *dev)
+{
+ struct usb_hcd *hcd;
+ struct resource *res_mem;
+ struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
+ struct ehci_platform_priv *priv;
+ struct ehci_hcd *ehci;
+ int err, irq, clk = 0;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /*
+ * Use reasonable defaults so platforms don't have to provide these
+ * with DT probing on ARM.
+ */
+ if (!pdata)
+ pdata = &ehci_platform_defaults;
+
+ err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ dev_err(&dev->dev, "no irq provided");
+ return irq;
+ }
+ res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res_mem) {
+ dev_err(&dev->dev, "no memory resource provided");
+ return -ENXIO;
+ }
+
+ hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
+ dev_name(&dev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ platform_set_drvdata(dev, hcd);
+ dev->dev.platform_data = pdata;
+ priv = hcd_to_ehci_priv(hcd);
+ ehci = hcd_to_ehci(hcd);
+
+ if (pdata == &ehci_platform_defaults && dev->dev.of_node) {
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-regs"))
+ ehci->big_endian_mmio = 1;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-desc"))
+ ehci->big_endian_desc = 1;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian"))
+ ehci->big_endian_mmio = ehci->big_endian_desc = 1;
+
+ priv->phy = devm_phy_get(&dev->dev, "usb");
+ if (IS_ERR(priv->phy)) {
+ err = PTR_ERR(priv->phy);
+ if (err == -EPROBE_DEFER)
+ goto err_put_hcd;
+ priv->phy = NULL;
+ }
+
+ for (clk = 0; clk < EHCI_MAX_CLKS; clk++) {
+ priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
+ if (IS_ERR(priv->clks[clk])) {
+ err = PTR_ERR(priv->clks[clk]);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ priv->clks[clk] = NULL;
+ break;
+ }
+ }
+ }
+
+ priv->rst = devm_reset_control_get_optional(&dev->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ err = PTR_ERR(priv->rst);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ priv->rst = NULL;
+ } else {
+ err = reset_control_deassert(priv->rst);
+ if (err)
+ goto err_put_clks;
+ }
+
+ if (pdata->big_endian_desc)
+ ehci->big_endian_desc = 1;
+ if (pdata->big_endian_mmio)
+ ehci->big_endian_mmio = 1;
+
+#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+ if (ehci->big_endian_mmio) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_EHCI_BIG_ENDIAN_MMIO not set\n");
+ err = -EINVAL;
+ goto err_reset;
+ }
+#endif
+#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
+ if (ehci->big_endian_desc) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_EHCI_BIG_ENDIAN_DESC not set\n");
+ err = -EINVAL;
+ goto err_reset;
+ }
+#endif
+
+ if (pdata->power_on) {
+ err = pdata->power_on(dev);
+ if (err < 0)
+ goto err_reset;
+ }
+
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = resource_size(res_mem);
+
+ hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
+ goto err_power;
+ }
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err)
+ goto err_power;
+
+ device_wakeup_enable(hcd->self.controller);
+ platform_set_drvdata(dev, hcd);
+
+ return err;
+
+err_power:
+ if (pdata->power_off)
+ pdata->power_off(dev);
+err_reset:
+ if (priv->rst)
+ reset_control_assert(priv->rst);
+err_put_clks:
+ while (--clk >= 0)
+ clk_put(priv->clks[clk]);
+err_put_hcd:
+ if (pdata == &ehci_platform_defaults)
+ dev->dev.platform_data = NULL;
+
+ usb_put_hcd(hcd);
+
+ return err;
+}
+
+static int ehci_platform_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+ int clk;
+
+ usb_remove_hcd(hcd);
+
+ if (pdata->power_off)
+ pdata->power_off(dev);
+
+ if (priv->rst)
+ reset_control_assert(priv->rst);
+
+ for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++)
+ clk_put(priv->clks[clk]);
+
+ usb_put_hcd(hcd);
+
+ if (pdata == &ehci_platform_defaults)
+ dev->dev.platform_data = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int ehci_platform_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ bool do_wakeup = device_may_wakeup(dev);
+ int ret;
+
+ ret = ehci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+
+ if (pdata->power_suspend)
+ pdata->power_suspend(pdev);
+
+ return ret;
+}
+
+static int ehci_platform_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+
+ if (pdata->power_on) {
+ int err = pdata->power_on(pdev);
+ if (err < 0)
+ return err;
+ }
+
+ ehci_resume(hcd, false);
+ return 0;
+}
+
+#else /* !CONFIG_PM */
+#define ehci_platform_suspend NULL
+#define ehci_platform_resume NULL
+#endif /* CONFIG_PM */
+
+static const struct of_device_id vt8500_ehci_ids[] = {
+ { .compatible = "via,vt8500-ehci", },
+ { .compatible = "wm,prizm-ehci", },
+ { .compatible = "generic-ehci", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vt8500_ehci_ids);
+
+static const struct platform_device_id ehci_platform_table[] = {
+ { "ehci-platform", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, ehci_platform_table);
+
+static const struct dev_pm_ops ehci_platform_pm_ops = {
+ .suspend = ehci_platform_suspend,
+ .resume = ehci_platform_resume,
+};
+
+static struct platform_driver ehci_platform_driver = {
+ .id_table = ehci_platform_table,
+ .probe = ehci_platform_probe,
+ .remove = ehci_platform_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ehci-platform",
+ .pm = &ehci_platform_pm_ops,
+ .of_match_table = vt8500_ehci_ids,
+ }
+};
+
+static int __init ehci_platform_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
+ return platform_driver_register(&ehci_platform_driver);
+}
+module_init(ehci_platform_init);
+
+static void __exit ehci_platform_cleanup(void)
+{
+ platform_driver_unregister(&ehci_platform_driver);
+}
+module_exit(ehci_platform_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_AUTHOR("Alan Stern");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
new file mode 100644
index 00000000000..7d75465d97c
--- /dev/null
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -0,0 +1,330 @@
+/*
+ * PMC MSP EHCI (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 2006-2010 PMC-Sierra Inc
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+/* includes */
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/usb.h>
+#include <msp_usb.h>
+
+/* stream disable */
+#define USB_CTRL_MODE_STREAM_DISABLE 0x10
+
+/* threshold */
+#define USB_CTRL_FIFO_THRESH 0x00300000
+
+/* register offset for usb_mode */
+#define USB_EHCI_REG_USB_MODE 0x68
+
+/* register offset for usb fifo */
+#define USB_EHCI_REG_USB_FIFO 0x24
+
+/* register offset for usb status */
+#define USB_EHCI_REG_USB_STATUS 0x44
+
+/* serial/parallel transceiver */
+#define USB_EHCI_REG_BIT_STAT_STS (1<<29)
+
+/* TWI USB0 host device pin */
+#define MSP_PIN_USB0_HOST_DEV 49
+
+/* TWI USB1 host device pin */
+#define MSP_PIN_USB1_HOST_DEV 50
+
+
+static void usb_hcd_tdi_set_mode(struct ehci_hcd *ehci)
+{
+ u8 *base;
+ u8 *statreg;
+ u8 *fiforeg;
+ u32 val;
+ struct ehci_regs *reg_base = ehci->regs;
+
+ /* get register base */
+ base = (u8 *)reg_base + USB_EHCI_REG_USB_MODE;
+ statreg = (u8 *)reg_base + USB_EHCI_REG_USB_STATUS;
+ fiforeg = (u8 *)reg_base + USB_EHCI_REG_USB_FIFO;
+
+ /* Disable controller mode stream */
+ val = ehci_readl(ehci, (u32 *)base);
+ ehci_writel(ehci, (val | USB_CTRL_MODE_STREAM_DISABLE),
+ (u32 *)base);
+
+ /* clear STS to select parallel transceiver interface */
+ val = ehci_readl(ehci, (u32 *)statreg);
+ val = val & ~USB_EHCI_REG_BIT_STAT_STS;
+ ehci_writel(ehci, val, (u32 *)statreg);
+
+ /* write to set the proper fifo threshold */
+ ehci_writel(ehci, USB_CTRL_FIFO_THRESH, (u32 *)fiforeg);
+
+ /* set TWI GPIO USB_HOST_DEV pin high */
+ gpio_direction_output(MSP_PIN_USB0_HOST_DEV, 1);
+}
+
+/* called during probe() after chip reset completes */
+static int ehci_msp_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ ehci->big_endian_mmio = 1;
+ ehci->big_endian_desc = 1;
+
+ ehci->caps = hcd->regs;
+ hcd->has_tt = 1;
+
+ retval = ehci_setup(hcd);
+ if (retval)
+ return retval;
+
+ usb_hcd_tdi_set_mode(ehci);
+
+ return retval;
+}
+
+
+/* configure so an HC device and id are always provided
+ * always called with process context; sleeping is OK
+ */
+
+static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
+{
+ struct resource *res;
+ struct platform_device *pdev = &dev->dev;
+ u32 res_len;
+ int retval;
+
+ /* MAB register space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res == NULL)
+ return -ENOMEM;
+ res_len = resource_size(res);
+ if (!request_mem_region(res->start, res_len, "mab regs"))
+ return -EBUSY;
+
+ dev->mab_regs = ioremap_nocache(res->start, res_len);
+ if (dev->mab_regs == NULL) {
+ retval = -ENOMEM;
+ goto err1;
+ }
+
+ /* MSP USB register space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (res == NULL) {
+ retval = -ENOMEM;
+ goto err2;
+ }
+ res_len = resource_size(res);
+ if (!request_mem_region(res->start, res_len, "usbid regs")) {
+ retval = -EBUSY;
+ goto err2;
+ }
+ dev->usbid_regs = ioremap_nocache(res->start, res_len);
+ if (dev->usbid_regs == NULL) {
+ retval = -ENOMEM;
+ goto err3;
+ }
+
+ return 0;
+err3:
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ res_len = resource_size(res);
+ release_mem_region(res->start, res_len);
+err2:
+ iounmap(dev->mab_regs);
+err1:
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ res_len = resource_size(res);
+ release_mem_region(res->start, res_len);
+ dev_err(&pdev->dev, "Failed to map non-EHCI regs.\n");
+ return retval;
+}
+
+/**
+ * usb_hcd_msp_probe - initialize PMC MSP-based HCDs
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ *
+ */
+int usb_hcd_msp_probe(const struct hc_driver *driver,
+ struct platform_device *dev)
+{
+ int retval;
+ struct usb_hcd *hcd;
+ struct resource *res;
+ struct ehci_hcd *ehci ;
+
+ hcd = usb_create_hcd(driver, &dev->dev, "pmcmsp");
+ if (!hcd)
+ return -ENOMEM;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ pr_debug("No IOMEM resource info for %s.\n", dev->name);
+ retval = -ENOMEM;
+ goto err1;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, dev->name)) {
+ retval = -EBUSY;
+ goto err1;
+ }
+ hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ pr_debug("ioremap failed");
+ retval = -ENOMEM;
+ goto err2;
+ }
+
+ res = platform_get_resource(dev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&dev->dev, "No IRQ resource info for %s.\n", dev->name);
+ retval = -ENOMEM;
+ goto err3;
+ }
+
+ /* Map non-EHCI register spaces */
+ retval = usb_hcd_msp_map_regs(to_mspusb_device(dev));
+ if (retval != 0)
+ goto err3;
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->big_endian_mmio = 1;
+ ehci->big_endian_desc = 1;
+
+
+ retval = usb_add_hcd(hcd, res->start, IRQF_SHARED);
+ if (retval == 0) {
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
+ }
+
+ usb_remove_hcd(hcd);
+err3:
+ iounmap(hcd->regs);
+err2:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err1:
+ usb_put_hcd(hcd);
+
+ return retval;
+}
+
+
+
+/**
+ * usb_hcd_msp_remove - shutdown processing for PMC MSP-based HCDs
+ * @dev: USB Host Controller being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of usb_hcd_msp_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ *
+ * may be called without controller electrically present
+ * may be called with controller, bus, and devices active
+ */
+void usb_hcd_msp_remove(struct usb_hcd *hcd, struct platform_device *dev)
+{
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+}
+
+static const struct hc_driver ehci_msp_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "PMC MSP EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_msp_setup,
+ .shutdown = ehci_shutdown,
+ .start = ehci_run,
+ .stop = ehci_stop,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_msp_drv_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pr_debug("In ehci_hcd_msp_drv_probe");
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ gpio_request(MSP_PIN_USB0_HOST_DEV, "USB0_HOST_DEV_GPIO");
+
+ ret = usb_hcd_msp_probe(&ehci_msp_hc_driver, pdev);
+
+ return ret;
+}
+
+static int ehci_hcd_msp_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_hcd_msp_remove(hcd, pdev);
+
+ /* free TWI GPIO USB_HOST_DEV pin */
+ gpio_free(MSP_PIN_USB0_HOST_DEV);
+
+ return 0;
+}
+
+MODULE_ALIAS("pmcmsp-ehci");
+
+static struct platform_driver ehci_hcd_msp_driver = {
+ .probe = ehci_hcd_msp_drv_probe,
+ .remove = ehci_hcd_msp_drv_remove,
+ .driver = {
+ .name = "pmcmsp-ehci",
+ .owner = THIS_MODULE,
+ },
+};
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index ba52be47302..547924796d2 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -12,29 +12,14 @@
* This file is licenced under the GPL.
*/
+#include <linux/err.h>
#include <linux/signal.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
-/* called during probe() after chip reset completes */
-static int ehci_ppc_of_setup(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int retval;
-
- retval = ehci_halt(ehci);
- if (retval)
- return retval;
-
- retval = ehci_init(hcd);
- if (retval)
- return retval;
-
- ehci->sbrn = 0x20;
- return ehci_reset(ehci);
-}
-
static const struct hc_driver ehci_ppc_of_hc_driver = {
.description = hcd_name,
@@ -45,12 +30,12 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
*/
- .reset = ehci_ppc_of_setup,
+ .reset = ehci_setup,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
@@ -89,7 +74,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
* Fix: Enable Break Memory Transfer (BMT) in INSNREG3
*/
#define PPC440EPX_EHCI0_INSREG_BMT (0x1 << 0)
-static int __devinit
+static int
ppc44x_enable_bmt(struct device_node *dn)
{
__iomem u32 *insreg_virt;
@@ -105,8 +90,7 @@ ppc44x_enable_bmt(struct device_node *dn)
}
-static int __devinit
-ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *match)
+static int ehci_hcd_ppc_of_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
@@ -131,25 +115,19 @@ ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat
return -ENOMEM;
hcd->rsrc_start = res.start;
- hcd->rsrc_len = res.end - res.start + 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
- rv = -EBUSY;
- goto err_rmr;
- }
+ hcd->rsrc_len = resource_size(&res);
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
+ dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+ __FILE__);
rv = -EBUSY;
goto err_irq;
}
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
- rv = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&op->dev, &res);
+ if (IS_ERR(hcd->regs)) {
+ rv = PTR_ERR(hcd->regs);
goto err_ioremap;
}
@@ -158,8 +136,10 @@ ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat
if (np != NULL) {
 /* claim we are really affected by the usb23 erratum */
if (!of_address_to_resource(np, 0, &res))
- ehci->ohci_hcctrl_reg = ioremap(res.start +
- OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN);
+ ehci->ohci_hcctrl_reg =
+ devm_ioremap(&op->dev,
+ res.start + OHCI_HCCTRL_OFFSET,
+ OHCI_HCCTRL_LEN);
else
pr_debug("%s: no ohci offset in fdt\n", __FILE__);
if (!ehci->ohci_hcctrl_reg) {
@@ -179,11 +159,6 @@ ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat
ehci->big_endian_desc = 1;
ehci->caps = hcd->regs;
- ehci->regs = hcd->regs +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
-
- /* cache this readonly data; minimize chip reads */
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
if (of_device_is_compatible(dn, "ibm,usb-ehci-440epx")) {
rv = ppc44x_enable_bmt(dn);
@@ -193,19 +168,14 @@ ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat
rv = usb_add_hcd(hcd, irq, 0);
if (rv)
- goto err_ehci;
+ goto err_ioremap;
+ device_wakeup_enable(hcd->self.controller);
return 0;
-err_ehci:
- if (ehci->has_amcc_usb23)
- iounmap(ehci->ohci_hcctrl_reg);
- iounmap(hcd->regs);
err_ioremap:
irq_dispose_mapping(irq);
err_irq:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-err_rmr:
usb_put_hcd(hcd);
return rv;
@@ -214,21 +184,17 @@ err_rmr:
static int ehci_hcd_ppc_of_remove(struct platform_device *op)
{
- struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+ struct usb_hcd *hcd = platform_get_drvdata(op);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct device_node *np;
struct resource res;
- dev_set_drvdata(&op->dev, NULL);
-
dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n");
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
irq_dispose_mapping(hcd->irq);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
/* use request_mem_region to test if the ohci driver is loaded. if so
* ensure the ohci core is operational.
@@ -246,8 +212,6 @@ static int ehci_hcd_ppc_of_remove(struct platform_device *op)
pr_debug("%s: no ohci offset in fdt\n", __FILE__);
of_node_put(np);
}
-
- iounmap(ehci->ohci_hcctrl_reg);
}
usb_put_hcd(hcd);
@@ -255,17 +219,6 @@ static int ehci_hcd_ppc_of_remove(struct platform_device *op)
}
-static int ehci_hcd_ppc_of_shutdown(struct platform_device *op)
-{
- struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-
- return 0;
-}
-
-
static const struct of_device_id ehci_hcd_ppc_of_match[] = {
{
.compatible = "usb-ehci",
@@ -275,10 +228,10 @@ static const struct of_device_id ehci_hcd_ppc_of_match[] = {
MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match);
-static struct of_platform_driver ehci_hcd_ppc_of_driver = {
+static struct platform_driver ehci_hcd_ppc_of_driver = {
.probe = ehci_hcd_ppc_of_probe,
.remove = ehci_hcd_ppc_of_remove,
- .shutdown = ehci_hcd_ppc_of_shutdown,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ppc-of-ehci",
.owner = THIS_MODULE,
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 1dee33b9139..7934ff9b35e 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -21,33 +21,47 @@
#include <asm/firmware.h>
#include <asm/ps3.h>
-static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
+static void ps3_ehci_setup_insnreg(struct ehci_hcd *ehci)
{
- int result;
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ /* PS3 HC internal setup register offsets. */
- ehci->big_endian_mmio = 1;
+ enum ps3_ehci_hc_insnreg {
+ ps3_ehci_hc_insnreg01 = 0x084,
+ ps3_ehci_hc_insnreg02 = 0x088,
+ ps3_ehci_hc_insnreg03 = 0x08c,
+ };
- ehci->caps = hcd->regs;
- ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci,
- &ehci->caps->hc_capbase));
+ /* PS3 EHCI HC errata fix 316 - The PS3 EHCI HC will reset its
+ * internal INSNREGXX setup regs back to the chip default values
+ * on Host Controller Reset (CMD_RESET) or Light Host Controller
+ * Reset (CMD_LRESET). The work-around for this is for the HC
+ * driver to re-initialise these regs whenever the HC is reset.
+ */
- dbg_hcs_params(ehci, "reset");
- dbg_hcc_params(ehci, "reset");
+ /* Set burst transfer counts to 256 out, 32 in. */
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ writel_be(0x01000020, (void __iomem *)ehci->regs +
+ ps3_ehci_hc_insnreg01);
- result = ehci_halt(ehci);
+ /* Enable burst transfer counts. */
- if (result)
- return result;
+ writel_be(0x00000001, (void __iomem *)ehci->regs +
+ ps3_ehci_hc_insnreg03);
+}
- result = ehci_init(hcd);
+static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
+{
+ int result;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ ehci->big_endian_mmio = 1;
+ ehci->caps = hcd->regs;
+ result = ehci_setup(hcd);
if (result)
return result;
- ehci_reset(ehci);
+ ps3_ehci_setup_insnreg(ehci);
return result;
}
@@ -57,7 +71,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
.product_desc = "PS3 EHCI Host Controller",
.hcd_priv_size = sizeof(struct ehci_hcd),
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
.reset = ps3_ehci_hc_reset,
.start = ehci_run,
.stop = ehci_stop,
@@ -79,7 +93,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
-static int __devinit ps3_ehci_probe(struct ps3_system_bus_device *dev)
+static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
{
int result;
struct usb_hcd *hcd;
@@ -167,7 +181,7 @@ static int __devinit ps3_ehci_probe(struct ps3_system_bus_device *dev)
ps3_system_bus_set_drvdata(dev, hcd);
- result = usb_add_hcd(hcd, virq, IRQF_DISABLED);
+ result = usb_add_hcd(hcd, virq, 0);
if (result) {
dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
@@ -175,6 +189,7 @@ static int __devinit ps3_ehci_probe(struct ps3_system_bus_device *dev)
goto fail_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
return result;
fail_add_hcd:
@@ -207,7 +222,6 @@ static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
tmp = hcd->irq;
- ehci_shutdown(hcd);
usb_remove_hcd(hcd);
ps3_system_bus_set_drvdata(dev, NULL);
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 233c288e3f9..54f5332f814 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -90,7 +90,7 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
struct ehci_qh_hw *hw = qh->hw;
/* writes to an active overlay are unsafe */
- BUG_ON(qh->qh_state != QH_STATE_IDLE);
+ WARN_ON(qh->qh_state != QH_STATE_IDLE);
hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
hw->hw_alt_next = EHCI_LIST_END(ehci);
@@ -100,19 +100,17 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
* and set the pseudo-toggle in udev. Only usb_clear_halt() will
* ever clear it.
*/
- if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
+ if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
unsigned is_out, epnum;
- is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
+ is_out = qh->is_out;
epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
- if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
+ if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) {
hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
- usb_settoggle (qh->dev, epnum, is_out, 1);
+ usb_settoggle(qh->ps.udev, epnum, is_out, 1);
}
}
- /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
- wmb ();
hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
@@ -125,18 +123,19 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qtd *qtd;
- if (list_empty (&qh->qtd_list))
- qtd = qh->dummy;
- else {
- qtd = list_entry (qh->qtd_list.next,
- struct ehci_qtd, qtd_list);
- /* first qtd may already be partially processed */
- if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
- qtd = NULL;
- }
+ qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);
- if (qtd)
- qh_update (ehci, qh, qtd);
+ /*
+ * The first qtd may already be partially processed.
+ * If we come here during unlink, the QH overlay region
+ * might have a reference to the just-unlinked qtd. The
+ * qtd is updated in qh_completions(). Update the QH
+ * overlay here.
+ */
+ if (qh->hw->hw_token & ACTIVE_BIT(ehci))
+ qh->hw->hw_qtd_next = qtd->hw_next;
+ else
+ qh_update(ehci, qh, qtd);
}
/*-------------------------------------------------------------------------*/
@@ -153,7 +152,7 @@ static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
spin_lock_irqsave(&ehci->lock, flags);
qh->clearing_tt = 0;
if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
- && HC_IS_RUNNING(hcd->state))
+ && ehci->rh_state == EHCI_RH_RUNNING)
qh_link_async(ehci, qh);
spin_unlock_irqrestore(&ehci->lock, flags);
}
@@ -169,13 +168,13 @@ static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
* Note: this routine is never called for Isochronous transfers.
*/
if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
struct usb_device *tt = urb->dev->tt->hub;
dev_dbg(&tt->dev,
"clear tt buffer port %d, a%d ep%d t%08x\n",
urb->dev->ttport, urb->dev->devnum,
usb_pipeendpoint(urb->pipe), token);
-#endif /* DEBUG */
+#endif /* CONFIG_DYNAMIC_DEBUG */
if (!ehci_is_TDI(ehci)
|| urb->dev->tt->hub !=
ehci_to_hcd(ehci)->self.root_hub) {
@@ -241,13 +240,6 @@ static int qtd_copy_status (
} else { /* unknown */
status = -EPROTO;
}
-
- ehci_vdbg (ehci,
- "dev%d ep%d%s qtd token %08x --> status %d\n",
- usb_pipedevice (urb->pipe),
- usb_pipeendpoint (urb->pipe),
- usb_pipein (urb->pipe) ? "in" : "out",
- token, status);
}
return status;
@@ -255,19 +247,10 @@ static int qtd_copy_status (
static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
-__releases(ehci->lock)
-__acquires(ehci->lock)
{
- if (likely (urb->hcpriv != NULL)) {
- struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
-
- /* S-mask in a QH means it's an interrupt urb */
- if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
-
- /* ... update hc-wide periodic stats (for usbfs) */
- ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
- }
- qh_put (qh);
+ if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
+ /* ... update hc-wide periodic stats */
+ ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
}
if (unlikely(urb->unlinked)) {
@@ -289,22 +272,16 @@ __acquires(ehci->lock)
urb->actual_length, urb->transfer_buffer_length);
#endif
- /* complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
- spin_unlock (&ehci->lock);
usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
- spin_lock (&ehci->lock);
}
-static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
-static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
-
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
/*
* Process and free completed qtds for a qh, returning URBs to drivers.
- * Chases up to qh->hw_current. Returns number of completions called,
- * indicating how much "real" work we did.
+ * Chases up to qh->hw_current. Returns nonzero if the caller should
+ * unlink qh.
*/
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
@@ -313,14 +290,9 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
struct list_head *entry, *tmp;
int last_status;
int stopped;
- unsigned count = 0;
u8 state;
- const __le32 halt = HALT_BIT(ehci);
struct ehci_qh_hw *hw = qh->hw;
- if (unlikely (list_empty (&qh->qtd_list)))
- return count;
-
/* completions (or tasks on other cpus) must never clobber HALT
* till we've gone through and cleaned everything up, even when
* they add urbs to this qh's queue or mark them for unlinking.
@@ -329,7 +301,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
*
* It's a bug for qh->qh_state to be anything other than
* QH_STATE_IDLE, unless our caller is scan_async() or
- * scan_periodic().
+ * scan_intr().
*/
state = qh->qh_state;
qh->qh_state = QH_STATE_COMPLETING;
@@ -338,7 +310,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
rescan:
last = NULL;
last_status = -EINPROGRESS;
- qh->needs_rescan = 0;
+ qh->dequeue_during_giveback = 0;
/* remove de-activated QTDs from front of queue.
* after faults (including short reads), cleanup this urb
@@ -357,7 +329,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
if (last) {
if (likely (last->urb != urb)) {
ehci_urb_done(ehci, last->urb, last_status);
- count++;
last_status = -EINPROGRESS;
}
ehci_qtd_free (ehci, last);
@@ -376,6 +347,17 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
retry_xacterr:
if ((token & QTD_STS_ACTIVE) == 0) {
+ /* Report Data Buffer Error: non-fatal but useful */
+ if (token & QTD_STS_DBE)
+ ehci_dbg(ehci,
+ "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ urb,
+ usb_endpoint_num(&urb->ep->desc),
+ usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd,
+ qh);
+
/* on STALL, error, and short reads this urb must
* complete and all its qtds must be recycled.
*/
@@ -422,12 +404,11 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
&& !(qtd->hw_alt_next
& EHCI_LIST_END(ehci))) {
stopped = 1;
- goto halt;
}
/* stop scanning when we reach qtds the hc is using */
} else if (likely (!stopped
- && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
+ && ehci->rh_state >= EHCI_RH_RUNNING)) {
break;
/* scan the whole queue for unlinks whenever it stops */
@@ -435,7 +416,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
stopped = 1;
/* cancel everything if we halt, suspend, etc */
- if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))
+ if (ehci->rh_state < EHCI_RH_RUNNING)
last_status = -ESHUTDOWN;
/* this qtd is active; skip it unless a previous qtd
@@ -444,11 +425,19 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
else if (last_status == -EINPROGRESS && !urb->unlinked)
continue;
- /* qh unlinked; token in overlay may be most current */
- if (state == QH_STATE_IDLE
- && cpu_to_hc32(ehci, qtd->qtd_dma)
- == hw->hw_current) {
+ /*
+ * If this was the active qtd when the qh was unlinked
+ * and the overlay's token is active, then the overlay
+ * hasn't been written back to the qtd yet so use its
+ * token instead of the qtd's. After the qtd is
+ * processed and removed, the overlay won't be valid
+ * any more.
+ */
+ if (state == QH_STATE_IDLE &&
+ qh->qtd_list.next == &qtd->qtd_list &&
+ (hw->hw_token & ACTIVE_BIT(ehci))) {
token = hc32_to_cpu(ehci, hw->hw_token);
+ hw->hw_token &= ~ACTIVE_BIT(ehci);
/* An unlink may leave an incomplete
* async transaction in the TT buffer.
@@ -456,16 +445,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
*/
ehci_clear_tt_buffer(ehci, qh, urb, token);
}
-
- /* force halt for unlinked or blocked qh, so we'll
- * patch the qh later and so that completions can't
- * activate it while we "know" it's stopped.
- */
- if ((halt & hw->hw_token) == 0) {
-halt:
- hw->hw_token |= halt;
- wmb ();
- }
}
/* unless we already know the urb's status, collect qtd status
@@ -523,23 +502,16 @@ halt:
/* last urb's completion might still need calling */
if (likely (last != NULL)) {
ehci_urb_done(ehci, last->urb, last_status);
- count++;
ehci_qtd_free (ehci, last);
}
/* Do we need to rescan for URBs dequeued during a giveback? */
- if (unlikely(qh->needs_rescan)) {
+ if (unlikely(qh->dequeue_during_giveback)) {
/* If the QH is already unlinked, do the rescan now. */
if (state == QH_STATE_IDLE)
goto rescan;
- /* Otherwise we have to wait until the QH is fully unlinked.
- * Our caller will start an unlink if qh->needs_rescan is
- * set. But if an unlink has already started, nothing needs
- * to be done.
- */
- if (state != QH_STATE_LINKED)
- qh->needs_rescan = 0;
+ /* Otherwise the caller must unlink the QH. */
}
/* restore original state; caller must unlink or relink */
@@ -548,33 +520,23 @@ halt:
/* be sure the hardware's done with the qh before refreshing
* it after fault cleanup, or recovering from silicon wrongly
* overlaying the dummy qtd (which reduces DMA chatter).
+ *
+ * We won't refresh a QH that's linked (after the HC
+ * stopped the queue). That avoids a race:
+ * - HC reads first part of QH;
+ * - CPU updates that first part and the token;
+ * - HC reads rest of that QH, including token
+ * Result: HC gets an inconsistent image, and then
+ * DMAs to/from the wrong memory (corrupting it).
+ *
+ * That should be rare for interrupt transfers,
+ * except maybe high bandwidth ...
*/
- if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) {
- switch (state) {
- case QH_STATE_IDLE:
- qh_refresh(ehci, qh);
- break;
- case QH_STATE_LINKED:
- /* We won't refresh a QH that's linked (after the HC
- * stopped the queue). That avoids a race:
- * - HC reads first part of QH;
- * - CPU updates that first part and the token;
- * - HC reads rest of that QH, including token
- * Result: HC gets an inconsistent image, and then
- * DMAs to/from the wrong memory (corrupting it).
- *
- * That should be rare for interrupt transfers,
- * except maybe high bandwidth ...
- */
-
- /* Tell the caller to start an unlink */
- qh->needs_rescan = 1;
- break;
- /* otherwise, unlink already started */
- }
- }
+ if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
+ qh->exception = 1;
- return count;
+ /* Let the caller know if the QH needs to be unlinked. */
+ return qh->exception;
}
/*-------------------------------------------------------------------------*/
@@ -661,7 +623,7 @@ qh_urb_transaction (
/*
* data transfer stage: buffer setup
*/
- i = urb->num_sgs;
+ i = urb->num_mapped_sgs;
if (len > 0 && i > 0) {
sg = urb->sg;
buf = sg_dma_address(sg);
@@ -736,7 +698,8 @@ qh_urb_transaction (
/*
* control requests may need a terminating data "status" ack;
- * bulk ones may need a terminating short packet (zero length).
+ * other OUT ones may need a terminating short packet
+ * (zero length).
*/
if (likely (urb->transfer_buffer_length != 0)) {
int one_more = 0;
@@ -745,7 +708,7 @@ qh_urb_transaction (
one_more = 1;
token ^= 0x0100; /* "in" <--> "out" */
token |= QTD_TOGGLE; /* force DATA1 */
- } else if (usb_pipebulk (urb->pipe)
+ } else if (usb_pipeout(urb->pipe)
&& (urb->transfer_flags & URB_ZERO_PACKET)
&& !(urb->transfer_buffer_length % maxpacket)) {
one_more = 1;
@@ -834,26 +797,35 @@ qh_make (
* For control/bulk requests, the HC or TT handles these.
*/
if (type == PIPE_INTERRUPT) {
- qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+ unsigned tmp;
+
+ qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
is_input, 0,
hb_mult(maxp) * max_packet(maxp)));
- qh->start = NO_FRAME;
+ qh->ps.phase = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
- qh->c_usecs = 0;
+ qh->ps.c_usecs = 0;
qh->gap_uf = 0;
- qh->period = urb->interval >> 3;
- if (qh->period == 0 && urb->interval != 1) {
+ if (urb->interval > 1 && urb->interval < 8) {
/* NOTE interval 2 or 4 uframes could work.
* But interval 1 scheduling is simpler, and
* includes high bandwidth.
*/
urb->interval = 1;
- } else if (qh->period > ehci->periodic_size) {
- qh->period = ehci->periodic_size;
- urb->interval = qh->period << 3;
+ } else if (urb->interval > ehci->periodic_size << 3) {
+ urb->interval = ehci->periodic_size << 3;
}
+ qh->ps.period = urb->interval >> 3;
+
+ /* period for bandwidth allocation */
+ tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
+ 1 << (urb->ep->desc.bInterval - 1));
+
+ /* Allow urb->interval to override */
+ qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
+ qh->ps.bw_period = qh->ps.bw_uperiod >> 3;
} else {
int think_time;
@@ -863,32 +835,40 @@ qh_make (
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { // SPLIT, gap, CSPLIT+DATA
- qh->c_usecs = qh->usecs + HS_USECS (0);
- qh->usecs = HS_USECS (1);
+ qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0);
+ qh->ps.usecs = HS_USECS(1);
} else { // SPLIT+DATA, gap, CSPLIT
- qh->usecs += HS_USECS (1);
- qh->c_usecs = HS_USECS (0);
+ qh->ps.usecs += HS_USECS(1);
+ qh->ps.c_usecs = HS_USECS(0);
}
think_time = tt ? tt->think_time : 0;
- qh->tt_usecs = NS_TO_US (think_time +
+ qh->ps.tt_usecs = NS_TO_US(think_time +
usb_calc_bus_time (urb->dev->speed,
is_input, 0, max_packet (maxp)));
- qh->period = urb->interval;
- if (qh->period > ehci->periodic_size) {
- qh->period = ehci->periodic_size;
- urb->interval = qh->period;
- }
+ if (urb->interval > ehci->periodic_size)
+ urb->interval = ehci->periodic_size;
+ qh->ps.period = urb->interval;
+
+ /* period for bandwidth allocation */
+ tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
+ urb->ep->desc.bInterval);
+ tmp = rounddown_pow_of_two(tmp);
+
+ /* Allow urb->interval to override */
+ qh->ps.bw_period = min_t(unsigned, tmp, urb->interval);
+ qh->ps.bw_uperiod = qh->ps.bw_period << 3;
}
}
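A quick worked example of the bandwidth-allocation period computed above (values are illustrative; EHCI_BANDWIDTH_SIZE and EHCI_BANDWIDTH_FRAMES are the length of the host-wide bandwidth table in microframes and frames respectively): a high-speed interrupt endpoint with bInterval = 4 requests a period of 1 << (4 - 1) = 8 microframes, so bw_uperiod = min(8, urb->interval) and, for urb->interval = 8, bw_period = 8 >> 3 = 1 frame. A full-speed endpoint with bInterval = 10 frames is clamped to the table length, rounded down to a power of two, and then limited by urb->interval; taking an 8-frame table for illustration, it reserves bandwidth every 8 frames (bw_uperiod = 64 microframes).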
/* support for tt scheduling, and access to toggles */
- qh->dev = urb->dev;
+ qh->ps.udev = urb->dev;
+ qh->ps.ep = urb->ep;
/* using TT? */
switch (urb->dev->speed) {
case USB_SPEED_LOW:
- info1 |= (1 << 12); /* EPS "low" */
+ info1 |= QH_LOW_SPEED;
/* FALL THROUGH */
case USB_SPEED_FULL:
@@ -896,8 +876,8 @@ qh_make (
if (type != PIPE_INTERRUPT)
info1 |= (EHCI_TUNE_RL_TT << 28);
if (type == PIPE_CONTROL) {
- info1 |= (1 << 27); /* for TT */
- info1 |= 1 << 14; /* toggle from qtd */
+ info1 |= QH_CONTROL_EP; /* for TT */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
}
info1 |= maxp << 16;
@@ -922,11 +902,11 @@ qh_make (
break;
case USB_SPEED_HIGH: /* no TT involved */
- info1 |= (2 << 12); /* EPS "high" */
+ info1 |= QH_HIGH_SPEED;
if (type == PIPE_CONTROL) {
info1 |= (EHCI_TUNE_RL_HS << 28);
info1 |= 64 << 16; /* usb2 fixed maxpacket */
- info1 |= 1 << 14; /* toggle from qtd */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (type == PIPE_BULK) {
info1 |= (EHCI_TUNE_RL_HS << 28);
@@ -944,26 +924,53 @@ qh_make (
}
break;
default:
- dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
+ ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
+ urb->dev->speed);
done:
- qh_put (qh);
+ qh_destroy(ehci, qh);
return NULL;
}
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
- /* init as live, toggle clear, advance to dummy */
+ /* init as live, toggle clear */
qh->qh_state = QH_STATE_IDLE;
hw = qh->hw;
hw->hw_info1 = cpu_to_hc32(ehci, info1);
hw->hw_info2 = cpu_to_hc32(ehci, info2);
+ qh->is_out = !is_input;
usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
- qh_refresh (ehci, qh);
return qh;
}
/*-------------------------------------------------------------------------*/
+static void enable_async(struct ehci_hcd *ehci)
+{
+ if (ehci->async_count++)
+ return;
+
+ /* Stop waiting to turn off the async schedule */
+ ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);
+
+ /* Don't start the schedule until ASS is 0 */
+ ehci_poll_ASS(ehci);
+ turn_on_io_watchdog(ehci);
+}
+
+static void disable_async(struct ehci_hcd *ehci)
+{
+ if (--ehci->async_count)
+ return;
+
+ /* The async schedule and unlink lists are supposed to be empty */
+ WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
+ !list_empty(&ehci->async_idle));
+
+ /* Don't turn off the schedule until ASS is 1 */
+ ehci_poll_ASS(ehci);
+}
+
/* move qh (and its qtds) onto async queue; maybe enable queue. */
static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
@@ -977,27 +984,11 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
WARN_ON(qh->qh_state != QH_STATE_IDLE);
- /* (re)start the async schedule? */
- head = ehci->async;
- timer_action_done (ehci, TIMER_ASYNC_OFF);
- if (!head->qh_next.qh) {
- u32 cmd = ehci_readl(ehci, &ehci->regs->command);
-
- if (!(cmd & CMD_ASE)) {
- /* in case a clear of CMD_ASE didn't take yet */
- (void)handshake(ehci, &ehci->regs->status,
- STS_ASS, 0, 150);
- cmd |= CMD_ASE | CMD_RUN;
- ehci_writel(ehci, cmd, &ehci->regs->command);
- ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
- /* posted write need not be known to HC yet ... */
- }
- }
-
/* clear halt and/or toggle; and maybe recover from silicon quirk */
qh_refresh(ehci, qh);
/* splice right after start */
+ head = ehci->async;
qh->qh_next = head->qh_next;
qh->hw->hw_next = head->hw->hw_next;
wmb ();
@@ -1005,10 +996,12 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
head->qh_next.qh = qh;
head->hw->hw_next = dma;
- qh_get(qh);
- qh->xacterrs = 0;
qh->qh_state = QH_STATE_LINKED;
+ qh->xacterrs = 0;
+ qh->exception = 0;
/* qtd completions reported later by interrupt */
+
+ enable_async(ehci);
}
/*-------------------------------------------------------------------------*/
@@ -1068,7 +1061,7 @@ static struct ehci_qh *qh_append_tds (
*/
token = qtd->hw_token;
qtd->hw_token = HALT_BIT(ehci);
- wmb ();
+
dummy = qh->dummy;
dma = dummy->qtd_dma;
@@ -1092,7 +1085,7 @@ static struct ehci_qh *qh_append_tds (
wmb ();
dummy->hw_token = token;
- urb->hcpriv = qh_get (qh);
+ urb->hcpriv = qh;
}
}
return qh;
@@ -1107,22 +1100,24 @@ submit_async (
struct list_head *qtd_list,
gfp_t mem_flags
) {
- struct ehci_qtd *qtd;
int epnum;
unsigned long flags;
struct ehci_qh *qh = NULL;
int rc;
- qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
epnum = urb->ep->desc.bEndpointAddress;
#ifdef EHCI_URB_TRACE
- ehci_dbg (ehci,
- "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
- __func__, urb->dev->devpath, urb,
- epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
- urb->transfer_buffer_length,
- qtd, urb->ep->hcpriv);
+ {
+ struct ehci_qtd *qtd;
+ qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
+ ehci_dbg(ehci,
+ "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ __func__, urb->dev->devpath, urb,
+ epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd, urb->ep->hcpriv);
+ }
#endif
spin_lock_irqsave (&ehci->lock, flags);
@@ -1154,103 +1149,282 @@ submit_async (
}
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_HCD_TEST_MODE
+/*
+ * This function creates the qtds and submits them for the
+ * SINGLE_STEP_SET_FEATURE Test.
+ * This is done in two parts: first the SETUP request for GetDesc is sent;
+ * then, 15 seconds later, the IN stage for GetDesc starts requesting data
+ * from the device.
+ *
+ * is_setup : input argument that decides which of the two stages is
+ * performed; TRUE - SETUP and FALSE - IN+STATUS
+ * Returns 0 on success
+ */
+static int submit_single_step_set_feature(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ int is_setup
+) {
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct list_head qtd_list;
+ struct list_head *head;
-/* the async qh for the qtds being reclaimed are now unlinked from the HC */
-
-static void end_unlink_async (struct ehci_hcd *ehci)
-{
- struct ehci_qh *qh = ehci->reclaim;
- struct ehci_qh *next;
+ struct ehci_qtd *qtd, *qtd_prev;
+ dma_addr_t buf;
+ int len, maxpacket;
+ u32 token;
- iaa_watchdog_done(ehci);
+ INIT_LIST_HEAD(&qtd_list);
+ head = &qtd_list;
- // qh->hw_next = cpu_to_hc32(qh->qh_dma);
- qh->qh_state = QH_STATE_IDLE;
- qh->qh_next.qh = NULL;
- qh_put (qh); // refcount from reclaim
+ /* URBs map to sequences of QTDs: one logical transaction */
+ qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
+ if (unlikely(!qtd))
+ return -1;
+ list_add_tail(&qtd->qtd_list, head);
+ qtd->urb = urb;
- /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
- next = qh->reclaim;
- ehci->reclaim = next;
- qh->reclaim = NULL;
+ token = QTD_STS_ACTIVE;
+ token |= (EHCI_TUNE_CERR << 10);
- qh_completions (ehci, qh);
+ len = urb->transfer_buffer_length;
+ /*
+ * Check if the request is to perform just the SETUP stage (getDesc);
+ * in the SINGLE_STEP_SET_FEATURE test, the DATA stage (IN) happens
+ * 15 seconds after the SETUP.
+ */
+ if (is_setup) {
+ /* SETUP pid */
+ qtd_fill(ehci, qtd, urb->setup_dma,
+ sizeof(struct usb_ctrlrequest),
+ token | (2 /* "setup" */ << 8), 8);
- if (!list_empty (&qh->qtd_list)
- && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
- qh_link_async (ehci, qh);
- else {
- /* it's not free to turn the async schedule on/off; leave it
- * active but idle for a while once it empties.
- */
- if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
- && ehci->async->qh_next.qh == NULL)
- timer_action (ehci, TIMER_ASYNC_OFF);
+ submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
+ return 0; /*Return now; we shall come back after 15 seconds*/
}
- qh_put(qh); /* refcount from async list */
- if (next) {
- ehci->reclaim = NULL;
- start_unlink_async (ehci, next);
- }
+ /*
+ * IN: data transfer stage: buffer setup: start the IN txn phase for
+ * the GetDesc SETUP that was sent 15 seconds earlier
+ */
+ token ^= QTD_TOGGLE; /*We need to start IN with DATA-1 Pid-sequence*/
+ buf = urb->transfer_dma;
+
+ token |= (1 /* "in" */ << 8); /*This is IN stage*/
+
+ maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0));
+
+ qtd_fill(ehci, qtd, buf, len, token, maxpacket);
+
+ /*
+ * Our IN phase shall always be a short read, so keep the queue running
+ * and let it advance to the next qtd, which is the zero-length OUT status
+ */
+ qtd->hw_alt_next = EHCI_LIST_END(ehci);
+
+ /* STATUS stage for GetDesc control request */
+ token ^= 0x0100; /* "in" <--> "out" */
+ token |= QTD_TOGGLE; /* force DATA1 */
+
+ qtd_prev = qtd;
+ qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+
+ /* don't fill any data in such packets */
+ qtd_fill(ehci, qtd, 0, 0, token, 0);
+
+ /* by default, enable interrupt on urb completion */
+ if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
+ qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
+
+ submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
+
+ return 0;
+
+cleanup:
+ qtd_list_free(ehci, urb, head);
+ return -1;
}
+#endif /* CONFIG_USB_HCD_TEST_MODE */
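For context (the caller lies outside this hunk, so this is only the expected usage): the EHSET test-mode code in the hub-control path invokes this helper twice for the same URB, first with is_setup set so only the SETUP stage is queued, then roughly 15 seconds later with is_setup clear to run the IN data stage and the zero-length OUT status stage, which is the sequencing the SINGLE_STEP_SET_FEATURE compliance test requires.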
-/* makes sure the async qh will become idle */
-/* caller must own ehci->lock */
+/*-------------------------------------------------------------------------*/
-static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- int cmd = ehci_readl(ehci, &ehci->regs->command);
- struct ehci_qh *prev;
-
-#ifdef DEBUG
- assert_spin_locked(&ehci->lock);
- if (ehci->reclaim
- || (qh->qh_state != QH_STATE_LINKED
- && qh->qh_state != QH_STATE_UNLINK_WAIT)
- )
- BUG ();
-#endif
+ struct ehci_qh *prev;
- /* stop async schedule right now? */
- if (unlikely (qh == ehci->async)) {
- /* can't get here without STS_ASS set */
- if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
- && !ehci->reclaim) {
- /* ... and CMD_IAAD clear */
- ehci_writel(ehci, cmd & ~CMD_ASE,
- &ehci->regs->command);
- wmb ();
- // handshake later, if we need to
- timer_action_done (ehci, TIMER_ASYNC_OFF);
- }
- return;
- }
-
- qh->qh_state = QH_STATE_UNLINK;
- ehci->reclaim = qh = qh_get (qh);
+ /* Add to the end of the list of QHs waiting for the next IAAD */
+ qh->qh_state = QH_STATE_UNLINK_WAIT;
+ list_add_tail(&qh->unlink_node, &ehci->async_unlink);
+ /* Unlink it from the schedule */
prev = ehci->async;
while (prev->qh_next.qh != qh)
prev = prev->qh_next.qh;
prev->hw->hw_next = qh->hw->hw_next;
prev->qh_next = qh->qh_next;
- wmb ();
+ if (ehci->qh_scan_next == qh)
+ ehci->qh_scan_next = qh->qh_next.qh;
+}
+
+static void start_iaa_cycle(struct ehci_hcd *ehci)
+{
+ /* Do nothing if an IAA cycle is already running */
+ if (ehci->iaa_in_progress)
+ return;
+ ehci->iaa_in_progress = true;
/* If the controller isn't running, we don't have to wait for it */
- if (unlikely(!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))) {
- /* if (unlikely (qh->reclaim != 0))
- * this will recurse, probably not much
- */
- end_unlink_async (ehci);
+ if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
+ end_unlink_async(ehci);
+
+ /* Otherwise start a new IAA cycle */
+ } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
+
+ /* Make sure the unlinks are all visible to the hardware */
+ wmb();
+
+ ehci_writel(ehci, ehci->command | CMD_IAAD,
+ &ehci->regs->command);
+ ehci_readl(ehci, &ehci->regs->command);
+ ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
+ }
+}
+
+/* the async qh for the qtds being unlinked are now gone from the HC */
+
+static void end_unlink_async(struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
+ bool early_exit;
+
+ if (ehci->has_synopsys_hc_bug)
+ ehci_writel(ehci, (u32) ehci->async->qh_dma,
+ &ehci->regs->async_next);
+
+ /* The current IAA cycle has ended */
+ ehci->iaa_in_progress = false;
+
+ if (list_empty(&ehci->async_unlink))
+ return;
+ qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
+ unlink_node); /* QH whose IAA cycle just ended */
+
+ /*
+ * If async_unlinking is set then this routine is already running,
+ * either on the stack or on another CPU.
+ */
+ early_exit = ehci->async_unlinking;
+
+ /* If the controller isn't running, process all the waiting QHs */
+ if (ehci->rh_state < EHCI_RH_RUNNING)
+ list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);
+
+ /*
+ * Intel (?) bug: The HC can write back the overlay region even
+ * after the IAA interrupt occurs. In self-defense, always go
+ * through two IAA cycles for each QH.
+ */
+ else if (qh->qh_state == QH_STATE_UNLINK_WAIT) {
+ qh->qh_state = QH_STATE_UNLINK;
+ early_exit = true;
+ }
+
+ /* Otherwise process only the first waiting QH (NVIDIA bug?) */
+ else
+ list_move_tail(&qh->unlink_node, &ehci->async_idle);
+
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (!list_empty(&ehci->async_unlink))
+ start_iaa_cycle(ehci);
+
+ /*
+ * Don't allow nesting or concurrent calls,
+ * or wait for the second IAA cycle for the next QH.
+ */
+ if (early_exit)
return;
+
+ /* Process the idle QHs */
+ ehci->async_unlinking = true;
+ while (!list_empty(&ehci->async_idle)) {
+ qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
+ unlink_node);
+ list_del(&qh->unlink_node);
+
+ qh->qh_state = QH_STATE_IDLE;
+ qh->qh_next.qh = NULL;
+
+ if (!list_empty(&qh->qtd_list))
+ qh_completions(ehci, qh);
+ if (!list_empty(&qh->qtd_list) &&
+ ehci->rh_state == EHCI_RH_RUNNING)
+ qh_link_async(ehci, qh);
+ disable_async(ehci);
+ }
+ ehci->async_unlinking = false;
+}
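To summarize the unlink flow built from the three routines above: single_unlink_async() detaches the QH from the hardware schedule and queues it on async_unlink in state QH_STATE_UNLINK_WAIT; start_iaa_cycle() rings the IAA doorbell unless a cycle is already in progress; and end_unlink_async() advances a waiting QH to QH_STATE_UNLINK on the first IAA (working around the late overlay write-back noted in the comment) and only after a second IAA moves it to async_idle, where it becomes QH_STATE_IDLE, its completed qtds are given back, and disable_async() drops the schedule's usage count.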
+
+static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
+
+static void unlink_empty_async(struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
+ struct ehci_qh *qh_to_unlink = NULL;
+ int count = 0;
+
+ /* Find the last async QH which has been empty for a timer cycle */
+ for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
+ if (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED) {
+ ++count;
+ if (qh->unlink_cycle != ehci->async_unlink_cycle)
+ qh_to_unlink = qh;
+ }
+ }
+
+ /* If nothing else is being unlinked, unlink the last empty QH */
+ if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
+ start_unlink_async(ehci, qh_to_unlink);
+ --count;
+ }
+
+ /* Other QHs will be handled later */
+ if (count > 0) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
+ ++ehci->async_unlink_cycle;
+ }
+}
+
+/* The root hub is suspended; unlink all the async QHs */
+static void __maybe_unused unlink_empty_async_suspended(struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
+
+ while (ehci->async->qh_next.qh) {
+ qh = ehci->async->qh_next.qh;
+ WARN_ON(!list_empty(&qh->qtd_list));
+ single_unlink_async(ehci, qh);
}
+ start_iaa_cycle(ehci);
+}
+
+/* makes sure the async qh will become idle */
+/* caller must own ehci->lock */
+
+static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ /* If the QH isn't linked then there's nothing we can do. */
+ if (qh->qh_state != QH_STATE_LINKED)
+ return;
- cmd |= CMD_IAAD;
- ehci_writel(ehci, cmd, &ehci->regs->command);
- (void)ehci_readl(ehci, &ehci->regs->command);
- iaa_watchdog_start(ehci);
+ single_unlink_async(ehci, qh);
+ start_iaa_cycle(ehci);
}
/*-------------------------------------------------------------------------*/
@@ -1258,54 +1432,45 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
static void scan_async (struct ehci_hcd *ehci)
{
struct ehci_qh *qh;
- enum ehci_timer_action action = TIMER_IO_WATCHDOG;
-
- ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
- timer_action_done (ehci, TIMER_ASYNC_SHRINK);
-rescan:
- qh = ehci->async->qh_next.qh;
- if (likely (qh != NULL)) {
- do {
- /* clean any finished work for this qh */
- if (!list_empty (&qh->qtd_list)
- && qh->stamp != ehci->stamp) {
- int temp;
-
- /* unlinks could happen here; completion
- * reporting drops the lock. rescan using
- * the latest schedule, but don't rescan
- * qhs we already finished (no looping).
- */
- qh = qh_get (qh);
- qh->stamp = ehci->stamp;
- temp = qh_completions (ehci, qh);
- if (qh->needs_rescan)
- unlink_async(ehci, qh);
- qh_put (qh);
- if (temp != 0) {
- goto rescan;
- }
- }
-
- /* unlink idle entries, reducing DMA usage as well
- * as HCD schedule-scanning costs. delay for any qh
- * we just scanned, there's a not-unusual case that it
- * doesn't stay idle for long.
- * (plus, avoids some kind of re-activation race.)
+ bool check_unlinks_later = false;
+
+ ehci->qh_scan_next = ehci->async->qh_next.qh;
+ while (ehci->qh_scan_next) {
+ qh = ehci->qh_scan_next;
+ ehci->qh_scan_next = qh->qh_next.qh;
+
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why ehci->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then ehci->qh_scan_next is adjusted
+ * in single_unlink_async().
*/
- if (list_empty(&qh->qtd_list)
+ temp = qh_completions(ehci, qh);
+ if (unlikely(temp)) {
+ start_unlink_async(ehci, qh);
+ } else if (list_empty(&qh->qtd_list)
&& qh->qh_state == QH_STATE_LINKED) {
- if (!ehci->reclaim
- && ((ehci->stamp - qh->stamp) & 0x1fff)
- >= (EHCI_SHRINK_FRAMES * 8))
- start_unlink_async(ehci, qh);
- else
- action = TIMER_ASYNC_SHRINK;
+ qh->unlink_cycle = ehci->async_unlink_cycle;
+ check_unlinks_later = true;
}
+ }
+ }
- qh = qh->qh_next.qh;
- } while (qh);
+ /*
+ * Unlink empty entries, reducing DMA usage as well
+ * as HCD schedule-scanning costs. Delay for any qh
+ * we just scanned, there's a not-unusual case that it
+ * doesn't stay idle for long.
+ */
+ if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
+ !(ehci->enabled_hrtimer_events &
+ BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
+ ++ehci->async_unlink_cycle;
}
- if (action == TIMER_ASYNC_SHRINK)
- timer_action (ehci, TIMER_ASYNC_SHRINK);
}
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index a92526d6e5a..e113fd73aea 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -36,8 +36,6 @@
static int ehci_get_frame (struct usb_hcd *hcd);
-/*-------------------------------------------------------------------------*/
-
/*
* periodic_next_shadow - return "next" pointer on shadow list
* @periodic: host pointer to qh/itd/sitd
@@ -98,83 +96,217 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
*/
*prev_p = *periodic_next_shadow(ehci, &here,
Q_NEXT_TYPE(ehci, *hw_p));
- *hw_p = *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p));
+
+ if (!ehci->use_dummy_qh ||
+ *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
+ != EHCI_LIST_END(ehci))
+ *hw_p = *shadow_next_periodic(ehci, &here,
+ Q_NEXT_TYPE(ehci, *hw_p));
+ else
+ *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}
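In the reworked periodic_unlink() above, the software shadow list is always advanced past the unlinked element, while the hardware link pointer is handled two ways: normally it is pointed at whatever followed the unlinked element, but on controllers flagged with ehci->use_dummy_qh an emptied slot is pointed at ehci->dummy rather than being left as an end-of-list marker, so the hardware always sees a valid (inactive) QH to fetch.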
-/* how many of the uframe's 125 usecs are allocated? */
-static unsigned short
-periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
+/*-------------------------------------------------------------------------*/
+
+/* Bandwidth and TT management */
+
+/* Find the TT data structure for this device; create it if necessary */
+static struct ehci_tt *find_tt(struct usb_device *udev)
{
- __hc32 *hw_p = &ehci->periodic [frame];
- union ehci_shadow *q = &ehci->pshadow [frame];
- unsigned usecs = 0;
- struct ehci_qh_hw *hw;
-
- while (q->ptr) {
- switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
- case Q_TYPE_QH:
- hw = q->qh->hw;
- /* is it in the S-mask? */
- if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
- usecs += q->qh->usecs;
- /* ... or C-mask? */
- if (hw->hw_info2 & cpu_to_hc32(ehci,
- 1 << (8 + uframe)))
- usecs += q->qh->c_usecs;
- hw_p = &hw->hw_next;
- q = &q->qh->qh_next;
- break;
- // case Q_TYPE_FSTN:
- default:
- /* for "save place" FSTNs, count the relevant INTR
- * bandwidth from the previous frame
- */
- if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) {
- ehci_dbg (ehci, "ignoring FSTN cost ...\n");
- }
- hw_p = &q->fstn->hw_next;
- q = &q->fstn->fstn_next;
- break;
- case Q_TYPE_ITD:
- if (q->itd->hw_transaction[uframe])
- usecs += q->itd->stream->usecs;
- hw_p = &q->itd->hw_next;
- q = &q->itd->itd_next;
- break;
- case Q_TYPE_SITD:
- /* is it in the S-mask? (count SPLIT, DATA) */
- if (q->sitd->hw_uframe & cpu_to_hc32(ehci,
- 1 << uframe)) {
- if (q->sitd->hw_fullspeed_ep &
- cpu_to_hc32(ehci, 1<<31))
- usecs += q->sitd->stream->usecs;
- else /* worst case for OUT start-split */
- usecs += HS_USECS_ISO (188);
- }
+ struct usb_tt *utt = udev->tt;
+ struct ehci_tt *tt, **tt_index, **ptt;
+ unsigned port;
+ bool allocated_index = false;
+
+ if (!utt)
+ return NULL; /* Not below a TT */
+
+ /*
+ * Find/create our data structure.
+ * For hubs with a single TT, we get it directly.
+ * For hubs with multiple TTs, there's an extra level of pointers.
+ */
+ tt_index = NULL;
+ if (utt->multi) {
+ tt_index = utt->hcpriv;
+ if (!tt_index) { /* Create the index array */
+ tt_index = kzalloc(utt->hub->maxchild *
+ sizeof(*tt_index), GFP_ATOMIC);
+ if (!tt_index)
+ return ERR_PTR(-ENOMEM);
+ utt->hcpriv = tt_index;
+ allocated_index = true;
+ }
+ port = udev->ttport - 1;
+ ptt = &tt_index[port];
+ } else {
+ port = 0;
+ ptt = (struct ehci_tt **) &utt->hcpriv;
+ }
+
+ tt = *ptt;
+ if (!tt) { /* Create the ehci_tt */
+ struct ehci_hcd *ehci =
+ hcd_to_ehci(bus_to_hcd(udev->bus));
- /* ... C-mask? (count CSPLIT, DATA) */
- if (q->sitd->hw_uframe &
- cpu_to_hc32(ehci, 1 << (8 + uframe))) {
- /* worst case for IN complete-split */
- usecs += q->sitd->stream->c_usecs;
+ tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
+ if (!tt) {
+ if (allocated_index) {
+ utt->hcpriv = NULL;
+ kfree(tt_index);
}
+ return ERR_PTR(-ENOMEM);
+ }
+ list_add_tail(&tt->tt_list, &ehci->tt_list);
+ INIT_LIST_HEAD(&tt->ps_list);
+ tt->usb_tt = utt;
+ tt->tt_port = port;
+ *ptt = tt;
+ }
- hw_p = &q->sitd->hw_next;
- q = &q->sitd->sitd_next;
- break;
+ return tt;
+}
+
+/* Release the TT above udev, if it's not in use */
+static void drop_tt(struct usb_device *udev)
+{
+ struct usb_tt *utt = udev->tt;
+ struct ehci_tt *tt, **tt_index, **ptt;
+ int cnt, i;
+
+ if (!utt || !utt->hcpriv)
+ return; /* Not below a TT, or never allocated */
+
+ cnt = 0;
+ if (utt->multi) {
+ tt_index = utt->hcpriv;
+ ptt = &tt_index[udev->ttport - 1];
+
+ /* How many entries are left in tt_index? */
+ for (i = 0; i < utt->hub->maxchild; ++i)
+ cnt += !!tt_index[i];
+ } else {
+ tt_index = NULL;
+ ptt = (struct ehci_tt **) &utt->hcpriv;
+ }
+
+ tt = *ptt;
+ if (!tt || !list_empty(&tt->ps_list))
+ return; /* never allocated, or still in use */
+
+ list_del(&tt->tt_list);
+ *ptt = NULL;
+ kfree(tt);
+ if (cnt == 1) {
+ utt->hcpriv = NULL;
+ kfree(tt_index);
+ }
+}
+
+static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
+ struct ehci_per_sched *ps)
+{
+ dev_dbg(&ps->udev->dev,
+ "ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
+ ps->ep->desc.bEndpointAddress,
+ (sign >= 0 ? "reserve" : "release"), type,
+ (ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
+ ps->phase, ps->phase_uf, ps->period,
+ ps->usecs, ps->c_usecs, ps->cs_mask);
+}
+
+static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
+ struct ehci_qh *qh, int sign)
+{
+ unsigned start_uf;
+ unsigned i, j, m;
+ int usecs = qh->ps.usecs;
+ int c_usecs = qh->ps.c_usecs;
+ int tt_usecs = qh->ps.tt_usecs;
+ struct ehci_tt *tt;
+
+ if (qh->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
+ return;
+ start_uf = qh->ps.bw_phase << 3;
+
+ bandwidth_dbg(ehci, sign, "intr", &qh->ps);
+
+ if (sign < 0) { /* Release bandwidth */
+ usecs = -usecs;
+ c_usecs = -c_usecs;
+ tt_usecs = -tt_usecs;
+ }
+
+ /* Entire transaction (high speed) or start-split (full/low speed) */
+ for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
+ i += qh->ps.bw_uperiod)
+ ehci->bandwidth[i] += usecs;
+
+ /* Complete-split (full/low speed) */
+ if (qh->ps.c_usecs) {
+ /* NOTE: adjustments needed for FSTN */
+ for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
+ i += qh->ps.bw_uperiod) {
+ for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
+ if (qh->ps.cs_mask & m)
+ ehci->bandwidth[i+j] += c_usecs;
+ }
}
}
-#ifdef DEBUG
- if (usecs > 100)
- ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
- frame * 8 + uframe, usecs);
-#endif
- return usecs;
+
+ /* FS/LS bus bandwidth */
+ if (tt_usecs) {
+ tt = find_tt(qh->ps.udev);
+ if (sign > 0)
+ list_add_tail(&qh->ps.ps_list, &tt->ps_list);
+ else
+ list_del(&qh->ps.ps_list);
+
+ for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
+ i += qh->ps.bw_period)
+ tt->bandwidth[i] += tt_usecs;
+ }
}
/*-------------------------------------------------------------------------*/
-static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
+static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
+ struct ehci_tt *tt)
+{
+ struct ehci_per_sched *ps;
+ unsigned uframe, uf, x;
+ u8 *budget_line;
+
+ if (!tt)
+ return;
+ memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);
+
+ /* Add up the contributions from all the endpoints using this TT */
+ list_for_each_entry(ps, &tt->ps_list, ps_list) {
+ for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
+ uframe += ps->bw_uperiod) {
+ budget_line = &budget_table[uframe];
+ x = ps->tt_usecs;
+
+ /* propagate the time forward */
+ for (uf = ps->phase_uf; uf < 8; ++uf) {
+ x += budget_line[uf];
+
+ /* Each microframe lasts 125 us */
+ if (x <= 125) {
+ budget_line[uf] = x;
+ break;
+ } else {
+ budget_line[uf] = 125;
+ x -= 125;
+ }
+ }
+ }
+ }
+}
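Worked example of the carry-forward in compute_tt_budget(): if an endpoint on this TT needs tt_usecs = 300 starting at phase_uf = 1 and the budget line is otherwise empty, the loop fills budget_line[1] = 125, budget_line[2] = 125 and budget_line[3] = 50, reflecting that a full/low-speed transfer started in one microframe keeps the TT's downstream bus busy into the following microframes.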
+
+static int __maybe_unused same_tt(struct usb_device *dev1,
+ struct usb_device *dev2)
{
if (!dev1->tt || !dev2->tt)
return 0;
@@ -222,68 +354,6 @@ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
}
}
-/* How many of the tt's periodic downstream 1000 usecs are allocated?
- *
- * While this measures the bandwidth in terms of usecs/uframe,
- * the low/fullspeed bus has no notion of uframes, so any particular
- * low/fullspeed transfer can "carry over" from one uframe to the next,
- * since the TT just performs downstream transfers in sequence.
- *
- * For example two separate 100 usec transfers can start in the same uframe,
- * and the second one would "carry over" 75 usecs into the next uframe.
- */
-static void
-periodic_tt_usecs (
- struct ehci_hcd *ehci,
- struct usb_device *dev,
- unsigned frame,
- unsigned short tt_usecs[8]
-)
-{
- __hc32 *hw_p = &ehci->periodic [frame];
- union ehci_shadow *q = &ehci->pshadow [frame];
- unsigned char uf;
-
- memset(tt_usecs, 0, 16);
-
- while (q->ptr) {
- switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
- case Q_TYPE_ITD:
- hw_p = &q->itd->hw_next;
- q = &q->itd->itd_next;
- continue;
- case Q_TYPE_QH:
- if (same_tt(dev, q->qh->dev)) {
- uf = tt_start_uframe(ehci, q->qh->hw->hw_info2);
- tt_usecs[uf] += q->qh->tt_usecs;
- }
- hw_p = &q->qh->hw->hw_next;
- q = &q->qh->qh_next;
- continue;
- case Q_TYPE_SITD:
- if (same_tt(dev, q->sitd->urb->dev)) {
- uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
- tt_usecs[uf] += q->sitd->stream->tt_usecs;
- }
- hw_p = &q->sitd->hw_next;
- q = &q->sitd->sitd_next;
- continue;
- // case Q_TYPE_FSTN:
- default:
- ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n",
- frame);
- hw_p = &q->fstn->hw_next;
- q = &q->fstn->fstn_next;
- }
- }
-
- carryover_tt_bandwidth(tt_usecs);
-
- if (max_tt_usecs[7] < tt_usecs[7])
- ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n",
- frame, tt_usecs[7] - max_tt_usecs[7]);
-}
-
/*
* Return true if the device's tt's downstream bus is available for a
* periodic transfer of the specified length (usecs), starting at the
@@ -307,32 +377,32 @@ periodic_tt_usecs (
*/
static int tt_available (
struct ehci_hcd *ehci,
- unsigned period,
- struct usb_device *dev,
+ struct ehci_per_sched *ps,
+ struct ehci_tt *tt,
unsigned frame,
- unsigned uframe,
- u16 usecs
+ unsigned uframe
)
{
+ unsigned period = ps->bw_period;
+ unsigned usecs = ps->tt_usecs;
+
if ((period == 0) || (uframe >= 7)) /* error */
return 0;
- for (; frame < ehci->periodic_size; frame += period) {
- unsigned short tt_usecs[8];
+ for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
+ frame += period) {
+ unsigned i, uf;
+ unsigned short tt_usecs[8];
- periodic_tt_usecs (ehci, dev, frame, tt_usecs);
+ if (tt->bandwidth[frame] + usecs > 900)
+ return 0;
- ehci_vdbg(ehci, "tt frame %d check %d usecs start uframe %d in"
- " schedule %d/%d/%d/%d/%d/%d/%d/%d\n",
- frame, usecs, uframe,
- tt_usecs[0], tt_usecs[1], tt_usecs[2], tt_usecs[3],
- tt_usecs[4], tt_usecs[5], tt_usecs[6], tt_usecs[7]);
+ uf = frame << 3;
+ for (i = 0; i < 8; (++i, ++uf))
+ tt_usecs[i] = ehci->tt_budget[uf];
- if (max_tt_usecs[uframe] <= tt_usecs[uframe]) {
- ehci_vdbg(ehci, "frame %d uframe %d fully scheduled\n",
- frame, uframe);
+ if (max_tt_usecs[uframe] <= tt_usecs[uframe])
return 0;
- }
/* special case for isoc transfers larger than 125us:
* the first and each subsequent fully used uframe
@@ -341,15 +411,10 @@ static int tt_available (
*/
if (125 < usecs) {
int ufs = (usecs / 125);
- int i;
+
for (i = uframe; i < (uframe + ufs) && i < 8; i++)
- if (0 < tt_usecs[i]) {
- ehci_vdbg(ehci,
- "multi-uframe xfer can't fit "
- "in frame %d uframe %d\n",
- frame, i);
+ if (0 < tt_usecs[i])
return 0;
- }
}
tt_usecs[uframe] += usecs;
@@ -357,12 +422,8 @@ static int tt_available (
carryover_tt_bandwidth(tt_usecs);
/* fail if the carryover pushed bw past the last uframe's limit */
- if (max_tt_usecs[7] < tt_usecs[7]) {
- ehci_vdbg(ehci,
- "tt unavailable usecs %d frame %d uframe %d\n",
- usecs, frame, uframe);
+ if (max_tt_usecs[7] < tt_usecs[7])
return 0;
- }
}
return 1;
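The early "if (tt->bandwidth[frame] + usecs > 900)" rejection above is a frame-level cut-off: of the 1000 us in a full-speed frame, at most 90% may be committed to periodic transfers, so any request that would push a TT's per-frame total past 900 us is refused before the per-microframe budget checks are applied.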
@@ -404,7 +465,7 @@ static int tt_no_collision (
continue;
case Q_TYPE_QH:
hw = here.qh->hw;
- if (same_tt (dev, here.qh->dev)) {
+ if (same_tt(dev, here.qh->ps.udev)) {
u32 mask;
mask = hc32_to_cpu(ehci,
@@ -451,69 +512,26 @@ static int tt_no_collision (
/*-------------------------------------------------------------------------*/
-static int enable_periodic (struct ehci_hcd *ehci)
+static void enable_periodic(struct ehci_hcd *ehci)
{
- u32 cmd;
- int status;
+ if (ehci->periodic_count++)
+ return;
- if (ehci->periodic_sched++)
- return 0;
+ /* Stop waiting to turn off the periodic schedule */
+ ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);
- /* did clearing PSE did take effect yet?
- * takes effect only at frame boundaries...
- */
- status = handshake_on_error_set_halt(ehci, &ehci->regs->status,
- STS_PSS, 0, 9 * 125);
- if (status)
- return status;
-
- cmd = ehci_readl(ehci, &ehci->regs->command) | CMD_PSE;
- ehci_writel(ehci, cmd, &ehci->regs->command);
- /* posted write ... PSS happens later */
- ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
-
- /* make sure ehci_work scans these */
- ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index)
- % (ehci->periodic_size << 3);
- if (unlikely(ehci->broken_periodic))
- ehci->last_periodic_enable = ktime_get_real();
- return 0;
+ /* Don't start the schedule until PSS is 0 */
+ ehci_poll_PSS(ehci);
+ turn_on_io_watchdog(ehci);
}
-static int disable_periodic (struct ehci_hcd *ehci)
+static void disable_periodic(struct ehci_hcd *ehci)
{
- u32 cmd;
- int status;
-
- if (--ehci->periodic_sched)
- return 0;
-
- if (unlikely(ehci->broken_periodic)) {
- /* delay experimentally determined */
- ktime_t safe = ktime_add_us(ehci->last_periodic_enable, 1000);
- ktime_t now = ktime_get_real();
- s64 delay = ktime_us_delta(safe, now);
-
- if (unlikely(delay > 0))
- udelay(delay);
- }
-
- /* did setting PSE not take effect yet?
- * takes effect only at frame boundaries...
- */
- status = handshake_on_error_set_halt(ehci, &ehci->regs->status,
- STS_PSS, STS_PSS, 9 * 125);
- if (status)
- return status;
-
- cmd = ehci_readl(ehci, &ehci->regs->command) & ~CMD_PSE;
- ehci_writel(ehci, cmd, &ehci->regs->command);
- /* posted write ... */
-
- free_cached_lists(ehci);
+ if (--ehci->periodic_count)
+ return;
- ehci->next_uframe = -1;
- return 0;
+ /* Don't turn off the schedule until PSS is 1 */
+ ehci_poll_PSS(ehci);
}
/*-------------------------------------------------------------------------*/
@@ -524,22 +542,22 @@ static int disable_periodic (struct ehci_hcd *ehci)
* this just links in a qh; caller guarantees uframe masks are set right.
* no FSTN support (yet; ehci 0.96+)
*/
-static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned i;
- unsigned period = qh->period;
+ unsigned period = qh->ps.period;
- dev_dbg (&qh->dev->dev,
+ dev_dbg(&qh->ps.udev->dev,
"link qh%d-%04x/%p start %d [%d/%d us]\n",
period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
& (QH_CMASK | QH_SMASK),
- qh, qh->start, qh->usecs, qh->c_usecs);
+ qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
/* high bandwidth, or otherwise every microframe */
if (period == 0)
period = 1;
- for (i = qh->start; i < ehci->periodic_size; i += period) {
+ for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
union ehci_shadow *prev = &ehci->pshadow[i];
__hc32 *hw_p = &ehci->periodic[i];
union ehci_shadow here = *prev;
@@ -559,7 +577,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
* enables sharing interior tree nodes
*/
while (here.ptr && qh != here.qh) {
- if (qh->period > here.qh->period)
+ if (qh->ps.period > here.qh->ps.period)
break;
prev = &here.qh->qh_next;
hw_p = &here.qh->hw->hw_next;
@@ -577,97 +595,154 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
}
qh->qh_state = QH_STATE_LINKED;
qh->xacterrs = 0;
- qh_get (qh);
+ qh->exception = 0;
- /* update per-qh bandwidth for usbfs */
- ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
- ? ((qh->usecs + qh->c_usecs) / qh->period)
- : (qh->usecs * 8);
+ /* update per-qh bandwidth for debugfs */
+ ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
+ ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
+ : (qh->ps.usecs * 8);
+
+ list_add(&qh->intr_node, &ehci->intr_qh_list);
/* maybe enable periodic schedule processing */
- return enable_periodic(ehci);
+ ++ehci->intr_count;
+ enable_periodic(ehci);
}
-static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned i;
unsigned period;
- // FIXME:
- // IF this isn't high speed
- // and this qh is active in the current uframe
- // (and overlay token SplitXstate is false?)
- // THEN
- // qh->hw_info1 |= cpu_to_hc32(1 << 7 /* "ignore" */);
+ /*
+ * If qh is for a low/full-speed device, simply unlinking it
+ * could interfere with an ongoing split transaction. To unlink
+ * it safely would require setting the QH_INACTIVATE bit and
+ * waiting at least one frame, as described in EHCI 4.12.2.5.
+ *
+ * We won't bother with any of this. Instead, we assume that the
+ * only reason for unlinking an interrupt QH while the current URB
+ * is still active is to dequeue all the URBs (flush the whole
+ * endpoint queue).
+ *
+ * If rebalancing the periodic schedule is ever implemented, this
+ * approach will no longer be valid.
+ */
/* high bandwidth, or otherwise part of every microframe */
- if ((period = qh->period) == 0)
- period = 1;
+ period = qh->ps.period ? : 1;
- for (i = qh->start; i < ehci->periodic_size; i += period)
+ for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
periodic_unlink (ehci, i, qh);
- /* update per-qh bandwidth for usbfs */
- ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
- ? ((qh->usecs + qh->c_usecs) / qh->period)
- : (qh->usecs * 8);
+ /* update per-qh bandwidth for debugfs */
+ ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
+ ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
+ : (qh->ps.usecs * 8);
- dev_dbg (&qh->dev->dev,
+ dev_dbg(&qh->ps.udev->dev,
"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
- qh->period,
+ qh->ps.period,
hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
- qh, qh->start, qh->usecs, qh->c_usecs);
+ qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
/* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = NULL;
- qh_put (qh);
- /* maybe turn off periodic schedule */
- return disable_periodic(ehci);
+ if (ehci->qh_scan_next == qh)
+ ehci->qh_scan_next = list_entry(qh->intr_node.next,
+ struct ehci_qh, intr_node);
+ list_del(&qh->intr_node);
}
-static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- unsigned wait;
- struct ehci_qh_hw *hw = qh->hw;
- int rc;
+ if (qh->qh_state != QH_STATE_LINKED ||
+ list_empty(&qh->unlink_node))
+ return;
+
+ list_del_init(&qh->unlink_node);
- /* If the QH isn't linked then there's nothing we can do
- * unless we were called during a giveback, in which case
- * qh_completions() has to deal with it.
+ /*
+	 * TODO: disable the EHCI_HRTIMER_START_UNLINK_INTR event to
+	 * avoid an unnecessary CPU wakeup
*/
- if (qh->qh_state != QH_STATE_LINKED) {
- if (qh->qh_state == QH_STATE_COMPLETING)
- qh->needs_rescan = 1;
+}
+
+static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ /* If the QH isn't linked then there's nothing we can do. */
+ if (qh->qh_state != QH_STATE_LINKED)
return;
- }
+
+ /* if the qh is waiting for unlink, cancel it now */
+ cancel_unlink_wait_intr(ehci, qh);
qh_unlink_periodic (ehci, qh);
- /* simple/paranoid: always delay, expecting the HC needs to read
- * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
- * expect khubd to clean up after any CSPLITs we won't issue.
- * active high speed queues may need bigger delays...
+ /* Make sure the unlinks are visible before starting the timer */
+ wmb();
+
+ /*
+ * The EHCI spec doesn't say how long it takes the controller to
+ * stop accessing an unlinked interrupt QH. The timer delay is
+ * 9 uframes; presumably that will be long enough.
*/
- if (list_empty (&qh->qtd_list)
- || (cpu_to_hc32(ehci, QH_CMASK)
- & hw->hw_info2) != 0)
- wait = 2;
- else
- wait = 55; /* worst case: 3 * 1024 */
+ qh->unlink_cycle = ehci->intr_unlink_cycle;
+
+ /* New entries go at the end of the intr_unlink list */
+ list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
+
+ if (ehci->intr_unlinking)
+ ; /* Avoid recursive calls */
+ else if (ehci->rh_state < EHCI_RH_RUNNING)
+ ehci_handle_intr_unlinks(ehci);
+ else if (ehci->intr_unlink.next == &qh->unlink_node) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
+ ++ehci->intr_unlink_cycle;
+ }
+}
+
+/*
+ * It is common only one intr URB is scheduled on one qh, and
+ * given complete() is run in tasklet context, introduce a bit
+ * delay to avoid unlink qh too early.
+ */
+static void start_unlink_intr_wait(struct ehci_hcd *ehci,
+ struct ehci_qh *qh)
+{
+ qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
+
+ /* New entries go at the end of the intr_unlink_wait list */
+ list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
+
+ if (ehci->rh_state < EHCI_RH_RUNNING)
+ ehci_handle_start_intr_unlinks(ehci);
+ else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
+ ++ehci->intr_unlink_wait_cycle;
+ }
+}
+
+static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ struct ehci_qh_hw *hw = qh->hw;
+ int rc;
- udelay (wait);
qh->qh_state = QH_STATE_IDLE;
hw->hw_next = EHCI_LIST_END(ehci);
- wmb ();
- qh_completions(ehci, qh);
+ if (!list_empty(&qh->qtd_list))
+ qh_completions(ehci, qh);
/* reschedule QH iff another request is queued */
- if (!list_empty(&qh->qtd_list) &&
- HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
+ if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
rc = qh_schedule(ehci, qh);
+ if (rc == 0) {
+ qh_refresh(ehci, qh);
+ qh_link_periodic(ehci, qh);
+ }
/* An error here likely indicates handshake failure
* or no space left in the schedule. Neither fault
@@ -675,10 +750,15 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
*
* FIXME kill the now-dysfunctional queued urbs
*/
- if (rc != 0)
+ else {
ehci_err(ehci, "can't reschedule qh %p, err %d\n",
qh, rc);
+ }
}
+
+ /* maybe turn off periodic schedule */
+ --ehci->intr_count;
+ disable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/
@@ -687,42 +767,22 @@ static int check_period (
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
- unsigned period,
+ unsigned uperiod,
unsigned usecs
) {
- int claimed;
-
/* complete split running into next frame?
* given FSTN support, we could sometimes check...
*/
if (uframe >= 8)
return 0;
- /*
- * 80% periodic == 100 usec/uframe available
- * convert "usecs we need" to "max already claimed"
- */
- usecs = 100 - usecs;
+ /* convert "usecs we need" to "max already claimed" */
+ usecs = ehci->uframe_periodic_max - usecs;
- /* we "know" 2 and 4 uframe intervals were rejected; so
- * for period 0, check _every_ microframe in the schedule.
- */
- if (unlikely (period == 0)) {
- do {
- for (uframe = 0; uframe < 7; uframe++) {
- claimed = periodic_usecs (ehci, frame, uframe);
- if (claimed > usecs)
- return 0;
- }
- } while ((frame += 1) < ehci->periodic_size);
-
- /* just check the specified uframe, at that period */
- } else {
- do {
- claimed = periodic_usecs (ehci, frame, uframe);
- if (claimed > usecs)
- return 0;
- } while ((frame += period) < ehci->periodic_size);
+ for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
+ uframe += uperiod) {
+ if (ehci->bandwidth[uframe] > usecs)
+ return 0;
}
// success!
@@ -733,40 +793,40 @@ static int check_intr_schedule (
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
- const struct ehci_qh *qh,
- __hc32 *c_maskp
+ struct ehci_qh *qh,
+ unsigned *c_maskp,
+ struct ehci_tt *tt
)
{
int retval = -ENOSPC;
u8 mask = 0;
- if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
+ if (qh->ps.c_usecs && uframe >= 6) /* FSTN territory? */
goto done;
- if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
+ if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
goto done;
- if (!qh->c_usecs) {
+ if (!qh->ps.c_usecs) {
retval = 0;
*c_maskp = 0;
goto done;
}
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
- if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
- qh->tt_usecs)) {
+ if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
unsigned i;
/* TODO : this may need FSTN for SSPLIT in uframe 5. */
- for (i=uframe+1; i<8 && i<uframe+4; i++)
- if (!check_period (ehci, frame, i,
- qh->period, qh->c_usecs))
+ for (i = uframe+2; i < 8 && i <= uframe+4; i++)
+ if (!check_period(ehci, frame, i,
+ qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
else
mask |= 1 << i;
retval = 0;
- *c_maskp = cpu_to_hc32(ehci, mask << 8);
+ *c_maskp = mask;
}
#else
/* Make sure this tt's buffer is also available for CSPLITs.
@@ -777,15 +837,15 @@ static int check_intr_schedule (
* one smart pass...
*/
mask = 0x03 << (uframe + qh->gap_uf);
- *c_maskp = cpu_to_hc32(ehci, mask << 8);
+ *c_maskp = mask;
mask |= 1 << uframe;
- if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
- if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
- qh->period, qh->c_usecs))
+ if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
+ if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
+ qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
- if (!check_period (ehci, frame, uframe + qh->gap_uf,
- qh->period, qh->c_usecs))
+ if (!check_period(ehci, frame, uframe + qh->gap_uf,
+ qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
retval = 0;
}
@@ -799,66 +859,68 @@ done:
*/
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- int status;
+ int status = 0;
unsigned uframe;
- __hc32 c_mask;
- unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
+ unsigned c_mask;
struct ehci_qh_hw *hw = qh->hw;
+ struct ehci_tt *tt;
- qh_refresh(ehci, qh);
hw->hw_next = EHCI_LIST_END(ehci);
- frame = qh->start;
/* reuse the previous schedule slots, if we can */
- if (frame < qh->period) {
- uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK);
- status = check_intr_schedule (ehci, frame, --uframe,
- qh, &c_mask);
- } else {
- uframe = 0;
- c_mask = 0;
- status = -ENOSPC;
+ if (qh->ps.phase != NO_FRAME) {
+ ehci_dbg(ehci, "reused qh %p schedule\n", qh);
+ return 0;
}
+ uframe = 0;
+ c_mask = 0;
+ tt = find_tt(qh->ps.udev);
+ if (IS_ERR(tt)) {
+ status = PTR_ERR(tt);
+ goto done;
+ }
+ compute_tt_budget(ehci->tt_budget, tt);
+
/* else scan the schedule to find a group of slots such that all
* uframes have enough periodic bandwidth available.
*/
- if (status) {
- /* "normal" case, uframing flexible except with splits */
- if (qh->period) {
- int i;
-
- for (i = qh->period; status && i > 0; --i) {
- frame = ++ehci->random_frame % qh->period;
- for (uframe = 0; uframe < 8; uframe++) {
- status = check_intr_schedule (ehci,
- frame, uframe, qh,
- &c_mask);
- if (status == 0)
- break;
- }
+ /* "normal" case, uframing flexible except with splits */
+ if (qh->ps.bw_period) {
+ int i;
+ unsigned frame;
+
+ for (i = qh->ps.bw_period; i > 0; --i) {
+ frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
+ for (uframe = 0; uframe < 8; uframe++) {
+ status = check_intr_schedule(ehci,
+ frame, uframe, qh, &c_mask, tt);
+ if (status == 0)
+ goto got_it;
}
-
- /* qh->period == 0 means every uframe */
- } else {
- frame = 0;
- status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
}
- if (status)
- goto done;
- qh->start = frame;
- /* reset S-frame and (maybe) C-frame masks */
- hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
- hw->hw_info2 |= qh->period
- ? cpu_to_hc32(ehci, 1 << uframe)
- : cpu_to_hc32(ehci, QH_SMASK);
- hw->hw_info2 |= c_mask;
- } else
- ehci_dbg (ehci, "reused qh %p schedule\n", qh);
+ /* qh->ps.bw_period == 0 means every uframe */
+ } else {
+ status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
+ }
+ if (status)
+ goto done;
+
+ got_it:
+ qh->ps.phase = (qh->ps.period ? ehci->random_frame &
+ (qh->ps.period - 1) : 0);
+ qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
+ qh->ps.phase_uf = uframe;
+ qh->ps.cs_mask = qh->ps.period ?
+ (c_mask << 8) | (1 << uframe) :
+ QH_SMASK;
+
+ /* reset S-frame and (maybe) C-frame masks */
+ hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
+ hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
+ reserve_release_intr_bandwidth(ehci, qh, 1);
- /* stuff into the periodic schedule */
- status = qh_link_periodic (ehci, qh);
done:
return status;
}
@@ -904,6 +966,15 @@ static int intr_submit (
qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
BUG_ON (qh == NULL);
+ /* stuff into the periodic schedule */
+ if (qh->qh_state == QH_STATE_IDLE) {
+ qh_refresh(ehci, qh);
+ qh_link_periodic(ehci, qh);
+ } else {
+ /* cancel unlink wait for the qh */
+ cancel_unlink_wait_intr(ehci, qh);
+ }
+
/* ... update usbfs periodic stats */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
@@ -918,6 +989,34 @@ done_not_linked:
return status;
}
+static void scan_intr(struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
+
+ list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
+ intr_node) {
+
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why ehci->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then ehci->qh_scan_next is adjusted
+ * in qh_unlink_periodic().
+ */
+ temp = qh_completions(ehci, qh);
+ if (unlikely(temp))
+ start_unlink_intr(ehci, qh);
+ else if (unlikely(list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED))
+ start_unlink_intr_wait(ehci, qh);
+ }
+ }
+}
+
/*-------------------------------------------------------------------------*/
/* ehci_iso_stream ops work with both ITD and SITD */
@@ -931,8 +1030,8 @@ iso_stream_alloc (gfp_t mem_flags)
if (likely (stream != NULL)) {
INIT_LIST_HEAD(&stream->td_list);
INIT_LIST_HEAD(&stream->free_list);
- stream->next_uframe = -1;
- stream->refcount = 1;
+ stream->next_uframe = NO_FRAME;
+ stream->ps.phase = NO_FRAME;
}
return stream;
}
@@ -941,25 +1040,24 @@ static void
iso_stream_init (
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
- struct usb_device *dev,
- int pipe,
- unsigned interval
+ struct urb *urb
)
{
static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
+ struct usb_device *dev = urb->dev;
u32 buf1;
unsigned epnum, maxp;
int is_input;
- long bandwidth;
+ unsigned tmp;
/*
* this might be a "high bandwidth" highspeed endpoint,
* as encoded in the ep descriptor's wMaxPacket field
*/
- epnum = usb_pipeendpoint (pipe);
- is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
- maxp = usb_maxpacket(dev, pipe, !is_input);
+ epnum = usb_pipeendpoint(urb->pipe);
+ is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
+ maxp = usb_endpoint_maxp(&urb->ep->desc);
if (is_input) {
buf1 = (1 << 11);
} else {
@@ -983,9 +1081,19 @@ iso_stream_init (
/* usbfs wants to report the average usecs per frame tied up
* when transfers on this endpoint are scheduled ...
*/
- stream->usecs = HS_USECS_ISO (maxp);
- bandwidth = stream->usecs * 8;
- bandwidth /= interval;
+ stream->ps.usecs = HS_USECS_ISO(maxp);
+
+ /* period for bandwidth allocation */
+ tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
+ 1 << (urb->ep->desc.bInterval - 1));
+
+ /* Allow urb->interval to override */
+ stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
+
+ stream->uperiod = urb->interval;
+ stream->ps.period = urb->interval >> 3;
+ stream->bandwidth = stream->ps.usecs * 8 /
+ stream->ps.bw_uperiod;
} else {
u32 addr;
@@ -999,93 +1107,49 @@ iso_stream_init (
addr |= dev->tt->hub->devnum << 16;
addr |= epnum << 8;
addr |= dev->devnum;
- stream->usecs = HS_USECS_ISO (maxp);
+ stream->ps.usecs = HS_USECS_ISO(maxp);
think_time = dev->tt ? dev->tt->think_time : 0;
- stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time (
+ stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
dev->speed, is_input, 1, maxp));
hs_transfers = max (1u, (maxp + 187) / 188);
if (is_input) {
u32 tmp;
addr |= 1 << 31;
- stream->c_usecs = stream->usecs;
- stream->usecs = HS_USECS_ISO (1);
- stream->raw_mask = 1;
+ stream->ps.c_usecs = stream->ps.usecs;
+ stream->ps.usecs = HS_USECS_ISO(1);
+ stream->ps.cs_mask = 1;
/* c-mask as specified in USB 2.0 11.18.4 3.c */
tmp = (1 << (hs_transfers + 2)) - 1;
- stream->raw_mask |= tmp << (8 + 2);
+ stream->ps.cs_mask |= tmp << (8 + 2);
} else
- stream->raw_mask = smask_out [hs_transfers - 1];
- bandwidth = stream->usecs + stream->c_usecs;
- bandwidth /= interval << 3;
+ stream->ps.cs_mask = smask_out[hs_transfers - 1];
+
+ /* period for bandwidth allocation */
+ tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
+ 1 << (urb->ep->desc.bInterval - 1));
+
+ /* Allow urb->interval to override */
+ stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
+ stream->ps.bw_uperiod = stream->ps.bw_period << 3;
- /* stream->splits gets created from raw_mask later */
+ stream->ps.period = urb->interval;
+ stream->uperiod = urb->interval << 3;
+ stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
+ stream->ps.bw_period;
+
+ /* stream->splits gets created from cs_mask later */
stream->address = cpu_to_hc32(ehci, addr);
}
- stream->bandwidth = bandwidth;
- stream->udev = dev;
+ stream->ps.udev = dev;
+ stream->ps.ep = urb->ep;
stream->bEndpointAddress = is_input | epnum;
- stream->interval = interval;
stream->maxp = maxp;
}
-static void
-iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
-{
- stream->refcount--;
-
- /* free whenever just a dev->ep reference remains.
- * not like a QH -- no persistent state (toggle, halt)
- */
- if (stream->refcount == 1) {
- int is_in;
-
- // BUG_ON (!list_empty(&stream->td_list));
-
- while (!list_empty (&stream->free_list)) {
- struct list_head *entry;
-
- entry = stream->free_list.next;
- list_del (entry);
-
- /* knows about ITD vs SITD */
- if (stream->highspeed) {
- struct ehci_itd *itd;
-
- itd = list_entry (entry, struct ehci_itd,
- itd_list);
- dma_pool_free (ehci->itd_pool, itd,
- itd->itd_dma);
- } else {
- struct ehci_sitd *sitd;
-
- sitd = list_entry (entry, struct ehci_sitd,
- sitd_list);
- dma_pool_free (ehci->sitd_pool, sitd,
- sitd->sitd_dma);
- }
- }
-
- is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
- stream->bEndpointAddress &= 0x0f;
- if (stream->ep)
- stream->ep->hcpriv = NULL;
-
- kfree(stream);
- }
-}
-
-static inline struct ehci_iso_stream *
-iso_stream_get (struct ehci_iso_stream *stream)
-{
- if (likely (stream != NULL))
- stream->refcount++;
- return stream;
-}
-
static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
@@ -1106,11 +1170,8 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
if (unlikely (stream == NULL)) {
stream = iso_stream_alloc(GFP_ATOMIC);
if (likely (stream != NULL)) {
- /* dev->ep owns the initial refcount */
ep->hcpriv = stream;
- stream->ep = ep;
- iso_stream_init(ehci, stream, urb->dev, urb->pipe,
- urb->interval);
+ iso_stream_init(ehci, stream, urb);
}
/* if dev->ep [epnum] is a QH, hw is set */
@@ -1121,9 +1182,6 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
stream = NULL;
}
- /* caller guarantees an eventual matching iso_stream_put */
- stream = iso_stream_get (stream);
-
spin_unlock_irqrestore (&ehci->lock, flags);
return stream;
}
@@ -1158,7 +1216,7 @@ itd_sched_init(
dma_addr_t dma = urb->transfer_dma;
/* how many uframes are needed for these transfers */
- iso_sched->span = urb->number_of_packets * stream->interval;
+ iso_sched->span = urb->number_of_packets * stream->uperiod;
/* figure out per-uframe itd fields that we'll need later
* when we fit new itds into the schedule.
@@ -1231,17 +1289,19 @@ itd_urb_transaction (
spin_lock_irqsave (&ehci->lock, flags);
for (i = 0; i < num_itds; i++) {
- /* free_list.next might be cache-hot ... but maybe
- * the HC caches it too. avoid that issue for now.
+ /*
+ * Use iTDs from the free list, but not iTDs that may
+ * still be in use by the hardware.
*/
-
- /* prefer previously-allocated itds */
- if (likely (!list_empty(&stream->free_list))) {
- itd = list_entry (stream->free_list.prev,
+ if (likely(!list_empty(&stream->free_list))) {
+ itd = list_first_entry(&stream->free_list,
struct ehci_itd, itd_list);
+ if (itd->frame == ehci->now_frame)
+ goto alloc_itd;
list_del (&itd->itd_list);
itd_dma = itd->itd_dma;
} else {
+ alloc_itd:
spin_unlock_irqrestore (&ehci->lock, flags);
itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
&itd_dma);
@@ -1255,6 +1315,7 @@ itd_urb_transaction (
memset (itd, 0, sizeof *itd);
itd->itd_dma = itd_dma;
+ itd->frame = NO_FRAME;
list_add (&itd->itd_list, &sched->td_list);
}
spin_unlock_irqrestore (&ehci->lock, flags);
@@ -1267,102 +1328,155 @@ itd_urb_transaction (
/*-------------------------------------------------------------------------*/
+static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
+ struct ehci_iso_stream *stream, int sign)
+{
+ unsigned uframe;
+ unsigned i, j;
+ unsigned s_mask, c_mask, m;
+ int usecs = stream->ps.usecs;
+ int c_usecs = stream->ps.c_usecs;
+ int tt_usecs = stream->ps.tt_usecs;
+ struct ehci_tt *tt;
+
+ if (stream->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
+ return;
+ uframe = stream->ps.bw_phase << 3;
+
+ bandwidth_dbg(ehci, sign, "iso", &stream->ps);
+
+ if (sign < 0) { /* Release bandwidth */
+ usecs = -usecs;
+ c_usecs = -c_usecs;
+ tt_usecs = -tt_usecs;
+ }
+
+ if (!stream->splits) { /* High speed */
+ for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
+ i += stream->ps.bw_uperiod)
+ ehci->bandwidth[i] += usecs;
+
+ } else { /* Full speed */
+ s_mask = stream->ps.cs_mask;
+ c_mask = s_mask >> 8;
+
+ /* NOTE: adjustment needed for frame overflow */
+ for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
+ i += stream->ps.bw_uperiod) {
+ for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
+ (++j, m <<= 1)) {
+ if (s_mask & m)
+ ehci->bandwidth[i+j] += usecs;
+ else if (c_mask & m)
+ ehci->bandwidth[i+j] += c_usecs;
+ }
+ }
+
+ tt = find_tt(stream->ps.udev);
+ if (sign > 0)
+ list_add_tail(&stream->ps.ps_list, &tt->ps_list);
+ else
+ list_del(&stream->ps.ps_list);
+
+ for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
+ i += stream->ps.bw_period)
+ tt->bandwidth[i] += tt_usecs;
+ }
+}
+
static inline int
itd_slot_ok (
struct ehci_hcd *ehci,
- u32 mod,
- u32 uframe,
- u8 usecs,
- u32 period
+ struct ehci_iso_stream *stream,
+ unsigned uframe
)
{
- uframe %= period;
- do {
- /* can't commit more than 80% periodic == 100 usec */
- if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
- > (100 - usecs))
- return 0;
+ unsigned usecs;
- /* we know urb->interval is 2^N uframes */
- uframe += period;
- } while (uframe < mod);
+ /* convert "usecs we need" to "max already claimed" */
+ usecs = ehci->uframe_periodic_max - stream->ps.usecs;
+
+ for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
+ uframe += stream->ps.bw_uperiod) {
+ if (ehci->bandwidth[uframe] > usecs)
+ return 0;
+ }
return 1;
}
static inline int
sitd_slot_ok (
struct ehci_hcd *ehci,
- u32 mod,
struct ehci_iso_stream *stream,
- u32 uframe,
+ unsigned uframe,
struct ehci_iso_sched *sched,
- u32 period_uframes
+ struct ehci_tt *tt
)
{
- u32 mask, tmp;
- u32 frame, uf;
+ unsigned mask, tmp;
+ unsigned frame, uf;
+
+ mask = stream->ps.cs_mask << (uframe & 7);
- mask = stream->raw_mask << (uframe & 7);
+ /* for OUT, don't wrap SSPLIT into H-microframe 7 */
+ if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
+ return 0;
/* for IN, don't wrap CSPLIT into the next frame */
if (mask & ~0xffff)
return 0;
- /* this multi-pass logic is simple, but performance may
- * suffer when the schedule data isn't cached.
- */
-
/* check bandwidth */
- uframe %= period_uframes;
- do {
- u32 max_used;
-
- frame = uframe >> 3;
- uf = uframe & 7;
+ uframe &= stream->ps.bw_uperiod - 1;
+ frame = uframe >> 3;
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
- /* The tt's fullspeed bus bandwidth must be available.
- * tt_available scheduling guarantees 10+% for control/bulk.
- */
- if (!tt_available (ehci, period_uframes << 3,
- stream->udev, frame, uf, stream->tt_usecs))
- return 0;
+ /* The tt's fullspeed bus bandwidth must be available.
+ * tt_available scheduling guarantees 10+% for control/bulk.
+ */
+ uf = uframe & 7;
+ if (!tt_available(ehci, &stream->ps, tt, frame, uf))
+ return 0;
#else
- /* tt must be idle for start(s), any gap, and csplit.
- * assume scheduling slop leaves 10+% for control/bulk.
- */
- if (!tt_no_collision (ehci, period_uframes << 3,
- stream->udev, frame, mask))
- return 0;
+ /* tt must be idle for start(s), any gap, and csplit.
+ * assume scheduling slop leaves 10+% for control/bulk.
+ */
+ if (!tt_no_collision(ehci, stream->ps.bw_period,
+ stream->ps.udev, frame, mask))
+ return 0;
#endif
+ do {
+ unsigned max_used;
+ unsigned i;
+
/* check starts (OUT uses more than one) */
- max_used = 100 - stream->usecs;
- for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
- if (periodic_usecs (ehci, frame, uf) > max_used)
+ uf = uframe;
+ max_used = ehci->uframe_periodic_max - stream->ps.usecs;
+ for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
+ if (ehci->bandwidth[uf] > max_used)
return 0;
}
/* for IN, check CSPLIT */
- if (stream->c_usecs) {
- uf = uframe & 7;
- max_used = 100 - stream->c_usecs;
- do {
- tmp = 1 << uf;
- tmp <<= 8;
- if ((stream->raw_mask & tmp) == 0)
+ if (stream->ps.c_usecs) {
+ max_used = ehci->uframe_periodic_max -
+ stream->ps.c_usecs;
+ uf = uframe & ~7;
+ tmp = 1 << (2+8);
+ for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
+ if ((stream->ps.cs_mask & tmp) == 0)
continue;
- if (periodic_usecs (ehci, frame, uf)
- > max_used)
+ if (ehci->bandwidth[uf+i] > max_used)
return 0;
- } while (++uf < 8);
+ }
}
- /* we know urb->interval is 2^N uframes */
- uframe += period_uframes;
- } while (uframe < mod);
+ uframe += stream->ps.bw_uperiod;
+ } while (uframe < EHCI_BANDWIDTH_SIZE);
- stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7));
+ stream->ps.cs_mask <<= uframe & 7;
+ stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
return 1;
}
@@ -1377,8 +1491,6 @@ sitd_slot_ok (
* given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler!
*/
-#define SCHEDULE_SLOP 80 /* microframes */
-
static int
iso_stream_schedule (
struct ehci_hcd *ehci,
@@ -1386,118 +1498,184 @@ iso_stream_schedule (
struct ehci_iso_stream *stream
)
{
- u32 now, next, start, period, span;
- int status;
+ u32 now, base, next, start, period, span, now2;
+ u32 wrap = 0, skip = 0;
+ int status = 0;
unsigned mod = ehci->periodic_size << 3;
struct ehci_iso_sched *sched = urb->hcpriv;
+ bool empty = list_empty(&stream->td_list);
+ bool new_stream = false;
- period = urb->interval;
+ period = stream->uperiod;
span = sched->span;
- if (!stream->highspeed) {
- period <<= 3;
+ if (!stream->highspeed)
span <<= 3;
- }
- if (span > mod - SCHEDULE_SLOP) {
- ehci_dbg (ehci, "iso request %p too long\n", urb);
- status = -EFBIG;
- goto fail;
- }
+ /* Start a new isochronous stream? */
+ if (unlikely(empty && !hcd_periodic_completion_in_progress(
+ ehci_to_hcd(ehci), urb->ep))) {
- now = ehci_readl(ehci, &ehci->regs->frame_index) & (mod - 1);
+ /* Schedule the endpoint */
+ if (stream->ps.phase == NO_FRAME) {
+ int done = 0;
+ struct ehci_tt *tt = find_tt(stream->ps.udev);
- /* Typical case: reuse current schedule, stream is still active.
- * Hopefully there are no gaps from the host falling behind
- * (irq delays etc), but if there are we'll take the next
- * slot in the schedule, implicitly assuming URB_ISO_ASAP.
- */
- if (likely (!list_empty (&stream->td_list))) {
- u32 excess;
+ if (IS_ERR(tt)) {
+ status = PTR_ERR(tt);
+ goto fail;
+ }
+ compute_tt_budget(ehci->tt_budget, tt);
- /* For high speed devices, allow scheduling within the
- * isochronous scheduling threshold. For full speed devices
- * and Intel PCI-based controllers, don't (work around for
- * Intel ICH9 bug).
- */
- if (!stream->highspeed && ehci->fs_i_thresh)
- next = now + ehci->i_thresh;
- else
- next = now;
+ start = ((-(++ehci->random_frame)) << 3) & (period - 1);
- /* Fell behind (by up to twice the slop amount)?
- * We decide based on the time of the last currently-scheduled
- * slot, not the time of the next available slot.
- */
- excess = (stream->next_uframe - period - next) & (mod - 1);
- if (excess >= mod - 2 * SCHEDULE_SLOP)
- start = next + excess - mod + period *
- DIV_ROUND_UP(mod - excess, period);
- else
- start = next + excess + period;
- if (start - now >= mod) {
- ehci_dbg(ehci, "request %p would overflow (%d+%d >= %d)\n",
- urb, start - now - period, period,
- mod);
- status = -EFBIG;
- goto fail;
+ /* find a uframe slot with enough bandwidth.
+ * Early uframes are more precious because full-speed
+ * iso IN transfers can't use late uframes,
+ * and therefore they should be allocated last.
+ */
+ next = start;
+ start += period;
+ do {
+ start--;
+ /* check schedule: enough space? */
+ if (stream->highspeed) {
+ if (itd_slot_ok(ehci, stream, start))
+ done = 1;
+ } else {
+ if ((start % 8) >= 6)
+ continue;
+ if (sitd_slot_ok(ehci, stream, start,
+ sched, tt))
+ done = 1;
+ }
+ } while (start > next && !done);
+
+ /* no room in the schedule */
+ if (!done) {
+ ehci_dbg(ehci, "iso sched full %p", urb);
+ status = -ENOSPC;
+ goto fail;
+ }
+ stream->ps.phase = (start >> 3) &
+ (stream->ps.period - 1);
+ stream->ps.bw_phase = stream->ps.phase &
+ (stream->ps.bw_period - 1);
+ stream->ps.phase_uf = start & 7;
+ reserve_release_iso_bandwidth(ehci, stream, 1);
}
+
+ /* New stream is already scheduled; use the upcoming slot */
+ else {
+ start = (stream->ps.phase << 3) + stream->ps.phase_uf;
+ }
+
+ stream->next_uframe = start;
+ new_stream = true;
}
- /* need to schedule; when's the next (u)frame we could start?
- * this is bigger than ehci->i_thresh allows; scheduling itself
- * isn't free, the slop should handle reasonably slow cpus. it
- * can also help high bandwidth if the dma and irq loads don't
- * jump until after the queue is primed.
+ now = ehci_read_frame_index(ehci) & (mod - 1);
+
+ /* Take the isochronous scheduling threshold into account */
+ if (ehci->i_thresh)
+ next = now + ehci->i_thresh; /* uframe cache */
+ else
+ next = (now + 2 + 7) & ~0x07; /* full frame cache */
+
+ /*
+ * Use ehci->last_iso_frame as the base. There can't be any
+ * TDs scheduled for earlier than that.
*/
- else {
- start = SCHEDULE_SLOP + (now & ~0x07);
+ base = ehci->last_iso_frame << 3;
+ next = (next - base) & (mod - 1);
+ start = (stream->next_uframe - base) & (mod - 1);
- /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
+ if (unlikely(new_stream))
+ goto do_ASAP;
- /* find a uframe slot with enough bandwidth */
- next = start + period;
- for (; start < next; start++) {
+ /*
+ * Typical case: reuse current schedule, stream may still be active.
+ * Hopefully there are no gaps from the host falling behind
+ * (irq delays etc). If there are, the behavior depends on
+ * whether URB_ISO_ASAP is set.
+ */
+ now2 = (now - base) & (mod - 1);
- /* check schedule: enough space? */
- if (stream->highspeed) {
- if (itd_slot_ok(ehci, mod, start,
- stream->usecs, period))
- break;
- } else {
- if ((start % 8) >= 6)
- continue;
- if (sitd_slot_ok(ehci, mod, stream,
- start, sched, period))
- break;
- }
- }
+ /* Is the schedule already full? */
+ if (unlikely(!empty && start < period)) {
+ ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
+ urb, stream->next_uframe, base, period, mod);
+ status = -ENOSPC;
+ goto fail;
+ }
- /* no room in the schedule */
- if (start == next) {
- ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
- urb, now, now + mod);
- status = -ENOSPC;
- goto fail;
+ /* Is the next packet scheduled after the base time? */
+ if (likely(!empty || start <= now2 + period)) {
+
+ /* URB_ISO_ASAP: make sure that start >= next */
+ if (unlikely(start < next &&
+ (urb->transfer_flags & URB_ISO_ASAP)))
+ goto do_ASAP;
+
+ /* Otherwise use start, if it's not in the past */
+ if (likely(start >= now2))
+ goto use_start;
+
+ /* Otherwise we got an underrun while the queue was empty */
+ } else {
+ if (urb->transfer_flags & URB_ISO_ASAP)
+ goto do_ASAP;
+ wrap = mod;
+ now2 += mod;
+ }
+
+ /* How many uframes and packets do we need to skip? */
+ skip = (now2 - start + period - 1) & -period;
+ if (skip >= span) { /* Entirely in the past? */
+ ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
+ urb, start + base, span - period, now2 + base,
+ base);
+
+ /* Try to keep the last TD intact for scanning later */
+ skip = span - period;
+
+ /* Will it come before the current scan position? */
+ if (empty) {
+ skip = span; /* Skip the entire URB */
+ status = 1; /* and give it back immediately */
+ iso_sched_free(stream, sched);
+ sched = NULL;
}
}
+ urb->error_count = skip / period;
+ if (sched)
+ sched->first_packet = urb->error_count;
+ goto use_start;
+
+ do_ASAP:
+ /* Use the first slot after "next" */
+ start = next + ((start - next) & (period - 1));
+ use_start:
/* Tried to schedule too far into the future? */
- if (unlikely(start - now + span - period
- >= mod - 2 * SCHEDULE_SLOP)) {
- ehci_dbg(ehci, "request %p would overflow (%d+%d >= %d)\n",
- urb, start - now, span - period,
- mod - 2 * SCHEDULE_SLOP);
+ if (unlikely(start + span - period >= mod + wrap)) {
+ ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
+ urb, start, span - period, mod + wrap);
status = -EFBIG;
goto fail;
}
- stream->next_uframe = start & (mod - 1);
+ start += base;
+ stream->next_uframe = (start + skip) & (mod - 1);
/* report high speed start in uframes; full speed, in frames */
- urb->start_frame = stream->next_uframe;
+ urb->start_frame = start & (mod - 1);
if (!stream->highspeed)
urb->start_frame >>= 3;
- return 0;
+
+ /* Make sure scan_isoc() sees these */
+ if (ehci->isoc_count == 0)
+ ehci->last_iso_frame = now >> 3;
+ return status;
fail:
iso_sched_free(stream, sched);
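
A quick arithmetic note on the underrun path above (illustrative only, not part of the patch): with the period a power of two, the expression (now2 - start + period - 1) & -period rounds the gap between the stale start slot and "now" up to the next multiple of the period, which is how many uframes of already-expired slots the new URB must skip. A minimal standalone check of that rounding, with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned period = 8;			/* power-of-two uperiod, as in the driver */
	unsigned start = 40, now2 = 75;		/* scheduled start fell behind "now" */
	unsigned skip = (now2 - start + period - 1) & -period;

	/* gap is 35 uframes; rounded up to a period multiple -> 40 uframes, 5 packets */
	printf("skip %u uframes = %u packets\n", skip, skip / period);
	return 0;
}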
@@ -1584,8 +1762,7 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
}
/* fit urb's itds into the selected schedule slot; activate as needed */
-static int
-itd_link_urb (
+static void itd_link_urb(
struct ehci_hcd *ehci,
struct urb *urb,
unsigned mod,
@@ -1599,20 +1776,20 @@ itd_link_urb (
next_uframe = stream->next_uframe & (mod - 1);
- if (unlikely (list_empty(&stream->td_list))) {
+ if (unlikely (list_empty(&stream->td_list)))
ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
- ehci_vdbg (ehci,
- "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
- urb->dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
- urb->interval,
- next_uframe >> 3, next_uframe & 0x7);
+
+ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_quirk_pll_disable();
}
+
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill iTDs uframe by uframe */
- for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
+ for (packet = iso_sched->first_packet, itd = NULL;
+ packet < urb->number_of_packets;) {
if (itd == NULL) {
/* ASSERT: we have all necessary itds */
// BUG_ON (list_empty (&iso_sched->td_list));
@@ -1622,7 +1799,7 @@ itd_link_urb (
itd = list_entry (iso_sched->td_list.next,
struct ehci_itd, itd_list);
list_move_tail (&itd->itd_list, &stream->td_list);
- itd->stream = iso_stream_get (stream);
+ itd->stream = stream;
itd->urb = urb;
itd_init (ehci, stream, itd);
}
@@ -1632,7 +1809,7 @@ itd_link_urb (
itd_patch(ehci, itd, iso_sched, packet, uframe);
- next_uframe += stream->interval;
+ next_uframe += stream->uperiod;
next_uframe &= mod - 1;
packet++;
@@ -1647,10 +1824,10 @@ itd_link_urb (
/* don't need that schedule data any more */
iso_sched_free (stream, iso_sched);
- urb->hcpriv = NULL;
+ urb->hcpriv = stream;
- timer_action (ehci, TIMER_IO_WATCHDOG);
- return enable_periodic(ehci);
+ ++ehci->isoc_count;
+ enable_periodic(ehci);
}
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
@@ -1665,11 +1842,8 @@ itd_link_urb (
* (b) only this endpoint's completions submit URBs. It seems some silicon
* corrupts things if you reuse completed descriptors very quickly...
*/
-static unsigned
-itd_complete (
- struct ehci_hcd *ehci,
- struct ehci_itd *itd
-) {
+static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
+{
struct urb *urb = itd->urb;
struct usb_iso_packet_descriptor *desc;
u32 t;
@@ -1677,7 +1851,7 @@ itd_complete (
int urb_index = -1;
struct ehci_iso_stream *stream = itd->stream;
struct usb_device *dev;
- unsigned retval = false;
+ bool retval = false;
/* for each uframe with a packet */
for (uframe = 0; uframe < 8; uframe++) {
@@ -1712,7 +1886,7 @@ itd_complete (
urb->actual_length += desc->actual_length;
} else {
/* URB was too late */
- desc->status = -EXDEV;
+ urb->error_count++;
}
}
@@ -1730,39 +1904,33 @@ itd_complete (
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
- (void) disable_periodic(ehci);
+
+ --ehci->isoc_count;
+ disable_periodic(ehci);
+
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
+ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_quirk_pll_enable();
+ }
- if (unlikely(list_is_singular(&stream->td_list))) {
+ if (unlikely(list_is_singular(&stream->td_list)))
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
- ehci_vdbg (ehci,
- "deschedule devp %s ep%d%s-iso\n",
- dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
- }
- iso_stream_put (ehci, stream);
done:
itd->urb = NULL;
- if (ehci->clock_frame != itd->frame || itd->index[7] != -1) {
- /* OK to recycle this ITD now. */
- itd->stream = NULL;
- list_move(&itd->itd_list, &stream->free_list);
- iso_stream_put(ehci, stream);
- } else {
- /* HW might remember this ITD, so we can't recycle it yet.
- * Move it to a safe place until a new frame starts.
- */
- list_move(&itd->itd_list, &ehci->cached_itd_list);
- if (stream->refcount == 2) {
- /* If iso_stream_put() were called here, stream
- * would be freed. Instead, just prevent reuse.
- */
- stream->ep->hcpriv = NULL;
- stream->ep = NULL;
- }
+
+ /* Add to the end of the free list for later reuse */
+ list_move_tail(&itd->itd_list, &stream->free_list);
+
+ /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
+ if (list_empty(&stream->td_list)) {
+ list_splice_tail_init(&stream->free_list,
+ &ehci->cached_itd_list);
+ start_free_itds(ehci);
}
+
return retval;
}
@@ -1781,9 +1949,9 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
ehci_dbg (ehci, "can't get iso stream\n");
return -ENOMEM;
}
- if (unlikely (urb->interval != stream->interval)) {
+ if (unlikely(urb->interval != stream->uperiod)) {
ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
- stream->interval, urb->interval);
+ stream->uperiod, urb->interval);
goto done;
}
@@ -1815,16 +1983,17 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
if (unlikely(status))
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
- if (likely (status == 0))
+ if (likely(status == 0)) {
itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
- else
+ } else if (status > 0) {
+ status = 0;
+ ehci_urb_done(ehci, urb, 0);
+ } else {
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
-done_not_linked:
+ }
+ done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
-
-done:
- if (unlikely (status < 0))
- iso_stream_put (ehci, stream);
+ done:
return status;
}
@@ -1847,7 +2016,7 @@ sitd_sched_init(
dma_addr_t dma = urb->transfer_dma;
/* how many frames are needed for these transfers */
- iso_sched->span = urb->number_of_packets * stream->interval;
+ iso_sched->span = urb->number_of_packets * stream->ps.period;
/* figure out per-frame sitd fields that we'll need later
* when we fit new sitds into the schedule.
@@ -1913,17 +2082,19 @@ sitd_urb_transaction (
* means we never need two sitds for full speed packets.
*/
- /* free_list.next might be cache-hot ... but maybe
- * the HC caches it too. avoid that issue for now.
+ /*
+ * Use siTDs from the free list, but not siTDs that may
+ * still be in use by the hardware.
*/
-
- /* prefer previously-allocated sitds */
- if (!list_empty(&stream->free_list)) {
- sitd = list_entry (stream->free_list.prev,
+ if (likely(!list_empty(&stream->free_list))) {
+ sitd = list_first_entry(&stream->free_list,
struct ehci_sitd, sitd_list);
+ if (sitd->frame == ehci->now_frame)
+ goto alloc_sitd;
list_del (&sitd->sitd_list);
sitd_dma = sitd->sitd_dma;
} else {
+ alloc_sitd:
spin_unlock_irqrestore (&ehci->lock, flags);
sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
&sitd_dma);
@@ -1937,6 +2108,7 @@ sitd_urb_transaction (
memset (sitd, 0, sizeof *sitd);
sitd->sitd_dma = sitd_dma;
+ sitd->frame = NO_FRAME;
list_add (&sitd->sitd_list, &iso_sched->td_list);
}
@@ -1992,8 +2164,7 @@ sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
}
/* fit urb's sitds into the selected schedule slot; activate as needed */
-static int
-sitd_link_urb (
+static void sitd_link_urb(
struct ehci_hcd *ehci,
struct urb *urb,
unsigned mod,
@@ -2007,21 +2178,20 @@ sitd_link_urb (
next_uframe = stream->next_uframe;
- if (list_empty(&stream->td_list)) {
+ if (list_empty(&stream->td_list))
/* usbfs ignores TT bandwidth */
ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
- ehci_vdbg (ehci,
- "sched devp %s ep%d%s-iso [%d] %dms/%04x\n",
- urb->dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
- (next_uframe >> 3) & (ehci->periodic_size - 1),
- stream->interval, hc32_to_cpu(ehci, stream->splits));
+
+ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_quirk_pll_disable();
}
+
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill sITDs frame by frame */
- for (packet = 0, sitd = NULL;
+ for (packet = sched->first_packet, sitd = NULL;
packet < urb->number_of_packets;
packet++) {
@@ -2033,23 +2203,23 @@ sitd_link_urb (
sitd = list_entry (sched->td_list.next,
struct ehci_sitd, sitd_list);
list_move_tail (&sitd->sitd_list, &stream->td_list);
- sitd->stream = iso_stream_get (stream);
+ sitd->stream = stream;
sitd->urb = urb;
sitd_patch(ehci, stream, sitd, sched, packet);
sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
sitd);
- next_uframe += stream->interval << 3;
+ next_uframe += stream->uperiod;
}
stream->next_uframe = next_uframe & (mod - 1);
/* don't need that schedule data any more */
iso_sched_free (stream, sched);
- urb->hcpriv = NULL;
+ urb->hcpriv = stream;
- timer_action (ehci, TIMER_IO_WATCHDOG);
- return enable_periodic(ehci);
+ ++ehci->isoc_count;
+ enable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/
@@ -2067,25 +2237,22 @@ sitd_link_urb (
* (b) only this endpoint's completions submit URBs. It seems some silicon
* corrupts things if you reuse completed descriptors very quickly...
*/
-static unsigned
-sitd_complete (
- struct ehci_hcd *ehci,
- struct ehci_sitd *sitd
-) {
+static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
+{
struct urb *urb = sitd->urb;
struct usb_iso_packet_descriptor *desc;
u32 t;
int urb_index = -1;
struct ehci_iso_stream *stream = sitd->stream;
struct usb_device *dev;
- unsigned retval = false;
+ bool retval = false;
urb_index = sitd->index;
desc = &urb->iso_frame_desc [urb_index];
t = hc32_to_cpup(ehci, &sitd->hw_results);
/* report transfer status */
- if (t & SITD_ERRS) {
+ if (unlikely(t & SITD_ERRS)) {
urb->error_count++;
if (t & SITD_STS_DBE)
desc->status = usb_pipein (urb->pipe)
@@ -2095,6 +2262,9 @@ sitd_complete (
desc->status = -EOVERFLOW;
else /* XACT, MMF, etc */
desc->status = -EPROTO;
+ } else if (unlikely(t & SITD_STS_ACTIVE)) {
+ /* URB was too late */
+ urb->error_count++;
} else {
desc->status = 0;
desc->actual_length = desc->length - SITD_LENGTH(t);
@@ -2115,39 +2285,33 @@ sitd_complete (
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
- (void) disable_periodic(ehci);
+
+ --ehci->isoc_count;
+ disable_periodic(ehci);
+
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
+ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_quirk_pll_enable();
+ }
- if (list_is_singular(&stream->td_list)) {
+ if (list_is_singular(&stream->td_list))
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
- ehci_vdbg (ehci,
- "deschedule devp %s ep%d%s-iso\n",
- dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
- }
- iso_stream_put (ehci, stream);
done:
sitd->urb = NULL;
- if (ehci->clock_frame != sitd->frame) {
- /* OK to recycle this SITD now. */
- sitd->stream = NULL;
- list_move(&sitd->sitd_list, &stream->free_list);
- iso_stream_put(ehci, stream);
- } else {
- /* HW might remember this SITD, so we can't recycle it yet.
- * Move it to a safe place until a new frame starts.
- */
- list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
- if (stream->refcount == 2) {
- /* If iso_stream_put() were called here, stream
- * would be freed. Instead, just prevent reuse.
- */
- stream->ep->hcpriv = NULL;
- stream->ep = NULL;
- }
+
+ /* Add to the end of the free list for later reuse */
+ list_move_tail(&sitd->sitd_list, &stream->free_list);
+
+ /* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
+ if (list_empty(&stream->td_list)) {
+ list_splice_tail_init(&stream->free_list,
+ &ehci->cached_sitd_list);
+ start_free_itds(ehci);
}
+
return retval;
}
@@ -2165,9 +2329,9 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
ehci_dbg (ehci, "can't get iso stream\n");
return -ENOMEM;
}
- if (urb->interval != stream->interval) {
+ if (urb->interval != stream->ps.period) {
ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
- stream->interval, urb->interval);
+ stream->ps.period, urb->interval);
goto done;
}
@@ -2197,77 +2361,47 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
if (unlikely(status))
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
- if (status == 0)
+ if (likely(status == 0)) {
sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
- else
+ } else if (status > 0) {
+ status = 0;
+ ehci_urb_done(ehci, urb, 0);
+ } else {
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
-done_not_linked:
+ }
+ done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
-
-done:
- if (status < 0)
- iso_stream_put (ehci, stream);
+ done:
return status;
}
/*-------------------------------------------------------------------------*/
-static void free_cached_lists(struct ehci_hcd *ehci)
+static void scan_isoc(struct ehci_hcd *ehci)
{
- struct ehci_itd *itd, *n;
- struct ehci_sitd *sitd, *sn;
-
- list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
- struct ehci_iso_stream *stream = itd->stream;
- itd->stream = NULL;
- list_move(&itd->itd_list, &stream->free_list);
- iso_stream_put(ehci, stream);
- }
-
- list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
- struct ehci_iso_stream *stream = sitd->stream;
- sitd->stream = NULL;
- list_move(&sitd->sitd_list, &stream->free_list);
- iso_stream_put(ehci, stream);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void
-scan_periodic (struct ehci_hcd *ehci)
-{
- unsigned now_uframe, frame, clock, clock_frame, mod;
- unsigned modified;
-
- mod = ehci->periodic_size << 3;
+ unsigned uf, now_frame, frame;
+ unsigned fmask = ehci->periodic_size - 1;
+ bool modified, live;
/*
* When running, scan from last scan point up to "now"
* else clean up by scanning everything that's left.
* Touches as few pages as possible: cache-friendly.
*/
- now_uframe = ehci->next_uframe;
- if (HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
- clock = ehci_readl(ehci, &ehci->regs->frame_index);
- clock_frame = (clock >> 3) & (ehci->periodic_size - 1);
+ if (ehci->rh_state >= EHCI_RH_RUNNING) {
+ uf = ehci_read_frame_index(ehci);
+ now_frame = (uf >> 3) & fmask;
+ live = true;
} else {
- clock = now_uframe + mod - 1;
- clock_frame = -1;
- }
- if (ehci->clock_frame != clock_frame) {
- free_cached_lists(ehci);
- ehci->clock_frame = clock_frame;
+ now_frame = (ehci->last_iso_frame - 1) & fmask;
+ live = false;
}
- clock &= mod - 1;
- clock_frame = clock >> 3;
+ ehci->now_frame = now_frame;
+ frame = ehci->last_iso_frame;
for (;;) {
union ehci_shadow q, *q_p;
__hc32 type, *hw_p;
- unsigned incomplete = false;
-
- frame = now_uframe >> 3;
restart:
/* scan each element in frame's queue for completions */
@@ -2275,43 +2409,17 @@ restart:
hw_p = &ehci->periodic [frame];
q.ptr = q_p->ptr;
type = Q_NEXT_TYPE(ehci, *hw_p);
- modified = 0;
+ modified = false;
while (q.ptr != NULL) {
- unsigned uf;
- union ehci_shadow temp;
- int live;
-
- live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state);
switch (hc32_to_cpu(ehci, type)) {
- case Q_TYPE_QH:
- /* handle any completions */
- temp.qh = qh_get (q.qh);
- type = Q_NEXT_TYPE(ehci, q.qh->hw->hw_next);
- q = q.qh->qh_next;
- modified = qh_completions (ehci, temp.qh);
- if (unlikely(list_empty(&temp.qh->qtd_list) ||
- temp.qh->needs_rescan))
- intr_deschedule (ehci, temp.qh);
- qh_put (temp.qh);
- break;
- case Q_TYPE_FSTN:
- /* for "save place" FSTNs, look at QH entries
- * in the previous frame for completions.
- */
- if (q.fstn->hw_prev != EHCI_LIST_END(ehci)) {
- dbg ("ignoring completions from FSTNs");
- }
- type = Q_NEXT_TYPE(ehci, q.fstn->hw_next);
- q = q.fstn->fstn_next;
- break;
case Q_TYPE_ITD:
/* If this ITD is still active, leave it for
* later processing ... check the next entry.
* No need to check for activity unless the
* frame is current.
*/
- if (frame == clock_frame && live) {
+ if (frame == now_frame && live) {
rmb();
for (uf = 0; uf < 8; uf++) {
if (q.itd->hw_transaction[uf] &
@@ -2319,7 +2427,6 @@ restart:
break;
}
if (uf < 8) {
- incomplete = true;
q_p = &q.itd->itd_next;
hw_p = &q.itd->hw_next;
type = Q_NEXT_TYPE(ehci,
@@ -2335,7 +2442,12 @@ restart:
* pointer for much longer, if at all.
*/
*q_p = q.itd->itd_next;
- *hw_p = q.itd->hw_next;
+ if (!ehci->use_dummy_qh ||
+ q.itd->hw_next != EHCI_LIST_END(ehci))
+ *hw_p = q.itd->hw_next;
+ else
+ *hw_p = cpu_to_hc32(ehci,
+ ehci->dummy->qh_dma);
type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
wmb();
modified = itd_complete (ehci, q.itd);
@@ -2347,14 +2459,12 @@ restart:
* No need to check for activity unless the
* frame is current.
*/
- if (((frame == clock_frame) ||
- (((frame + 1) & (ehci->periodic_size - 1))
- == clock_frame))
+ if (((frame == now_frame) ||
+ (((frame + 1) & fmask) == now_frame))
&& live
&& (q.sitd->hw_results &
SITD_ACTIVE(ehci))) {
- incomplete = true;
q_p = &q.sitd->sitd_next;
hw_p = &q.sitd->hw_next;
type = Q_NEXT_TYPE(ehci,
@@ -2368,68 +2478,40 @@ restart:
* URB completion.
*/
*q_p = q.sitd->sitd_next;
- *hw_p = q.sitd->hw_next;
+ if (!ehci->use_dummy_qh ||
+ q.sitd->hw_next != EHCI_LIST_END(ehci))
+ *hw_p = q.sitd->hw_next;
+ else
+ *hw_p = cpu_to_hc32(ehci,
+ ehci->dummy->qh_dma);
type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
wmb();
modified = sitd_complete (ehci, q.sitd);
q = *q_p;
break;
default:
- dbg ("corrupt type %d frame %d shadow %p",
+ ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
type, frame, q.ptr);
// BUG ();
+ /* FALL THROUGH */
+ case Q_TYPE_QH:
+ case Q_TYPE_FSTN:
+ /* End of the iTDs and siTDs */
q.ptr = NULL;
+ break;
}
/* assume completion callbacks modify the queue */
- if (unlikely (modified)) {
- if (likely(ehci->periodic_sched > 0))
- goto restart;
- /* short-circuit this scan */
- now_uframe = clock;
- break;
- }
+ if (unlikely(modified && ehci->isoc_count > 0))
+ goto restart;
}
- /* If we can tell we caught up to the hardware, stop now.
- * We can't advance our scan without collecting the ISO
- * transfers that are still pending in this frame.
- */
- if (incomplete && HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
- ehci->next_uframe = now_uframe;
+ /* Stop when we have reached the current frame */
+ if (frame == now_frame)
break;
- }
-
- // FIXME: this assumes we won't get lapped when
- // latencies climb; that should be rare, but...
- // detect it, and just go all the way around.
- // FLR might help detect this case, so long as latencies
- // don't exceed periodic_size msec (default 1.024 sec).
-
- // FIXME: likewise assumes HC doesn't halt mid-scan
-
- if (now_uframe == clock) {
- unsigned now;
- if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
- || ehci->periodic_sched == 0)
- break;
- ehci->next_uframe = now_uframe;
- now = ehci_readl(ehci, &ehci->regs->frame_index) &
- (mod - 1);
- if (now_uframe == now)
- break;
-
- /* rescan the rest of this frame, then ... */
- clock = now;
- clock_frame = clock >> 3;
- if (ehci->clock_frame != clock_frame) {
- free_cached_lists(ehci);
- ehci->clock_frame = clock_frame;
- }
- } else {
- now_uframe++;
- now_uframe &= mod - 1;
- }
+ /* The last frame may still have active siTDs */
+ ehci->last_iso_frame = frame;
+ frame = (frame + 1) & fmask;
}
}
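
Taken together, the ehci-sched.c changes above replace the old periodic_usecs() walk over the hardware schedule with a flat per-microframe reservation table: a submission fits only if every microframe touched by its period stays within uframe_periodic_max (normally 100 us, i.e. 80% of a 125 us microframe), and bandwidth is reserved or released by adding or subtracting the same amount at each of those microframes. The sketch below only illustrates that bookkeeping; it is not code from the patch, and EHCI_BANDWIDTH_SIZE is assumed to be 64 as in the patch's ehci.h.

#include <stdio.h>

#define EHCI_BANDWIDTH_SIZE	64	/* assumed: 8 frames x 8 microframes */

struct bw_table {
	unsigned char bandwidth[EHCI_BANDWIDTH_SIZE];	/* usecs already reserved */
	unsigned uframe_periodic_max;			/* usually 100 (80% of 125) */
};

/* the check_period() idea: every uframe hit by uperiod must have room for usecs */
static int check_uperiod(const struct bw_table *t, unsigned uframe,
			 unsigned uperiod, unsigned usecs)
{
	unsigned limit = t->uframe_periodic_max - usecs;

	for (; uframe < EHCI_BANDWIDTH_SIZE; uframe += uperiod)
		if (t->bandwidth[uframe] > limit)
			return 0;
	return 1;
}

/* the reserve_release_*_bandwidth() idea: sign +1 reserves, -1 releases */
static void reserve_uperiod(struct bw_table *t, unsigned uframe,
			    unsigned uperiod, int usecs, int sign)
{
	for (; uframe < EHCI_BANDWIDTH_SIZE; uframe += uperiod)
		t->bandwidth[uframe] += sign * usecs;
}

int main(void)
{
	struct bw_table t = { .uframe_periodic_max = 100 };

	reserve_uperiod(&t, 2, 8, 95, 1);		/* uframe 2 of every frame is nearly full */
	printf("%d\n", check_uperiod(&t, 2, 8, 10));	/* 0: 95 + 10 would exceed 100 */
	printf("%d\n", check_uperiod(&t, 3, 8, 10));	/* 1: uframe 3 is still free */
	reserve_uperiod(&t, 2, 8, 95, -1);		/* release the reservation */
	printf("%d\n", check_uperiod(&t, 2, 8, 10));	/* 1: room again */
	return 0;
}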
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
new file mode 100644
index 00000000000..cf126767386
--- /dev/null
+++ b/drivers/usb/host/ehci-sead3.c
@@ -0,0 +1,187 @@
+/*
+ * MIPS CI13320A EHCI Host Controller driver
+ * Based on "ehci-au1xxx.c" by K.Boge <karsten.boge@amd.com>
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+static int ehci_sead3_setup(struct usb_hcd *hcd)
+{
+ int ret;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ ehci->caps = hcd->regs + 0x100;
+
+#ifdef __BIG_ENDIAN
+ ehci->big_endian_mmio = 1;
+ ehci->big_endian_desc = 1;
+#endif
+
+ ret = ehci_setup(hcd);
+ if (ret)
+ return ret;
+
+ ehci->need_io_watchdog = 0;
+
+ /* Set burst length to 16 words. */
+ ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]);
+
+ return ret;
+}
+
+const struct hc_driver ehci_sead3_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "SEAD-3 EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ *
+ */
+ .reset = ehci_sead3_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_sead3_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ if (pdev->resource[1].flags != IORESOURCE_IRQ) {
+ pr_debug("resource[1] is not IORESOURCE_IRQ");
+ return -ENOMEM;
+ }
+ hcd = usb_create_hcd(&ehci_sead3_hc_driver, &pdev->dev, "SEAD-3");
+ if (!hcd)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto err1;
+ }
+
+ /* Root hub has integrated TT. */
+ hcd->has_tt = 1;
+
+ ret = usb_add_hcd(hcd, pdev->resource[1].start,
+ IRQF_SHARED);
+ if (ret == 0) {
+ platform_set_drvdata(pdev, hcd);
+ device_wakeup_enable(hcd->self.controller);
+ return ret;
+ }
+
+err1:
+ usb_put_hcd(hcd);
+ return ret;
+}
+
+static int ehci_hcd_sead3_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ehci_hcd_sead3_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ bool do_wakeup = device_may_wakeup(dev);
+
+ return ehci_suspend(hcd, do_wakeup);
+}
+
+static int ehci_hcd_sead3_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ ehci_resume(hcd, false);
+ return 0;
+}
+
+static const struct dev_pm_ops sead3_ehci_pmops = {
+ .suspend = ehci_hcd_sead3_drv_suspend,
+ .resume = ehci_hcd_sead3_drv_resume,
+};
+
+#define SEAD3_EHCI_PMOPS (&sead3_ehci_pmops)
+
+#else
+#define SEAD3_EHCI_PMOPS NULL
+#endif
+
+static struct platform_driver ehci_hcd_sead3_driver = {
+ .probe = ehci_hcd_sead3_drv_probe,
+ .remove = ehci_hcd_sead3_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "sead3-ehci",
+ .owner = THIS_MODULE,
+ .pm = SEAD3_EHCI_PMOPS,
+ }
+};
+
+MODULE_ALIAS("platform:sead3-ehci");
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
new file mode 100644
index 00000000000..9b9b9f5b016
--- /dev/null
+++ b/drivers/usb/host/ehci-sh.c
@@ -0,0 +1,206 @@
+/*
+ * SuperH EHCI host controller driver
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * Based on ohci-sh.c and ehci-atmel.c.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/platform_data/ehci-sh.h>
+
+struct ehci_sh_priv {
+ struct clk *iclk, *fclk;
+ struct usb_hcd *hcd;
+};
+
+static int ehci_sh_reset(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ ehci->caps = hcd->regs;
+
+ return ehci_setup(hcd);
+}
+
+static const struct hc_driver ehci_sh_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "SuperH EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_sh_reset,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+
+#ifdef CONFIG_PM
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+#endif
+
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_sh_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct ehci_sh_priv *priv;
+ struct ehci_sh_platdata *pdata;
+ struct usb_hcd *hcd;
+ int irq, ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ ret = -ENODEV;
+ goto fail_create_hcd;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev,
+ "Found HC with no IRQ. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ ret = -ENODEV;
+ goto fail_create_hcd;
+ }
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ /* initialize hcd */
+ hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto fail_create_hcd;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto fail_request_resource;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct ehci_sh_priv),
+ GFP_KERNEL);
+ if (!priv) {
+ dev_dbg(&pdev->dev, "error allocating priv data\n");
+ ret = -ENOMEM;
+ goto fail_request_resource;
+ }
+
+ /* These are optional, we don't care if they fail */
+ priv->fclk = devm_clk_get(&pdev->dev, "usb_fck");
+ if (IS_ERR(priv->fclk))
+ priv->fclk = NULL;
+
+ priv->iclk = devm_clk_get(&pdev->dev, "usb_ick");
+ if (IS_ERR(priv->iclk))
+ priv->iclk = NULL;
+
+ clk_enable(priv->fclk);
+ clk_enable(priv->iclk);
+
+ if (pdata && pdata->phy_init)
+ pdata->phy_init();
+
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to add hcd");
+ goto fail_add_hcd;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ priv->hcd = hcd;
+ platform_set_drvdata(pdev, priv);
+
+ return ret;
+
+fail_add_hcd:
+ clk_disable(priv->iclk);
+ clk_disable(priv->fclk);
+
+fail_request_resource:
+ usb_put_hcd(hcd);
+fail_create_hcd:
+ dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
+
+ return ret;
+}
+
+static int ehci_hcd_sh_remove(struct platform_device *pdev)
+{
+ struct ehci_sh_priv *priv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = priv->hcd;
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+
+ clk_disable(priv->fclk);
+ clk_disable(priv->iclk);
+
+ return 0;
+}
+
+static void ehci_hcd_sh_shutdown(struct platform_device *pdev)
+{
+ struct ehci_sh_priv *priv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = priv->hcd;
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ehci_hcd_sh_driver = {
+ .probe = ehci_hcd_sh_probe,
+ .remove = ehci_hcd_sh_remove,
+ .shutdown = ehci_hcd_sh_shutdown,
+ .driver = {
+ .name = "sh_ehci",
+ .owner = THIS_MODULE,
+ },
+};
+
+MODULE_ALIAS("platform:sh_ehci");
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
new file mode 100644
index 00000000000..1d59958ad0c
--- /dev/null
+++ b/drivers/usb/host/ehci-spear.c
@@ -0,0 +1,195 @@
+/*
+* Driver for EHCI HCD on SPEAr SOC
+*
+* Copyright (C) 2010 ST Micro Electronics,
+* Deepak Sikri <deepak.sikri@st.com>
+*
+* Based on various ehci-*.c drivers
+*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file COPYING in the main directory of this archive for
+* more details.
+*/
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+#define DRIVER_DESC "EHCI SPEAr driver"
+
+static const char hcd_name[] = "SPEAr-ehci";
+
+struct spear_ehci {
+ struct clk *clk;
+};
+
+#define to_spear_ehci(hcd) (struct spear_ehci *)(hcd_to_ehci(hcd)->priv)
+
+static struct hc_driver __read_mostly ehci_spear_hc_driver;
+
+#ifdef CONFIG_PM_SLEEP
+static int ehci_spear_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ bool do_wakeup = device_may_wakeup(dev);
+
+ return ehci_suspend(hcd, do_wakeup);
+}
+
+static int ehci_spear_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ ehci_resume(hcd, false);
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend,
+ ehci_spear_drv_resume);
+
+static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct spear_ehci *sehci;
+ struct resource *res;
+ struct clk *usbh_clk;
+ const struct hc_driver *driver = &ehci_spear_hc_driver;
+ int irq, retval;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ retval = irq;
+ goto fail;
+ }
+
+ /*
+ * Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail;
+
+ usbh_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(usbh_clk)) {
+ dev_err(&pdev->dev, "Error getting interface clock\n");
+ retval = PTR_ERR(usbh_clk);
+ goto fail;
+ }
+
+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto fail;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ retval = -ENODEV;
+ goto err_put_hcd;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
+ goto err_put_hcd;
+ }
+
+ sehci = to_spear_ehci(hcd);
+ sehci->clk = usbh_clk;
+
+ /* registers start at offset 0x0 */
+ hcd_to_ehci(hcd)->caps = hcd->regs;
+
+ clk_prepare_enable(sehci->clk);
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval)
+ goto err_stop_ehci;
+
+ device_wakeup_enable(hcd->self.controller);
+ return retval;
+
+err_stop_ehci:
+ clk_disable_unprepare(sehci->clk);
+err_put_hcd:
+ usb_put_hcd(hcd);
+fail:
+ dev_err(&pdev->dev, "init fail, %d\n", retval);
+
+ return retval;
+}
+
+static int spear_ehci_hcd_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct spear_ehci *sehci = to_spear_ehci(hcd);
+
+ usb_remove_hcd(hcd);
+
+ if (sehci->clk)
+ clk_disable_unprepare(sehci->clk);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static struct of_device_id spear_ehci_id_table[] = {
+ { .compatible = "st,spear600-ehci", },
+ { },
+};
+
+static struct platform_driver spear_ehci_hcd_driver = {
+ .probe = spear_ehci_hcd_drv_probe,
+ .remove = spear_ehci_hcd_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "spear-ehci",
+ .bus = &platform_bus_type,
+ .pm = &ehci_spear_pm_ops,
+ .of_match_table = spear_ehci_id_table,
+ }
+};
+
+static const struct ehci_driver_overrides spear_overrides __initdata = {
+ .extra_priv_size = sizeof(struct spear_ehci),
+};
+
+static int __init ehci_spear_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_spear_hc_driver, &spear_overrides);
+ return platform_driver_register(&spear_ehci_hcd_driver);
+}
+module_init(ehci_spear_init);
+
+static void __exit ehci_spear_cleanup(void)
+{
+ platform_driver_unregister(&spear_ehci_hcd_driver);
+}
+module_exit(ehci_spear_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:spear-ehci");
+MODULE_AUTHOR("Deepak Sikri");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
new file mode 100644
index 00000000000..f6459dfb6f5
--- /dev/null
+++ b/drivers/usb/host/ehci-sysfs.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2007 by Alan Stern
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* this file is part of ehci-hcd.c */
+
+
+/* Display the ports dedicated to the companion controller */
+static ssize_t show_companion(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ehci_hcd *ehci;
+ int nports, index, n;
+ int count = PAGE_SIZE;
+ char *ptr = buf;
+
+ ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ nports = HCS_N_PORTS(ehci->hcs_params);
+
+ for (index = 0; index < nports; ++index) {
+ if (test_bit(index, &ehci->companion_ports)) {
+ n = scnprintf(ptr, count, "%d\n", index + 1);
+ ptr += n;
+ count -= n;
+ }
+ }
+ return ptr - buf;
+}
+
+/*
+ * Dedicate or undedicate a port to the companion controller.
+ * Syntax is "[-]portnum", where a leading '-' sign means
+ * return control of the port to the EHCI controller.
+ */
+static ssize_t store_companion(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ehci_hcd *ehci;
+ int portnum, new_owner;
+
+ ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ new_owner = PORT_OWNER; /* Owned by companion */
+ if (sscanf(buf, "%d", &portnum) != 1)
+ return -EINVAL;
+ if (portnum < 0) {
+ portnum = - portnum;
+ new_owner = 0; /* Owned by EHCI */
+ }
+ if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params))
+ return -ENOENT;
+ portnum--;
+ if (new_owner)
+ set_bit(portnum, &ehci->companion_ports);
+ else
+ clear_bit(portnum, &ehci->companion_ports);
+ set_owner(ehci, portnum, new_owner);
+ return count;
+}
+static DEVICE_ATTR(companion, 0644, show_companion, store_companion);
+
+
+/*
+ * Display / Set uframe_periodic_max
+ */
+static ssize_t show_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ehci_hcd *ehci;
+ int n;
+
+ ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
+ return n;
+}
+
+
+static ssize_t store_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ehci_hcd *ehci;
+ unsigned uframe_periodic_max;
+ unsigned uframe;
+ unsigned long flags;
+ ssize_t ret;
+
+ ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+ return -EINVAL;
+
+ if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
+ ehci_info(ehci, "rejecting invalid request for "
+ "uframe_periodic_max=%u\n", uframe_periodic_max);
+ return -EINVAL;
+ }
+
+ ret = -EINVAL;
+
+ /*
+ * lock, so that our checking does not race with possible periodic
+ * bandwidth allocation through submitting new urbs.
+ */
+ spin_lock_irqsave (&ehci->lock, flags);
+
+ /*
+ * for request to decrease max periodic bandwidth, we have to check
+ * to see whether the decrease is possible.
+ */
+ if (uframe_periodic_max < ehci->uframe_periodic_max) {
+ u8 allocated_max = 0;
+
+ for (uframe = 0; uframe < EHCI_BANDWIDTH_SIZE; ++uframe)
+ allocated_max = max(allocated_max,
+ ehci->bandwidth[uframe]);
+
+ if (allocated_max > uframe_periodic_max) {
+ ehci_info(ehci,
+ "cannot decrease uframe_periodic_max becase "
+ "periodic bandwidth is already allocated "
+ "(%u > %u)\n",
+ allocated_max, uframe_periodic_max);
+ goto out_unlock;
+ }
+ }
+
+ /* increasing is always ok */
+
+ ehci_info(ehci, "setting max periodic bandwidth to %u%% "
+ "(== %u usec/uframe)\n",
+ 100*uframe_periodic_max/125, uframe_periodic_max);
+
+ if (uframe_periodic_max != 100)
+ ehci_warn(ehci, "max periodic bandwidth set is non-standard\n");
+
+ ehci->uframe_periodic_max = uframe_periodic_max;
+ ret = count;
+
+out_unlock:
+ spin_unlock_irqrestore (&ehci->lock, flags);
+ return ret;
+}
+static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, store_uframe_periodic_max);
+
+
+static inline int create_sysfs_files(struct ehci_hcd *ehci)
+{
+ struct device *controller = ehci_to_hcd(ehci)->self.controller;
+ int i = 0;
+
+ /* with integrated TT there is no companion! */
+ if (!ehci_is_TDI(ehci))
+ i = device_create_file(controller, &dev_attr_companion);
+ if (i)
+ goto out;
+
+ i = device_create_file(controller, &dev_attr_uframe_periodic_max);
+out:
+ return i;
+}
+
+static inline void remove_sysfs_files(struct ehci_hcd *ehci)
+{
+ struct device *controller = ehci_to_hcd(ehci)->self.controller;
+
+ /* with integrated TT there is no companion! */
+ if (!ehci_is_TDI(ehci))
+ device_remove_file(controller, &dev_attr_companion);
+
+ device_remove_file(controller, &dev_attr_uframe_periodic_max);
+}
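
store_uframe_periodic_max() above only accepts a lower bandwidth cap when no micro-frame already has more bandwidth allocated than the new limit allows. A small standalone sketch of the same check; the array size and sample values are illustrative:

#include <stdio.h>

#define BANDWIDTH_SLOTS	64	/* illustrative, one entry per uframe */

/* Values are usec of periodic traffic per 125-usec micro-frame; a valid
 * cap is between 100 and 124 and at least the current maximum allocation.
 */
static int cap_is_acceptable(const unsigned char bw[BANDWIDTH_SLOTS],
			     unsigned new_cap)
{
	unsigned allocated_max = 0;
	unsigned i;

	if (new_cap < 100 || new_cap >= 125)
		return 0;
	for (i = 0; i < BANDWIDTH_SLOTS; i++)
		if (bw[i] > allocated_max)
			allocated_max = bw[i];
	return allocated_max <= new_cap;
}

int main(void)
{
	unsigned char bw[BANDWIDTH_SLOTS] = { [3] = 110 };

	printf("cap 100: %s\n", cap_is_acceptable(bw, 100) ? "ok" : "rejected");
	printf("cap 120: %s\n", cap_is_acceptable(bw, 120) ? "ok" : "rejected");
	return 0;
}
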
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
new file mode 100644
index 00000000000..6fdcb8ad229
--- /dev/null
+++ b/drivers/usb/host/ehci-tegra.c
@@ -0,0 +1,569 @@
+/*
+ * EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2009 - 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/usb/ehci_def.h>
+#include <linux/usb/tegra_usb_phy.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+
+#include "ehci.h"
+
+#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
+
+#define TEGRA_USB_DMA_ALIGN 32
+
+#define DRIVER_DESC "Tegra EHCI driver"
+#define DRV_NAME "tegra-ehci"
+
+static struct hc_driver __read_mostly tegra_ehci_hc_driver;
+
+struct tegra_ehci_soc_config {
+ bool has_hostpc;
+};
+
+struct tegra_ehci_hcd {
+ struct tegra_usb_phy *phy;
+ struct clk *clk;
+ struct reset_control *rst;
+ int port_resuming;
+ bool needs_double_reset;
+ enum tegra_usb_phy_port_speed port_speed;
+};
+
+static int tegra_ehci_internal_port_reset(
+ struct ehci_hcd *ehci,
+ u32 __iomem *portsc_reg
+)
+{
+ u32 temp;
+ unsigned long flags;
+ int retval = 0;
+ int i, tries;
+ u32 saved_usbintr;
+
+ spin_lock_irqsave(&ehci->lock, flags);
+ saved_usbintr = ehci_readl(ehci, &ehci->regs->intr_enable);
+ /* disable USB interrupt */
+ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+ spin_unlock_irqrestore(&ehci->lock, flags);
+
+ /*
+ * Here we have to do Port Reset at most twice for
+ * Port Enable bit to be set.
+ */
+ for (i = 0; i < 2; i++) {
+ temp = ehci_readl(ehci, portsc_reg);
+ temp |= PORT_RESET;
+ ehci_writel(ehci, temp, portsc_reg);
+ mdelay(10);
+ temp &= ~PORT_RESET;
+ ehci_writel(ehci, temp, portsc_reg);
+ mdelay(1);
+ tries = 100;
+ do {
+ mdelay(1);
+ /*
+ * By this point, the Port Enable bit should
+ * be set within about 2 ms of waiting. USB1
+ * usually needs an extra 45 ms, so for safety
+ * we use a 100 ms timeout.
+ */
+ temp = ehci_readl(ehci, portsc_reg);
+ } while (!(temp & PORT_PE) && tries--);
+ if (temp & PORT_PE)
+ break;
+ }
+ if (i == 2)
+ retval = -ETIMEDOUT;
+
+ /*
+ * Clear Connect Status Change bit if it's set.
+ * We can't clear PORT_PEC. It will also cause PORT_PE to be cleared.
+ */
+ if (temp & PORT_CSC)
+ ehci_writel(ehci, PORT_CSC, portsc_reg);
+
+ /*
+ * Write to clear any interrupt status bits that might be set
+ * during port reset.
+ */
+ temp = ehci_readl(ehci, &ehci->regs->status);
+ ehci_writel(ehci, temp, &ehci->regs->status);
+
+ /* restore original interrupt enable bits */
+ ehci_writel(ehci, saved_usbintr, &ehci->regs->intr_enable);
+ return retval;
+}
+
+static int tegra_ehci_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct tegra_ehci_hcd *tegra = (struct tegra_ehci_hcd *)ehci->priv;
+ u32 __iomem *status_reg;
+ u32 temp;
+ unsigned long flags;
+ int retval = 0;
+
+ status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
+
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ if (typeReq == GetPortStatus) {
+ temp = ehci_readl(ehci, status_reg);
+ if (tegra->port_resuming && !(temp & PORT_SUSPEND)) {
+ /* Resume completed, re-enable disconnect detection */
+ tegra->port_resuming = 0;
+ tegra_usb_phy_postresume(hcd->phy);
+ }
+ }
+
+ else if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+ temp = ehci_readl(ehci, status_reg);
+ if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
+ retval = -EPIPE;
+ goto done;
+ }
+
+ temp &= ~(PORT_RWC_BITS | PORT_WKCONN_E);
+ temp |= PORT_WKDISC_E | PORT_WKOC_E;
+ ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
+ /*
+ * If a transaction is in progress, there may be a delay in
+ * suspending the port. Poll until the port is suspended.
+ */
+ if (ehci_handshake(ehci, status_reg, PORT_SUSPEND,
+ PORT_SUSPEND, 5000))
+ pr_err("%s: timeout waiting for SUSPEND\n", __func__);
+
+ set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+ goto done;
+ }
+
+ /* For USB1 port we need to issue Port Reset twice internally */
+ if (tegra->needs_double_reset &&
+ (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_RESET)) {
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ return tegra_ehci_internal_port_reset(ehci, status_reg);
+ }
+
+ /*
+ * The Tegra host controller times the resume operation itself and clears
+ * the bit when the port control state switches to HS or FS Idle. This
+ * differs from standard EHCI, where the host controller driver must clear
+ * the bit after timing the resume duration in software.
+ */
+ else if (typeReq == ClearPortFeature &&
+ wValue == USB_PORT_FEAT_SUSPEND) {
+ temp = ehci_readl(ehci, status_reg);
+ if ((temp & PORT_RESET) || !(temp & PORT_PE)) {
+ retval = -EPIPE;
+ goto done;
+ }
+
+ if (!(temp & PORT_SUSPEND))
+ goto done;
+
+ /* Disable disconnect detection during port resume */
+ tegra_usb_phy_preresume(hcd->phy);
+
+ ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25);
+
+ temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+ /* start resume signalling */
+ ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+ set_bit(wIndex-1, &ehci->resuming_ports);
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ /* Poll until the controller clears RESUME and SUSPEND */
+ if (ehci_handshake(ehci, status_reg, PORT_RESUME, 0, 2000))
+ pr_err("%s: timeout waiting for RESUME\n", __func__);
+ if (ehci_handshake(ehci, status_reg, PORT_SUSPEND, 0, 2000))
+ pr_err("%s: timeout waiting for SUSPEND\n", __func__);
+
+ ehci->reset_done[wIndex-1] = 0;
+ clear_bit(wIndex-1, &ehci->resuming_ports);
+
+ tegra->port_resuming = 1;
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+
+ /* Handle the hub control events here */
+ return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+
+done:
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ return retval;
+}
+
+struct dma_aligned_buffer {
+ void *kmalloc_ptr;
+ void *old_xfer_buffer;
+ u8 data[0];
+};
+
+static void free_dma_aligned_buffer(struct urb *urb)
+{
+ struct dma_aligned_buffer *temp;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+ temp = container_of(urb->transfer_buffer,
+ struct dma_aligned_buffer, data);
+
+ if (usb_urb_dir_in(urb))
+ memcpy(temp->old_xfer_buffer, temp->data,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->old_xfer_buffer;
+ kfree(temp->kmalloc_ptr);
+
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+}
+
+static int alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
+{
+ struct dma_aligned_buffer *temp, *kmalloc_ptr;
+ size_t kmalloc_size;
+
+ if (urb->num_sgs || urb->sg ||
+ urb->transfer_buffer_length == 0 ||
+ !((uintptr_t)urb->transfer_buffer & (TEGRA_USB_DMA_ALIGN - 1)))
+ return 0;
+
+ /* Allocate a buffer with enough padding for alignment */
+ kmalloc_size = urb->transfer_buffer_length +
+ sizeof(struct dma_aligned_buffer) + TEGRA_USB_DMA_ALIGN - 1;
+
+ kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+ if (!kmalloc_ptr)
+ return -ENOMEM;
+
+ /* Position our struct dma_aligned_buffer such that data is aligned */
+ temp = PTR_ALIGN(kmalloc_ptr + 1, TEGRA_USB_DMA_ALIGN) - 1;
+ temp->kmalloc_ptr = kmalloc_ptr;
+ temp->old_xfer_buffer = urb->transfer_buffer;
+ if (usb_urb_dir_out(urb))
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->data;
+
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+}
+
+static int tegra_ehci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int ret;
+
+ ret = alloc_dma_aligned_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ free_dma_aligned_buffer(urb);
+
+ return ret;
+}
+
+static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+ free_dma_aligned_buffer(urb);
+}
+
+static const struct tegra_ehci_soc_config tegra30_soc_config = {
+ .has_hostpc = true,
+};
+
+static const struct tegra_ehci_soc_config tegra20_soc_config = {
+ .has_hostpc = false,
+};
+
+static struct of_device_id tegra_ehci_of_match[] = {
+ { .compatible = "nvidia,tegra30-ehci", .data = &tegra30_soc_config },
+ { .compatible = "nvidia,tegra20-ehci", .data = &tegra20_soc_config },
+ { },
+};
+
+static int tegra_ehci_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct tegra_ehci_soc_config *soc_config;
+ struct resource *res;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct tegra_ehci_hcd *tegra;
+ int err = 0;
+ int irq;
+ struct usb_phy *u_phy;
+
+ match = of_match_device(tegra_ehci_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ soc_config = match->data;
+
+ /* Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, hcd);
+ ehci = hcd_to_ehci(hcd);
+ tegra = (struct tegra_ehci_hcd *)ehci->priv;
+
+ hcd->has_tt = 1;
+
+ tegra->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tegra->clk)) {
+ dev_err(&pdev->dev, "Can't get ehci clock\n");
+ err = PTR_ERR(tegra->clk);
+ goto cleanup_hcd_create;
+ }
+
+ tegra->rst = devm_reset_control_get(&pdev->dev, "usb");
+ if (IS_ERR(tegra->rst)) {
+ dev_err(&pdev->dev, "Can't get ehci reset\n");
+ err = PTR_ERR(tegra->rst);
+ goto cleanup_hcd_create;
+ }
+
+ err = clk_prepare_enable(tegra->clk);
+ if (err)
+ goto cleanup_hcd_create;
+
+ reset_control_assert(tegra->rst);
+ udelay(1);
+ reset_control_deassert(tegra->rst);
+
+ u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
+ if (IS_ERR(u_phy)) {
+ err = PTR_ERR(u_phy);
+ goto cleanup_clk_en;
+ }
+ hcd->phy = u_phy;
+
+ tegra->needs_double_reset = of_property_read_bool(pdev->dev.of_node,
+ "nvidia,needs-double-reset");
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto cleanup_clk_en;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
+ goto cleanup_clk_en;
+ }
+ ehci->caps = hcd->regs + 0x100;
+ ehci->has_hostpc = soc_config->has_hostpc;
+
+ err = usb_phy_init(hcd->phy);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize phy\n");
+ goto cleanup_clk_en;
+ }
+
+ u_phy->otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
+ GFP_KERNEL);
+ if (!u_phy->otg) {
+ dev_err(&pdev->dev, "Failed to alloc memory for otg\n");
+ err = -ENOMEM;
+ goto cleanup_phy;
+ }
+ u_phy->otg->host = hcd_to_bus(hcd);
+
+ err = usb_phy_set_suspend(hcd->phy, 0);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to power on the phy\n");
+ goto cleanup_phy;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENODEV;
+ goto cleanup_phy;
+ }
+
+ otg_set_host(u_phy->otg, &hcd->self);
+
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add USB HCD\n");
+ goto cleanup_otg_set_host;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ return err;
+
+cleanup_otg_set_host:
+ otg_set_host(u_phy->otg, NULL);
+cleanup_phy:
+ usb_phy_shutdown(hcd->phy);
+cleanup_clk_en:
+ clk_disable_unprepare(tegra->clk);
+cleanup_hcd_create:
+ usb_put_hcd(hcd);
+ return err;
+}
+
+static int tegra_ehci_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct tegra_ehci_hcd *tegra =
+ (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+
+ otg_set_host(hcd->phy->otg, NULL);
+
+ usb_phy_shutdown(hcd->phy);
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+
+ clk_disable_unprepare(tegra->clk);
+
+ return 0;
+}
+
+static void tegra_ehci_hcd_shutdown(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver tegra_ehci_driver = {
+ .probe = tegra_ehci_probe,
+ .remove = tegra_ehci_remove,
+ .shutdown = tegra_ehci_hcd_shutdown,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = tegra_ehci_of_match,
+ }
+};
+
+static int tegra_ehci_reset(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+ int txfifothresh;
+
+ retval = ehci_setup(hcd);
+ if (retval)
+ return retval;
+
+ /*
+ * We should really pull this value out of tegra_ehci_soc_config, but
+ * to avoid needing access to it, make use of the fact that Tegra20 is
+ * the only one so far that needs a value of 10, and Tegra20 is the
+ * only one which doesn't set has_hostpc.
+ */
+ txfifothresh = ehci->has_hostpc ? 0x10 : 10;
+ ehci_writel(ehci, txfifothresh << 16, &ehci->regs->txfill_tuning);
+
+ return 0;
+}
+
+static const struct ehci_driver_overrides tegra_overrides __initconst = {
+ .extra_priv_size = sizeof(struct tegra_ehci_hcd),
+ .reset = tegra_ehci_reset,
+};
+
+static int __init ehci_tegra_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info(DRV_NAME ": " DRIVER_DESC "\n");
+
+ ehci_init_driver(&tegra_ehci_hc_driver, &tegra_overrides);
+
+ /*
+ * The Tegra HW has some unusual quirks, which require Tegra-specific
+ * workarounds. We override certain hc_driver functions here to
+ * achieve that. We explicitly do not enhance ehci_driver_overrides to
+ * allow this more easily, since this is an unusual case, and we don't
+ * want to encourage others to override these functions by making it
+ * too easy.
+ */
+
+ tegra_ehci_hc_driver.map_urb_for_dma = tegra_ehci_map_urb_for_dma;
+ tegra_ehci_hc_driver.unmap_urb_for_dma = tegra_ehci_unmap_urb_for_dma;
+ tegra_ehci_hc_driver.hub_control = tegra_ehci_hub_control;
+
+ return platform_driver_register(&tegra_ehci_driver);
+}
+module_init(ehci_tegra_init);
+
+static void __exit ehci_tegra_cleanup(void)
+{
+ platform_driver_unregister(&tegra_ehci_driver);
+}
+module_exit(ehci_tegra_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_DEVICE_TABLE(of, tegra_ehci_of_match);
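
alloc_dma_aligned_buffer() and free_dma_aligned_buffer() above bounce unaligned URB buffers through an over-allocated buffer whose header sits directly below the aligned data, so the completion path can recover both original pointers from the data pointer alone. A standalone userspace sketch of the same layout trick; the 32-byte alignment mirrors TEGRA_USB_DMA_ALIGN, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DMA_ALIGN	32

struct aligned_buf {
	void *kmalloc_ptr;		/* original allocation, for free() */
	void *old_xfer_buffer;		/* caller's unaligned buffer */
	uint8_t data[0];		/* aligned data area starts here */
};

static void *bounce_alloc(void *orig, size_t len)
{
	void *raw = malloc(len + sizeof(struct aligned_buf) + DMA_ALIGN - 1);
	struct aligned_buf *hdr;
	uintptr_t data;

	if (!raw)
		return NULL;
	/* Round the data area up to the alignment; header sits just below */
	data = ((uintptr_t)raw + sizeof(*hdr) + DMA_ALIGN - 1) &
	       ~(uintptr_t)(DMA_ALIGN - 1);
	hdr = (struct aligned_buf *)data - 1;
	hdr->kmalloc_ptr = raw;
	hdr->old_xfer_buffer = orig;
	memcpy(hdr->data, orig, len);	/* OUT direction copy */
	return hdr->data;
}

static void bounce_free(void *data)
{
	struct aligned_buf *hdr = (struct aligned_buf *)data - 1;

	free(hdr->kmalloc_ptr);
}

int main(void)
{
	char msg[] = "unaligned transfer buffer";
	void *buf = bounce_alloc(msg, sizeof(msg));

	if (!buf)
		return 1;
	printf("aligned buffer %p: %s\n", buf, (char *)buf);
	bounce_free(buf);
	return 0;
}
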
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
new file mode 100644
index 00000000000..0d247673c3c
--- /dev/null
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * Tilera TILE-Gx USB EHCI host controller driver.
+ */
+
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/usb/tilegx.h>
+#include <linux/usb.h>
+
+#include <asm/homecache.h>
+
+#include <gxio/iorpc_usb_host.h>
+#include <gxio/usb_host.h>
+
+static void tilegx_start_ehc(void)
+{
+}
+
+static void tilegx_stop_ehc(void)
+{
+}
+
+static int tilegx_ehci_setup(struct usb_hcd *hcd)
+{
+ int ret = ehci_init(hcd);
+
+ /*
+ * Some drivers do:
+ *
+ * struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ * ehci->need_io_watchdog = 0;
+ *
+ * here, but since this is a new driver we're going to leave the
+ * watchdog enabled. Later we may try to turn it off and see
+ * whether we run into any problems.
+ */
+
+ return ret;
+}
+
+static const struct hc_driver ehci_tilegx_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Tile-Gx EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * Generic hardware linkage.
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * Basic lifecycle operations.
+ */
+ .reset = tilegx_ehci_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * Managing I/O requests and associated device resources.
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * Scheduling support.
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * Root hub support.
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ pte_t pte = { 0 };
+ int my_cpu = smp_processor_id();
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /*
+ * Try to initialize our GXIO context; if we can't, the device
+ * doesn't exist.
+ */
+ if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 1) != 0)
+ return -ENXIO;
+
+ hcd = usb_create_hcd(&ehci_tilegx_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto err_hcd;
+ }
+
+ /*
+ * We don't use rsrc_start to map in our registers, but seems like
+ * we ought to set it to something, so we use the register VA.
+ */
+ hcd->rsrc_start =
+ (ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+ hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx);
+ hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+
+ tilegx_start_ehc();
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = hcd->regs;
+ ehci->regs =
+ hcd->regs + HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase));
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = readl(&ehci->caps->hcs_params);
+
+ /* Create our IRQs and register them. */
+ pdata->irq = irq_alloc_hwirq(-1);
+ if (!pdata->irq) {
+ ret = -ENXIO;
+ goto err_no_irq;
+ }
+
+ tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU);
+
+ /* Configure interrupts. */
+ ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx,
+ cpu_x(my_cpu), cpu_y(my_cpu),
+ KERNEL_PL, pdata->irq);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ /* Register all of our memory. */
+ pte = pte_set_home(pte, PAGE_HOME_HASH);
+ ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
+ if (ret == 0) {
+ platform_set_drvdata(pdev, hcd);
+ device_wakeup_enable(hcd->self.controller);
+ return ret;
+ }
+
+err_have_irq:
+ irq_free_hwirq(pdata->irq);
+err_no_irq:
+ tilegx_stop_ehc();
+ usb_put_hcd(hcd);
+err_hcd:
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ return ret;
+}
+
+static int ehci_hcd_tilegx_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+ tilegx_stop_ehc();
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ irq_free_hwirq(pdata->irq);
+
+ return 0;
+}
+
+static void ehci_hcd_tilegx_drv_shutdown(struct platform_device *pdev)
+{
+ usb_hcd_platform_shutdown(pdev);
+ ehci_hcd_tilegx_drv_remove(pdev);
+}
+
+static struct platform_driver ehci_hcd_tilegx_driver = {
+ .probe = ehci_hcd_tilegx_drv_probe,
+ .remove = ehci_hcd_tilegx_drv_remove,
+ .shutdown = ehci_hcd_tilegx_drv_shutdown,
+ .driver = {
+ .name = "tilegx-ehci",
+ .owner = THIS_MODULE,
+ }
+};
+
+MODULE_ALIAS("platform:tilegx-ehci");
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
new file mode 100644
index 00000000000..424ac5d8371
--- /dev/null
+++ b/drivers/usb/host/ehci-timer.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2012 by Alan Stern
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/* This file is part of ehci-hcd.c */
+
+/*-------------------------------------------------------------------------*/
+
+/* Set a bit in the USBCMD register */
+static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit)
+{
+ ehci->command |= bit;
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+
+ /* unblock posted write */
+ ehci_readl(ehci, &ehci->regs->command);
+}
+
+/* Clear a bit in the USBCMD register */
+static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit)
+{
+ ehci->command &= ~bit;
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+
+ /* unblock posted write */
+ ehci_readl(ehci, &ehci->regs->command);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI timer support... Now using hrtimers.
+ *
+ * Lots of different events are triggered from ehci->hrtimer. Whenever
+ * the timer routine runs, it checks each possible event; events that are
+ * currently enabled and whose expiration time has passed get handled.
+ * The set of enabled events is stored as a collection of bitflags in
+ * ehci->enabled_hrtimer_events, and they are numbered in order of
+ * increasing delay values (ranging between 1 ms and 100 ms).
+ *
+ * Rather than implementing a sorted list or tree of all pending events,
+ * we keep track only of the lowest-numbered pending event, in
+ * ehci->next_hrtimer_event. Whenever ehci->hrtimer gets restarted, its
+ * expiration time is set to the timeout value for this event.
+ *
+ * As a result, events might not get handled right away; the actual delay
+ * could be anywhere up to twice the requested delay. This doesn't
+ * matter, because none of the events are especially time-critical. The
+ * ones that matter most all have a delay of 1 ms, so they will be
+ * handled after 2 ms at most, which is okay. In addition to this, we
+ * allow for an expiration range of 1 ms.
+ */
+
+/*
+ * Delay lengths for the hrtimer event types.
+ * Keep this list sorted by delay length, in the same order as
+ * the event types indexed by enum ehci_hrtimer_event in ehci.h.
+ */
+static unsigned event_delays_ns[] = {
+ 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_ASS */
+ 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_PSS */
+ 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_DEAD */
+ 1125 * NSEC_PER_USEC, /* EHCI_HRTIMER_UNLINK_INTR */
+ 2 * NSEC_PER_MSEC, /* EHCI_HRTIMER_FREE_ITDS */
+ 5 * NSEC_PER_MSEC, /* EHCI_HRTIMER_START_UNLINK_INTR */
+ 6 * NSEC_PER_MSEC, /* EHCI_HRTIMER_ASYNC_UNLINKS */
+ 10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IAA_WATCHDOG */
+ 10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_PERIODIC */
+ 15 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_ASYNC */
+ 100 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IO_WATCHDOG */
+};
+
+/* Enable a pending hrtimer event */
+static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
+ bool resched)
+{
+ ktime_t *timeout = &ehci->hr_timeouts[event];
+
+ if (resched)
+ *timeout = ktime_add(ktime_get(),
+ ktime_set(0, event_delays_ns[event]));
+ ehci->enabled_hrtimer_events |= (1 << event);
+
+ /* Track only the lowest-numbered pending event */
+ if (event < ehci->next_hrtimer_event) {
+ ehci->next_hrtimer_event = event;
+ hrtimer_start_range_ns(&ehci->hrtimer, *timeout,
+ NSEC_PER_MSEC, HRTIMER_MODE_ABS);
+ }
+}
+
+
+/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
+static void ehci_poll_ASS(struct ehci_hcd *ehci)
+{
+ unsigned actual, want;
+
+ /* Don't enable anything if the controller isn't running (e.g., died) */
+ if (ehci->rh_state != EHCI_RH_RUNNING)
+ return;
+
+ want = (ehci->command & CMD_ASE) ? STS_ASS : 0;
+ actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 2-4 ms */
+ if (ehci->ASS_poll_count++ < 2) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
+ return;
+ }
+ ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ ehci->ASS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (ehci->async_count > 0)
+ ehci_set_command_bit(ehci, CMD_ASE);
+
+ } else { /* Running */
+ if (ehci->async_count == 0) {
+
+ /* Turn off the schedule after a while */
+ ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC,
+ true);
+ }
+ }
+}
+
+/* Turn off the async schedule after a brief delay */
+static void ehci_disable_ASE(struct ehci_hcd *ehci)
+{
+ ehci_clear_command_bit(ehci, CMD_ASE);
+}
+
+
+/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
+static void ehci_poll_PSS(struct ehci_hcd *ehci)
+{
+ unsigned actual, want;
+
+ /* Don't do anything if the controller isn't running (e.g., died) */
+ if (ehci->rh_state != EHCI_RH_RUNNING)
+ return;
+
+ want = (ehci->command & CMD_PSE) ? STS_PSS : 0;
+ actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 2-4 ms */
+ if (ehci->PSS_poll_count++ < 2) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
+ return;
+ }
+ ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ ehci->PSS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (ehci->periodic_count > 0)
+ ehci_set_command_bit(ehci, CMD_PSE);
+
+ } else { /* Running */
+ if (ehci->periodic_count == 0) {
+
+ /* Turn off the schedule after a while */
+ ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC,
+ true);
+ }
+ }
+}
+
+/* Turn off the periodic schedule after a brief delay */
+static void ehci_disable_PSE(struct ehci_hcd *ehci)
+{
+ ehci_clear_command_bit(ehci, CMD_PSE);
+}
+
+
+/* Poll the STS_HALT status bit; see when a dead controller stops */
+static void ehci_handle_controller_death(struct ehci_hcd *ehci)
+{
+ if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) {
+
+ /* Give up after a few milliseconds */
+ if (ehci->died_poll_count++ < 5) {
+ /* Try again later */
+ ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true);
+ return;
+ }
+ ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n");
+ }
+
+ /* Clean up the mess */
+ ehci->rh_state = EHCI_RH_HALTED;
+ ehci_writel(ehci, 0, &ehci->regs->configured_flag);
+ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+ ehci_work(ehci);
+ end_unlink_async(ehci);
+
+ /* Not in process context, so don't try to reset the controller */
+}
+
+/* start to unlink interrupt QHs */
+static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci)
+{
+ bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+
+ /*
+ * Process all the QHs on the intr_unlink_wait list that were added
+ * before the current unlink cycle began. The list is in
+ * temporal order, so stop when we reach the first entry in the
+ * current cycle. But if the root hub isn't running then
+ * process all the QHs on the list.
+ */
+ while (!list_empty(&ehci->intr_unlink_wait)) {
+ struct ehci_qh *qh;
+
+ qh = list_first_entry(&ehci->intr_unlink_wait,
+ struct ehci_qh, unlink_node);
+ if (!stopped && (qh->unlink_cycle ==
+ ehci->intr_unlink_wait_cycle))
+ break;
+ list_del_init(&qh->unlink_node);
+ start_unlink_intr(ehci, qh);
+ }
+
+ /* Handle remaining entries later */
+ if (!list_empty(&ehci->intr_unlink_wait)) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
+ ++ehci->intr_unlink_wait_cycle;
+ }
+}
+
+/* Handle unlinked interrupt QHs once they are gone from the hardware */
+static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
+{
+ bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+
+ /*
+ * Process all the QHs on the intr_unlink list that were added
+ * before the current unlink cycle began. The list is in
+ * temporal order, so stop when we reach the first entry in the
+ * current cycle. But if the root hub isn't running then
+ * process all the QHs on the list.
+ */
+ ehci->intr_unlinking = true;
+ while (!list_empty(&ehci->intr_unlink)) {
+ struct ehci_qh *qh;
+
+ qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh,
+ unlink_node);
+ if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
+ break;
+ list_del_init(&qh->unlink_node);
+ end_unlink_intr(ehci, qh);
+ }
+
+ /* Handle remaining entries later */
+ if (!list_empty(&ehci->intr_unlink)) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
+ ++ehci->intr_unlink_cycle;
+ }
+ ehci->intr_unlinking = false;
+}
+
+
+/* Start another free-iTDs/siTDs cycle */
+static void start_free_itds(struct ehci_hcd *ehci)
+{
+ if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) {
+ ehci->last_itd_to_free = list_entry(
+ ehci->cached_itd_list.prev,
+ struct ehci_itd, itd_list);
+ ehci->last_sitd_to_free = list_entry(
+ ehci->cached_sitd_list.prev,
+ struct ehci_sitd, sitd_list);
+ ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true);
+ }
+}
+
+/* Wait for controller to stop using old iTDs and siTDs */
+static void end_free_itds(struct ehci_hcd *ehci)
+{
+ struct ehci_itd *itd, *n;
+ struct ehci_sitd *sitd, *sn;
+
+ if (ehci->rh_state < EHCI_RH_RUNNING) {
+ ehci->last_itd_to_free = NULL;
+ ehci->last_sitd_to_free = NULL;
+ }
+
+ list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
+ list_del(&itd->itd_list);
+ dma_pool_free(ehci->itd_pool, itd, itd->itd_dma);
+ if (itd == ehci->last_itd_to_free)
+ break;
+ }
+ list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
+ list_del(&sitd->sitd_list);
+ dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma);
+ if (sitd == ehci->last_sitd_to_free)
+ break;
+ }
+
+ if (!list_empty(&ehci->cached_itd_list) ||
+ !list_empty(&ehci->cached_sitd_list))
+ start_free_itds(ehci);
+}
+
+
+/* Handle lost (or very late) IAA interrupts */
+static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
+{
+ u32 cmd, status;
+
+ /*
+ * Lost IAA irqs wedge things badly; seen first with a vt8235.
+ * So we need this watchdog, but must protect it against both
+ * (a) SMP races against real IAA firing and retriggering, and
+ * (b) clean HC shutdown, when IAA watchdog was pending.
+ */
+ if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING)
+ return;
+
+ /* If we get here, IAA is *REALLY* late. It's barely
+ * conceivable that the system is so busy that CMD_IAAD
+ * is still legitimately set, so let's be sure it's
+ * clear before we read STS_IAA. (The HC should clear
+ * CMD_IAAD when it sets STS_IAA.)
+ */
+ cmd = ehci_readl(ehci, &ehci->regs->command);
+
+ /*
+ * If IAA is set here it either legitimately triggered
+ * after the watchdog timer expired (_way_ late, so we'll
+ * still count it as lost) ... or a silicon erratum:
+ * - VIA seems to set IAA without triggering the IRQ;
+ * - IAAD potentially cleared without setting IAA.
+ */
+ status = ehci_readl(ehci, &ehci->regs->status);
+ if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
+ COUNT(ehci->stats.lost_iaa);
+ ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+ }
+
+ ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd);
+ end_unlink_async(ehci);
+}
+
+
+/* Enable the I/O watchdog, if appropriate */
+static void turn_on_io_watchdog(struct ehci_hcd *ehci)
+{
+ /* Not needed if the controller isn't running or it's already enabled */
+ if (ehci->rh_state != EHCI_RH_RUNNING ||
+ (ehci->enabled_hrtimer_events &
+ BIT(EHCI_HRTIMER_IO_WATCHDOG)))
+ return;
+
+ /*
+ * Isochronous transfers always need the watchdog.
+ * For other sorts we use it only if the flag is set.
+ */
+ if (ehci->isoc_count > 0 || (ehci->need_io_watchdog &&
+ ehci->async_count + ehci->intr_count > 0))
+ ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true);
+}
+
+
+/*
+ * Handler functions for the hrtimer event types.
+ * Keep this array in the same order as the event types indexed by
+ * enum ehci_hrtimer_event in ehci.h.
+ */
+static void (*event_handlers[])(struct ehci_hcd *) = {
+ ehci_poll_ASS, /* EHCI_HRTIMER_POLL_ASS */
+ ehci_poll_PSS, /* EHCI_HRTIMER_POLL_PSS */
+ ehci_handle_controller_death, /* EHCI_HRTIMER_POLL_DEAD */
+ ehci_handle_intr_unlinks, /* EHCI_HRTIMER_UNLINK_INTR */
+ end_free_itds, /* EHCI_HRTIMER_FREE_ITDS */
+ ehci_handle_start_intr_unlinks, /* EHCI_HRTIMER_START_UNLINK_INTR */
+ unlink_empty_async, /* EHCI_HRTIMER_ASYNC_UNLINKS */
+ ehci_iaa_watchdog, /* EHCI_HRTIMER_IAA_WATCHDOG */
+ ehci_disable_PSE, /* EHCI_HRTIMER_DISABLE_PERIODIC */
+ ehci_disable_ASE, /* EHCI_HRTIMER_DISABLE_ASYNC */
+ ehci_work, /* EHCI_HRTIMER_IO_WATCHDOG */
+};
+
+static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
+{
+ struct ehci_hcd *ehci = container_of(t, struct ehci_hcd, hrtimer);
+ ktime_t now;
+ unsigned long events;
+ unsigned long flags;
+ unsigned e;
+
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ events = ehci->enabled_hrtimer_events;
+ ehci->enabled_hrtimer_events = 0;
+ ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
+
+ /*
+ * Check each pending event. If its time has expired, handle
+ * the event; otherwise re-enable it.
+ */
+ now = ktime_get();
+ for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
+ if (now.tv64 >= ehci->hr_timeouts[e].tv64)
+ event_handlers[e](ehci);
+ else
+ ehci_enable_event(ehci, e, false);
+ }
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ return HRTIMER_NORESTART;
+}
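
The hrtimer scheme described at the top of ehci-timer.c keeps a single timer armed for the lowest-numbered pending event, which by construction also has the shortest delay, with per-event deadlines and an enabled-event bitmask. A minimal standalone sketch of that bookkeeping, with illustrative names:

#include <stdio.h>

#define NUM_EVENTS	4

struct timer_state {
	unsigned long enabled_events;		/* bitmask of pending events */
	unsigned next_event;			/* lowest-numbered pending event */
	unsigned long deadline[NUM_EVENTS];
};

static void arm_timer_for(unsigned event, unsigned long deadline)
{
	printf("arm timer for event %u, deadline %lu\n", event, deadline);
}

static void enable_event(struct timer_state *t, unsigned event,
			 unsigned long now, unsigned long delay)
{
	t->deadline[event] = now + delay;
	t->enabled_events |= 1UL << event;

	/* Re-arm only when this event expires before the one already armed;
	 * lower-numbered events have shorter delays by construction. */
	if (event < t->next_event) {
		t->next_event = event;
		arm_timer_for(event, t->deadline[event]);
	}
}

int main(void)
{
	struct timer_state t = { .next_event = NUM_EVENTS };

	enable_event(&t, 1, 0, 5);	/* arms the timer */
	enable_event(&t, 3, 0, 20);	/* later event, no re-arm */
	enable_event(&t, 0, 0, 1);	/* sooner event, re-arms */
	return 0;
}
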
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index cfa21ea20f8..a9303aff125 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -11,14 +11,29 @@
*
*/
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
-/*ebable phy0 and phy1 for w90p910*/
+#include "ehci.h"
+
+/* enable phy0 and phy1 for w90p910 */
#define ENPHY (0x01<<8)
#define PHY0_CTR (0xA4)
#define PHY1_CTR (0xA8)
-static int __devinit usb_w90x900_probe(const struct hc_driver *driver,
+#define DRIVER_DESC "EHCI w90x900 driver"
+
+static const char hcd_name[] = "ehci-w90x900 ";
+
+static struct hc_driver __read_mostly ehci_w90x900_hc_driver;
+
+static int usb_w90x900_probe(const struct hc_driver *driver,
struct platform_device *pdev)
{
struct usb_hcd *hcd;
@@ -41,23 +56,18 @@ static int __devinit usb_w90x900_probe(const struct hc_driver *driver,
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- retval = -EBUSY;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
goto err2;
}
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (hcd->regs == NULL) {
- retval = -EFAULT;
- goto err3;
- }
-
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs;
ehci->regs = hcd->regs +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
/* enable PHY 0,1,the regs only apply to w90p910
* 0xA4,0xA8 were offsets of PHY0 and PHY1 controller of
@@ -71,85 +81,30 @@ static int __devinit usb_w90x900_probe(const struct hc_driver *driver,
val |= ENPHY;
__raw_writel(val, ehci->regs+PHY1_CTR);
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
- ehci->sbrn = 0x20;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- goto err4;
+ goto err2;
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval != 0)
- goto err4;
-
- ehci_writel(ehci, 1, &ehci->regs->configured_flag);
+ goto err2;
+ device_wakeup_enable(hcd->self.controller);
return retval;
-err4:
- iounmap(hcd->regs);
-err3:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err2:
usb_put_hcd(hcd);
err1:
return retval;
}
-static
-void usb_w90x900_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+static void usb_w90x900_remove(struct usb_hcd *hcd,
+ struct platform_device *pdev)
{
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
}
-static const struct hc_driver ehci_w90x900_hc_driver = {
- .description = hcd_name,
- .product_desc = "Nuvoton w90x900 EHCI Host Controller",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_USB2|HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .reset = ehci_init,
- .start = ehci_run,
-
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
-#endif
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-};
-
-static int __devinit ehci_w90x900_probe(struct platform_device *pdev)
+static int ehci_w90x900_probe(struct platform_device *pdev)
{
if (usb_disabled())
return -ENODEV;
@@ -157,7 +112,7 @@ static int __devinit ehci_w90x900_probe(struct platform_device *pdev)
return usb_w90x900_probe(&ehci_w90x900_hc_driver, pdev);
}
-static int __devexit ehci_w90x900_remove(struct platform_device *pdev)
+static int ehci_w90x900_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
@@ -168,14 +123,32 @@ static int __devexit ehci_w90x900_remove(struct platform_device *pdev)
static struct platform_driver ehci_hcd_w90x900_driver = {
.probe = ehci_w90x900_probe,
- .remove = __devexit_p(ehci_w90x900_remove),
+ .remove = ehci_w90x900_remove,
.driver = {
.name = "w90x900-ehci",
.owner = THIS_MODULE,
},
};
+static int __init ehci_w90X900_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_w90x900_hc_driver, NULL);
+ return platform_driver_register(&ehci_hcd_w90x900_driver);
+}
+module_init(ehci_w90X900_init);
+
+static void __exit ehci_w90X900_cleanup(void)
+{
+ platform_driver_unregister(&ehci_hcd_w90x900_driver);
+}
+module_exit(ehci_w90X900_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
-MODULE_DESCRIPTION("w90p910 usb ehci driver!");
-MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:w90p910-ehci");
+MODULE_LICENSE("GPL v2");
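
Editorial note, not part of the patch: the conversion above (like the Xilinx one below) swaps the hand-rolled request_mem_region()/ioremap() pair and its error-path cleanup for resource_size() plus the managed devm_ioremap_resource() helper. A hedged sketch of the resulting probe shape, with made-up names for illustration:

	static int example_hcd_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		regs = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(regs))
			return PTR_ERR(regs);	/* also covers res == NULL */

		/*
		 * The region request and the mapping are released automatically
		 * when the device is unbound, so no iounmap() or
		 * release_mem_region() is needed on later error paths or in remove().
		 */
		return 0;
	}
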
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 6c8076ad821..fe57710753e 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -25,34 +25,12 @@
*
*/
+#include <linux/err.h>
#include <linux/signal.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-
-/**
- * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
- * @hcd: Pointer to the usb_hcd device to which the host controller bound
- *
- * called during probe() after chip reset completes.
- */
-static int ehci_xilinx_of_setup(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int retval;
-
- retval = ehci_halt(ehci);
- if (retval)
- return retval;
-
- retval = ehci_init(hcd);
- if (retval)
- return retval;
-
- ehci->sbrn = 0x20;
-
- return ehci_reset(ehci);
-}
+#include <linux/of_address.h>
/**
* ehci_xilinx_port_handed_over - hand the port out if failed to enable it
@@ -101,12 +79,12 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
*/
- .reset = ehci_xilinx_of_setup,
+ .reset = ehci_setup,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
@@ -117,6 +95,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
@@ -141,15 +120,13 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
/**
* ehci_hcd_xilinx_of_probe - Probe method for the USB host controller
* @op: pointer to the platform_device bound to the host controller
- * @match: pointer to of_device_id structure, not used
*
* This function requests resources and sets up appropriate properties for the
* host controller. Because the Xilinx USB host controller can be configured
* as HS only or HS/FS only, it checks the configuration in the device tree
* entry, and sets an appropriate value for hcd->has_tt.
*/
-static int __devinit
-ehci_hcd_xilinx_of_probe(struct platform_device *op, const struct of_device_id *match)
+static int ehci_hcd_xilinx_of_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
@@ -174,26 +151,20 @@ ehci_hcd_xilinx_of_probe(struct platform_device *op, const struct of_device_id *
return -ENOMEM;
hcd->rsrc_start = res.start;
- hcd->rsrc_len = res.end - res.start + 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
- rv = -EBUSY;
- goto err_rmr;
- }
+ hcd->rsrc_len = resource_size(&res);
irq = irq_of_parse_and_map(dn, 0);
- if (irq == NO_IRQ) {
- printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
+ if (!irq) {
+ dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+ __FILE__);
rv = -EBUSY;
goto err_irq;
}
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
- rv = -ENOMEM;
- goto err_ioremap;
+ hcd->regs = devm_ioremap_resource(&op->dev, &res);
+ if (IS_ERR(hcd->regs)) {
+ rv = PTR_ERR(hcd->regs);
+ goto err_irq;
}
ehci = hcd_to_ehci(hcd);
@@ -219,22 +190,14 @@ ehci_hcd_xilinx_of_probe(struct platform_device *op, const struct of_device_id *
/* Debug registers are at the first 0x100 region
*/
ehci->caps = hcd->regs + 0x100;
- ehci->regs = hcd->regs + 0x100 +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
-
- /* cache this readonly data; minimize chip reads */
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
rv = usb_add_hcd(hcd, irq, 0);
- if (rv == 0)
+ if (rv == 0) {
+ device_wakeup_enable(hcd->self.controller);
return 0;
+ }
- iounmap(hcd->regs);
-
-err_ioremap:
err_irq:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-err_rmr:
usb_put_hcd(hcd);
return rv;
@@ -249,48 +212,27 @@ err_rmr:
*/
static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
{
- struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
- dev_set_drvdata(&op->dev, NULL);
+ struct usb_hcd *hcd = platform_get_drvdata(op);
dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n");
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-
usb_put_hcd(hcd);
return 0;
}
-/**
- * ehci_hcd_xilinx_of_shutdown - shutdown the hcd
- * @op: pointer to platform_device structure that is to be removed
- *
- * Properly shutdown the hcd, call driver's shutdown routine.
- */
-static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
-{
- struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-
- return 0;
-}
-
-
static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
{.compatible = "xlnx,xps-usb-host-1.00.a",},
{},
};
MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
-static struct of_platform_driver ehci_hcd_xilinx_of_driver = {
+static struct platform_driver ehci_hcd_xilinx_of_driver = {
.probe = ehci_hcd_xilinx_of_probe,
.remove = ehci_hcd_xilinx_of_remove,
- .shutdown = ehci_hcd_xilinx_of_shutdown,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xilinx-of-ehci",
.owner = THIS_MODULE,
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index bde823f704e..eee228a26a0 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -38,11 +38,15 @@ typedef __u16 __bitwise __hc16;
#endif
/* statistics can be kept for tuning/monitoring */
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define EHCI_STATS
+#endif
+
struct ehci_stats {
/* irq usage */
unsigned long normal;
unsigned long error;
- unsigned long reclaim;
+ unsigned long iaa;
unsigned long lost_iaa;
/* termination of urbs from core */
@@ -50,8 +54,30 @@ struct ehci_stats {
unsigned long unlink;
};
+/*
+ * Scheduling and budgeting information for periodic transfers, for both
+ * high-speed devices and full/low-speed devices lying behind a TT.
+ */
+struct ehci_per_sched {
+ struct usb_device *udev; /* access to the TT */
+ struct usb_host_endpoint *ep;
+ struct list_head ps_list; /* node on ehci_tt's ps_list */
+ u16 tt_usecs; /* time on the FS/LS bus */
+ u16 cs_mask; /* C-mask and S-mask bytes */
+ u16 period; /* actual period in frames */
+ u16 phase; /* actual phase, frame part */
+ u8 bw_phase; /* same, for bandwidth
+ reservation */
+ u8 phase_uf; /* uframe part of the phase */
+ u8 usecs, c_usecs; /* times on the HS bus */
+ u8 bw_uperiod; /* period in microframes, for
+ bandwidth reservation */
+ u8 bw_period; /* same, in frames */
+};
+#define NO_FRAME 29999 /* frame not assigned yet */
+
/* ehci_hcd->lock guards shared data against other CPUs:
- * ehci_hcd: async, reclaim, periodic (and shadow), ...
+ * ehci_hcd: async, unlink, periodic (and shadow), ...
* usb_host_endpoint: hcpriv
* ehci_qh: qh_next, qtd_list
* ehci_qtd: qtd_list
@@ -62,7 +88,49 @@ struct ehci_stats {
#define EHCI_MAX_ROOT_PORTS 15 /* see HCS_N_PORTS */
+/*
+ * ehci_rh_state values of EHCI_RH_RUNNING or above mean that the
+ * controller may be doing DMA. Lower values mean there's no DMA.
+ */
+enum ehci_rh_state {
+ EHCI_RH_HALTED,
+ EHCI_RH_SUSPENDED,
+ EHCI_RH_RUNNING,
+ EHCI_RH_STOPPING
+};
+
+/*
+ * Timer events, ordered by increasing delay length.
+ * Always update event_delays_ns[] and event_handlers[] (defined in
+ * ehci-timer.c) in parallel with this list.
+ */
+enum ehci_hrtimer_event {
+ EHCI_HRTIMER_POLL_ASS, /* Poll for async schedule off */
+ EHCI_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */
+ EHCI_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
+ EHCI_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
+ EHCI_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
+ EHCI_HRTIMER_START_UNLINK_INTR, /* Unlink empty interrupt QHs */
+ EHCI_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
+ EHCI_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
+ EHCI_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
+ EHCI_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
+ EHCI_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */
+ EHCI_HRTIMER_NUM_EVENTS /* Must come last */
+};
+#define EHCI_HRTIMER_NO_EVENT 99
+
struct ehci_hcd { /* one per controller */
+ /* timing support */
+ enum ehci_hrtimer_event next_hrtimer_event;
+ unsigned enabled_hrtimer_events;
+ ktime_t hr_timeouts[EHCI_HRTIMER_NUM_EVENTS];
+ struct hrtimer hrtimer;
+
+ int PSS_poll_count;
+ int ASS_poll_count;
+ int died_poll_count;
+
/* glue to PCI and HCD framework */
struct ehci_caps __iomem *caps;
struct ehci_regs __iomem *regs;
@@ -70,27 +138,51 @@ struct ehci_hcd { /* one per controller */
__u32 hcs_params; /* cached register copy */
spinlock_t lock;
+ enum ehci_rh_state rh_state;
+
+ /* general schedule support */
+ bool scanning:1;
+ bool need_rescan:1;
+ bool intr_unlinking:1;
+ bool iaa_in_progress:1;
+ bool async_unlinking:1;
+ bool shutdown:1;
+ struct ehci_qh *qh_scan_next;
/* async schedule support */
struct ehci_qh *async;
- struct ehci_qh *reclaim;
- unsigned scanning : 1;
+ struct ehci_qh *dummy; /* For AMD quirk use */
+ struct list_head async_unlink;
+ struct list_head async_idle;
+ unsigned async_unlink_cycle;
+ unsigned async_count; /* async activity count */
/* periodic schedule support */
#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
unsigned periodic_size;
__hc32 *periodic; /* hw periodic table */
dma_addr_t periodic_dma;
+ struct list_head intr_qh_list;
unsigned i_thresh; /* uframes HC might cache */
union ehci_shadow *pshadow; /* mirror hw periodic table */
- int next_uframe; /* scan periodic, start here */
- unsigned periodic_sched; /* periodic activity count */
-
- /* list of itds & sitds completed while clock_frame was still active */
+ struct list_head intr_unlink_wait;
+ struct list_head intr_unlink;
+ unsigned intr_unlink_wait_cycle;
+ unsigned intr_unlink_cycle;
+ unsigned now_frame; /* frame from HC hardware */
+ unsigned last_iso_frame; /* last frame scanned for iso */
+ unsigned intr_count; /* intr activity count */
+ unsigned isoc_count; /* isoc activity count */
+ unsigned periodic_count; /* periodic activity count */
+ unsigned uframe_periodic_max; /* max periodic time per uframe */
+
+
+ /* list of itds & sitds completed while now_frame was still active */
struct list_head cached_itd_list;
+ struct ehci_itd *last_itd_to_free;
struct list_head cached_sitd_list;
- unsigned clock_frame;
+ struct ehci_sitd *last_sitd_to_free;
/* per root hub port */
unsigned long reset_done [EHCI_MAX_ROOT_PORTS];
@@ -106,6 +198,8 @@ struct ehci_hcd { /* one per controller */
the change-suspend feature turned on */
unsigned long suspended_ports; /* which ports are
suspended */
+ unsigned long resuming_ports; /* which ports have
+ started to resume */
/* per-HC memory pools (could be per-bus, but ...) */
struct dma_pool *qh_pool; /* qh per active urb */
@@ -113,10 +207,6 @@ struct ehci_hcd { /* one per controller */
struct dma_pool *itd_pool; /* itd per iso urb */
struct dma_pool *sitd_pool; /* sitd per split iso urb */
- struct timer_list iaa_watchdog;
- struct timer_list watchdog;
- unsigned long actions;
- unsigned stamp;
unsigned random_frame;
unsigned long next_statechange;
ktime_t last_periodic_enable;
@@ -127,10 +217,15 @@ struct ehci_hcd { /* one per controller */
unsigned has_fsl_port_bug:1; /* FreeScale */
unsigned big_endian_mmio:1;
unsigned big_endian_desc:1;
+ unsigned big_endian_capbase:1;
unsigned has_amcc_usb23:1;
unsigned need_io_watchdog:1;
- unsigned broken_periodic:1;
- unsigned fs_i_thresh:1; /* Intel iso scheduling */
+ unsigned amd_pll_fix:1;
+ unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
+ unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
+ unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
+ unsigned need_oc_pp_cycle:1; /* MPC834X port power */
+ unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
@@ -141,7 +236,7 @@ struct ehci_hcd { /* one per controller */
#define OHCI_HCCTRL_LEN 0x4
__hc32 *ohci_hcctrl_reg;
unsigned has_hostpc:1;
- unsigned has_lpm:1; /* support link power management */
+ unsigned has_tdi_phy_lpm:1;
unsigned has_ppcd:1; /* support per-port change bits */
u8 sbrn; /* packed release number */
@@ -154,9 +249,21 @@ struct ehci_hcd { /* one per controller */
#endif
/* debug files */
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
struct dentry *debug_dir;
#endif
+
+ /* bandwidth usage */
+#define EHCI_BANDWIDTH_SIZE 64
+#define EHCI_BANDWIDTH_FRAMES (EHCI_BANDWIDTH_SIZE >> 3)
+ u8 bandwidth[EHCI_BANDWIDTH_SIZE];
+ /* us allocated per uframe */
+ u8 tt_budget[EHCI_BANDWIDTH_SIZE];
+ /* us budgeted per uframe */
+ struct list_head tt_list;
+
+ /* platform-specific data -- must come last */
+ unsigned long priv[0] __aligned(sizeof(s64));
};
/* convert between an HCD pointer and the corresponding EHCI_HCD */
@@ -169,34 +276,6 @@ static inline struct usb_hcd *ehci_to_hcd (struct ehci_hcd *ehci)
return container_of ((void *) ehci, struct usb_hcd, hcd_priv);
}
-
-static inline void
-iaa_watchdog_start(struct ehci_hcd *ehci)
-{
- WARN_ON(timer_pending(&ehci->iaa_watchdog));
- mod_timer(&ehci->iaa_watchdog,
- jiffies + msecs_to_jiffies(EHCI_IAA_MSECS));
-}
-
-static inline void iaa_watchdog_done(struct ehci_hcd *ehci)
-{
- del_timer(&ehci->iaa_watchdog);
-}
-
-enum ehci_timer_action {
- TIMER_IO_WATCHDOG,
- TIMER_ASYNC_SHRINK,
- TIMER_ASYNC_OFF,
-};
-
-static inline void
-timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
-{
- clear_bit (action, &ehci->actions);
-}
-
-static void free_cached_lists(struct ehci_hcd *ehci);
-
/*-------------------------------------------------------------------------*/
#include <linux/usb/ehci_def.h>
@@ -306,7 +385,13 @@ union ehci_shadow {
struct ehci_qh_hw {
__hc32 hw_next; /* see EHCI 3.6.1 */
__hc32 hw_info1; /* see EHCI 3.6.2 */
-#define QH_HEAD 0x00008000
+#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */
+#define QH_HEAD (1 << 15) /* Head of async reclamation list */
+#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */
+#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */
+#define QH_LOW_SPEED (1 << 12)
+#define QH_FULL_SPEED (0 << 12)
+#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */
__hc32 hw_info2; /* see EHCI 3.6.2 */
#define QH_SMASK 0x000000ff
#define QH_CMASK 0x0000ff00
@@ -324,47 +409,35 @@ struct ehci_qh_hw {
} __attribute__ ((aligned(32)));
struct ehci_qh {
- struct ehci_qh_hw *hw;
+ struct ehci_qh_hw *hw; /* Must come first */
/* the rest is HCD-private */
dma_addr_t qh_dma; /* address of qh */
union ehci_shadow qh_next; /* ptr to qh; or periodic */
struct list_head qtd_list; /* sw qtd list */
+ struct list_head intr_node; /* list of intr QHs */
struct ehci_qtd *dummy;
- struct ehci_qh *reclaim; /* next to reclaim */
-
- struct ehci_hcd *ehci;
+ struct list_head unlink_node;
+ struct ehci_per_sched ps; /* scheduling info */
- /*
- * Do NOT use atomic operations for QH refcounting. On some CPUs
- * (PPC7448 for example), atomic operations cannot be performed on
- * memory that is cache-inhibited (i.e. being used for DMA).
- * Spinlocks are used to protect all QH fields.
- */
- u32 refcount;
- unsigned stamp;
+ unsigned unlink_cycle;
- u8 needs_rescan; /* Dequeue during giveback */
u8 qh_state;
#define QH_STATE_LINKED 1 /* HC sees this */
#define QH_STATE_UNLINK 2 /* HC may still see this */
#define QH_STATE_IDLE 3 /* HC doesn't see this */
-#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on reclaim q */
+#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */
#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
u8 xacterrs; /* XactErr retry counter */
#define QH_XACTERR_MAX 32 /* XactErr retry limit */
- /* periodic schedule info */
- u8 usecs; /* intr bandwidth */
u8 gap_uf; /* uframes split/csplit gap */
- u8 c_usecs; /* ... split completion bw */
- u16 tt_usecs; /* tt downstream bandwidth */
- unsigned short period; /* polling interval */
- unsigned short start; /* where polling starts */
-#define NO_FRAME ((unsigned short)~0) /* pick new start */
- struct usb_device *dev; /* access to TT */
+ unsigned is_out:1; /* bulk or intr OUT */
unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
+ unsigned dequeue_during_giveback:1;
+ unsigned exception:1; /* got a fault, or an unlink
+ was requested */
};
/*-------------------------------------------------------------------------*/
@@ -386,6 +459,7 @@ struct ehci_iso_packet {
struct ehci_iso_sched {
struct list_head td_list;
unsigned span;
+ unsigned first_packet;
struct ehci_iso_packet packet [0];
};
@@ -397,27 +471,21 @@ struct ehci_iso_stream {
/* first field matches ehci_hq, but is NULL */
struct ehci_qh_hw *hw;
- u32 refcount;
u8 bEndpointAddress;
u8 highspeed;
struct list_head td_list; /* queued itds/sitds */
struct list_head free_list; /* list of unused itds/sitds */
- struct usb_device *udev;
- struct usb_host_endpoint *ep;
/* output of (re)scheduling */
- int next_uframe;
+ struct ehci_per_sched ps; /* scheduling info */
+ unsigned next_uframe;
__hc32 splits;
/* the rest is derived from the endpoint descriptor,
- * trusting urb->interval == f(epdesc->bInterval) and
* including the extra info for hw_bufp[0..2]
*/
- u8 usecs, c_usecs;
- u16 interval;
- u16 tt_usecs;
+ u16 uperiod; /* period in uframes */
u16 maxp;
- u16 raw_mask;
unsigned bandwidth;
/* This is used to initialize iTD's hw_bufp fields */
@@ -532,6 +600,35 @@ struct ehci_fstn {
/*-------------------------------------------------------------------------*/
+/*
+ * USB-2.0 Specification Sections 11.14 and 11.18
+ * Scheduling and budgeting split transactions using TTs
+ *
+ * A hub can have a single TT for all its ports, or multiple TTs (one for each
+ * port). The bandwidth and budgeting information for the full/low-speed bus
+ * below each TT is self-contained and independent of the other TTs or the
+ * high-speed bus.
+ *
+ * "Bandwidth" refers to the number of microseconds on the FS/LS bus allocated
+ * to an interrupt or isochronous endpoint for each frame. "Budget" refers to
+ * the best-case estimate of the number of full-speed bytes allocated to an
+ * endpoint for each microframe within an allocated frame.
+ *
+ * Removal of an endpoint invalidates a TT's budget. Instead of trying to
+ * keep an up-to-date record, we recompute the budget when it is needed.
+ */
+
+struct ehci_tt {
+ u16 bandwidth[EHCI_BANDWIDTH_FRAMES];
+
+ struct list_head tt_list; /* List of all ehci_tt's */
+ struct list_head ps_list; /* Items using this TT */
+ struct usb_tt *usb_tt;
+ int tt_port; /* TT port number */
+};
+
+/*-------------------------------------------------------------------------*/
+
/* Prepare the PORTSC wakeup flags during controller suspend/resume */
#define ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup) \
@@ -597,12 +694,18 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
* This attempts to support either format at compile time without a
* runtime penalty, or both formats with the additional overhead
* of checking a flag bit.
+ *
+ * ehci_big_endian_capbase is a special quirk for controllers that
+ * implement the HC capability registers as separate registers and not
+ * as fields of a 32-bit register.
*/
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
#define ehci_big_endian_mmio(e) ((e)->big_endian_mmio)
+#define ehci_big_endian_capbase(e) ((e)->big_endian_capbase)
#else
#define ehci_big_endian_mmio(e) 0
+#define ehci_big_endian_capbase(e) 0
#endif
/*
@@ -626,6 +729,18 @@ static inline unsigned int ehci_readl(const struct ehci_hcd *ehci,
#endif
}
+#ifdef CONFIG_SOC_IMX28
+static inline void imx28_ehci_writel(const unsigned int val,
+ volatile __u32 __iomem *addr)
+{
+ __asm__ ("swp %0, %0, [%1]" : : "r"(val), "r"(addr));
+}
+#else
+static inline void imx28_ehci_writel(const unsigned int val,
+ volatile __u32 __iomem *addr)
+{
+}
+#endif
static inline void ehci_writel(const struct ehci_hcd *ehci,
const unsigned int val, __u32 __iomem *regs)
{
@@ -634,14 +749,17 @@ static inline void ehci_writel(const struct ehci_hcd *ehci,
writel_be(val, regs) :
writel(val, regs);
#else
- writel(val, regs);
+ if (ehci->imx28_write_fix)
+ imx28_ehci_writel(val, regs);
+ else
+ writel(val, regs);
#endif
}
/*
* On certain ppc-44x SoC there is a HW issue, that could only worked around with
* explicit suspend/operate of OHCI. This function hereby makes sense only on that arch.
- * Other common bits are dependant on has_amcc_usb23 quirk flag.
+ * Other common bits are dependent on has_amcc_usb23 quirk flag.
*/
#ifdef CONFIG_44x
static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational)
@@ -720,10 +838,41 @@ static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
/*-------------------------------------------------------------------------*/
-#ifndef DEBUG
+#define ehci_dbg(ehci, fmt, args...) \
+ dev_dbg(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+#define ehci_err(ehci, fmt, args...) \
+ dev_err(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+#define ehci_info(ehci, fmt, args...) \
+ dev_info(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+#define ehci_warn(ehci, fmt, args...) \
+ dev_warn(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+
+
+#ifndef CONFIG_DYNAMIC_DEBUG
#define STUB_DEBUG_FILES
-#endif /* DEBUG */
+#endif
/*-------------------------------------------------------------------------*/
+/* Declarations of things exported for use by ehci platform drivers */
+
+struct ehci_driver_overrides {
+ size_t extra_priv_size;
+ int (*reset)(struct usb_hcd *hcd);
+};
+
+extern void ehci_init_driver(struct hc_driver *drv,
+ const struct ehci_driver_overrides *over);
+extern int ehci_setup(struct usb_hcd *hcd);
+extern int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
+ u32 mask, u32 done, int usec);
+
+#ifdef CONFIG_PM
+extern int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup);
+extern int ehci_resume(struct usb_hcd *hcd, bool hibernated);
+#endif /* CONFIG_PM */
+
+extern int ehci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength);
+
#endif /* __LINUX_EHCI_HCD_H */
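
Editorial note, not part of the patch: the exports declared at the end of ehci.h are what let the bus-glue drivers in this series (ehci-w90x900.c above is one example) shrink to thin wrappers around the generic EHCI core. A sketch of the intended usage with hypothetical foo_* names; only the override hook and the init path are shown, probe/remove are omitted:

	struct foo_platform_priv {		/* hypothetical per-controller data */
		void __iomem *ctrl_regs;
	};

	static struct hc_driver __read_mostly foo_hc_driver;

	static int foo_reset(struct usb_hcd *hcd)
	{
		/* platform-specific setup would go here, then the generic bring-up */
		return ehci_setup(hcd);
	}

	static const struct ehci_driver_overrides foo_overrides __initconst = {
		.extra_priv_size = sizeof(struct foo_platform_priv),
		.reset = foo_reset,
	};

	static int __init foo_hcd_init(void)
	{
		if (usb_disabled())
			return -ENODEV;

		ehci_init_driver(&foo_hc_driver, &foo_overrides);
		/* ...then register a platform_driver whose probe calls usb_create_hcd() */
		return 0;
	}
	module_init(foo_hcd_init);
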
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
index 6fe55004911..f238cb37305 100644
--- a/drivers/usb/host/fhci-dbg.c
+++ b/drivers/usb/host/fhci-dbg.c
@@ -41,7 +41,7 @@ void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er)
static int fhci_dfs_regs_show(struct seq_file *s, void *v)
{
struct fhci_hcd *fhci = s->private;
- struct fhci_regs __iomem *regs = fhci->regs;
+ struct qe_usb_ctlr __iomem *regs = fhci->regs;
seq_printf(s,
"mode: 0x%x\n" "addr: 0x%x\n"
@@ -50,11 +50,11 @@ static int fhci_dfs_regs_show(struct seq_file *s, void *v)
"status: 0x%x\n" "SOF timer: %d\n"
"frame number: %d\n"
"lines status: 0x%x\n",
- in_8(&regs->usb_mod), in_8(&regs->usb_addr),
- in_8(&regs->usb_comm), in_be16(&regs->usb_ep[0]),
- in_be16(&regs->usb_event), in_be16(&regs->usb_mask),
- in_8(&regs->usb_status), in_be16(&regs->usb_sof_tmr),
- in_be16(&regs->usb_frame_num),
+ in_8(&regs->usb_usmod), in_8(&regs->usb_usadr),
+ in_8(&regs->usb_uscom), in_be16(&regs->usb_usep[0]),
+ in_be16(&regs->usb_usber), in_be16(&regs->usb_usbmr),
+ in_8(&regs->usb_usbs), in_be16(&regs->usb_ussft),
+ in_be16(&regs->usb_usfrn),
fhci_ioports_check_bus_state(fhci));
return 0;
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 20092a27a1e..1cf68eaf2ed 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -26,6 +26,8 @@
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
@@ -40,8 +42,8 @@ void fhci_start_sof_timer(struct fhci_hcd *fhci)
/* clear frame_n */
out_be16(&fhci->pram->frame_num, 0);
- out_be16(&fhci->regs->usb_sof_tmr, 0);
- setbits8(&fhci->regs->usb_mod, USB_MODE_SFTE);
+ out_be16(&fhci->regs->usb_ussft, 0);
+ setbits8(&fhci->regs->usb_usmod, USB_MODE_SFTE);
fhci_dbg(fhci, "<- %s\n", __func__);
}
@@ -50,7 +52,7 @@ void fhci_stop_sof_timer(struct fhci_hcd *fhci)
{
fhci_dbg(fhci, "-> %s\n", __func__);
- clrbits8(&fhci->regs->usb_mod, USB_MODE_SFTE);
+ clrbits8(&fhci->regs->usb_usmod, USB_MODE_SFTE);
gtm_stop_timer16(fhci->timer);
fhci_dbg(fhci, "<- %s\n", __func__);
@@ -58,7 +60,7 @@ void fhci_stop_sof_timer(struct fhci_hcd *fhci)
u16 fhci_get_sof_timer_count(struct fhci_usb *usb)
{
- return be16_to_cpu(in_be16(&usb->fhci->regs->usb_sof_tmr) / 12);
+ return be16_to_cpu(in_be16(&usb->fhci->regs->usb_ussft) / 12);
}
/* initialize the endpoint zero */
@@ -88,8 +90,8 @@ void fhci_usb_enable_interrupt(struct fhci_usb *usb)
enable_irq(fhci_to_hcd(fhci)->irq);
/* initialize the event register and mask register */
- out_be16(&usb->fhci->regs->usb_event, 0xffff);
- out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
+ out_be16(&usb->fhci->regs->usb_usber, 0xffff);
+ out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
/* enable the timer interrupts */
enable_irq(fhci->timer->irq);
@@ -98,18 +100,18 @@ void fhci_usb_enable_interrupt(struct fhci_usb *usb)
usb->intr_nesting_cnt--;
}
-/* diable the usb interrupt */
+/* disable the usb interrupt */
void fhci_usb_disable_interrupt(struct fhci_usb *usb)
{
struct fhci_hcd *fhci = usb->fhci;
if (usb->intr_nesting_cnt == 0) {
- /* diable the timer interrupt */
+ /* disable the timer interrupt */
disable_irq_nosync(fhci->timer->irq);
/* disable the usb interrupt */
disable_irq_nosync(fhci_to_hcd(fhci)->irq);
- out_be16(&usb->fhci->regs->usb_mask, 0);
+ out_be16(&usb->fhci->regs->usb_usbmr, 0);
}
usb->intr_nesting_cnt++;
}
@@ -119,9 +121,9 @@ static u32 fhci_usb_enable(struct fhci_hcd *fhci)
{
struct fhci_usb *usb = fhci->usb_lld;
- out_be16(&usb->fhci->regs->usb_event, 0xffff);
- out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
- setbits8(&usb->fhci->regs->usb_mod, USB_MODE_EN);
+ out_be16(&usb->fhci->regs->usb_usber, 0xffff);
+ out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
+ setbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);
mdelay(100);
@@ -141,7 +143,7 @@ static u32 fhci_usb_disable(struct fhci_hcd *fhci)
usb->port_status == FHCI_PORT_LOW)
fhci_device_disconnected_interrupt(fhci);
- clrbits8(&usb->fhci->regs->usb_mod, USB_MODE_EN);
+ clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);
return 0;
}
@@ -285,13 +287,13 @@ static int fhci_usb_init(struct fhci_hcd *fhci)
USB_E_IDLE_MASK |
USB_E_RESET_MASK | USB_E_SFT_MASK | USB_E_MSF_MASK);
- out_8(&usb->fhci->regs->usb_mod, USB_MODE_HOST | USB_MODE_EN);
+ out_8(&usb->fhci->regs->usb_usmod, USB_MODE_HOST | USB_MODE_EN);
/* clearing the mask register */
- out_be16(&usb->fhci->regs->usb_mask, 0);
+ out_be16(&usb->fhci->regs->usb_usbmr, 0);
/* initialing the event register */
- out_be16(&usb->fhci->regs->usb_event, 0xffff);
+ out_be16(&usb->fhci->regs->usb_usber, 0xffff);
if (endpoint_zero_init(usb, DEFAULT_DATA_MEM, DEFAULT_RING_LEN) != 0) {
fhci_usb_free(usb);
@@ -401,7 +403,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
/* 1 td fro setup,1 for ack */
size = 2;
case PIPE_BULK:
- /* one td for every 4096 bytes(can be upto 8k) */
+ /* one td for every 4096 bytes(can be up to 8k) */
size += urb->transfer_buffer_length / 4096;
/* ...add for any remaining bytes... */
if ((urb->transfer_buffer_length % 4096) != 0)
@@ -561,8 +563,7 @@ static const struct hc_driver fhci_driver = {
.hub_control = fhci_hub_control,
};
-static int __devinit of_fhci_probe(struct platform_device *ofdev,
- const struct of_device_id *ofid)
+static int of_fhci_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *node = dev->of_node;
@@ -606,7 +607,7 @@ static int __devinit of_fhci_probe(struct platform_device *ofdev,
goto err_regs;
}
- hcd->regs = ioremap(usb_regs.start, usb_regs.end - usb_regs.start + 1);
+ hcd->regs = ioremap(usb_regs.start, resource_size(&usb_regs));
if (!hcd->regs) {
dev_err(dev, "could not ioremap regs\n");
ret = -ENOMEM;
@@ -622,12 +623,15 @@ static int __devinit of_fhci_probe(struct platform_device *ofdev,
goto err_pram;
}
- pram_addr = cpm_muram_alloc_fixed(iprop[2], FHCI_PRAM_SIZE);
+ pram_addr = cpm_muram_alloc(FHCI_PRAM_SIZE, 64);
if (IS_ERR_VALUE(pram_addr)) {
dev_err(dev, "failed to allocate usb pram\n");
ret = -ENOMEM;
goto err_pram;
}
+
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, QE_CR_SUBBLOCK_USB,
+ QE_CR_PROTOCOL_UNSPECIFIED, pram_addr);
fhci->pram = cpm_muram_addr(pram_addr);
/* GPIOs and pins */
@@ -687,7 +691,7 @@ static int __devinit of_fhci_probe(struct platform_device *ofdev,
}
ret = request_irq(fhci->timer->irq, fhci_frame_limit_timer_irq,
- IRQF_DISABLED, "qe timer (usb)", hcd);
+ 0, "qe timer (usb)", hcd);
if (ret) {
dev_err(dev, "failed to request timer irq");
goto err_timer_irq;
@@ -743,13 +747,15 @@ static int __devinit of_fhci_probe(struct platform_device *ofdev,
}
/* Clear and disable any pending interrupts. */
- out_be16(&fhci->regs->usb_event, 0xffff);
- out_be16(&fhci->regs->usb_mask, 0);
+ out_be16(&fhci->regs->usb_usber, 0xffff);
+ out_be16(&fhci->regs->usb_usbmr, 0);
- ret = usb_add_hcd(hcd, usb_irq, IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, usb_irq, 0);
if (ret < 0)
goto err_add_hcd;
+ device_wakeup_enable(hcd->self.controller);
+
fhci_dfs_create(fhci);
return 0;
@@ -778,7 +784,7 @@ err_regs:
return ret;
}
-static int __devexit fhci_remove(struct device *dev)
+static int fhci_remove(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
@@ -801,7 +807,7 @@ static int __devexit fhci_remove(struct device *dev)
return 0;
}
-static int __devexit of_fhci_remove(struct platform_device *ofdev)
+static int of_fhci_remove(struct platform_device *ofdev)
{
return fhci_remove(&ofdev->dev);
}
@@ -812,27 +818,17 @@ static const struct of_device_id of_fhci_match[] = {
};
MODULE_DEVICE_TABLE(of, of_fhci_match);
-static struct of_platform_driver of_fhci_driver = {
+static struct platform_driver of_fhci_driver = {
.driver = {
.name = "fsl,usb-fhci",
.owner = THIS_MODULE,
.of_match_table = of_fhci_match,
},
.probe = of_fhci_probe,
- .remove = __devexit_p(of_fhci_remove),
+ .remove = of_fhci_remove,
};
-static int __init fhci_module_init(void)
-{
- return of_register_platform_driver(&of_fhci_driver);
-}
-module_init(fhci_module_init);
-
-static void __exit fhci_module_exit(void)
-{
- of_unregister_platform_driver(&of_fhci_driver);
-}
-module_exit(fhci_module_exit);
+module_platform_driver(of_fhci_driver);
MODULE_DESCRIPTION("USB Freescale Host Controller Interface Driver");
MODULE_AUTHOR("Shlomi Gridish <gridish@freescale.com>, "
diff --git a/drivers/usb/host/fhci-hub.c b/drivers/usb/host/fhci-hub.c
index 348fe62e94f..6af2512f837 100644
--- a/drivers/usb/host/fhci-hub.c
+++ b/drivers/usb/host/fhci-hub.c
@@ -97,7 +97,7 @@ void fhci_port_disable(struct fhci_hcd *fhci)
/* Enable IDLE since we want to know if something comes along */
usb->saved_msk |= USB_E_IDLE_MASK;
- out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
+ out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
/* check if during the disconnection process attached new device */
if (port_status == FHCI_PORT_WAITING)
@@ -158,21 +158,21 @@ void fhci_port_reset(void *lld)
fhci_stop_sof_timer(fhci);
/* disable the USB controller */
- mode = in_8(&fhci->regs->usb_mod);
- out_8(&fhci->regs->usb_mod, mode & (~USB_MODE_EN));
+ mode = in_8(&fhci->regs->usb_usmod);
+ out_8(&fhci->regs->usb_usmod, mode & (~USB_MODE_EN));
/* disable idle interrupts */
- mask = in_be16(&fhci->regs->usb_mask);
- out_be16(&fhci->regs->usb_mask, mask & (~USB_E_IDLE_MASK));
+ mask = in_be16(&fhci->regs->usb_usbmr);
+ out_be16(&fhci->regs->usb_usbmr, mask & (~USB_E_IDLE_MASK));
fhci_io_port_generate_reset(fhci);
/* enable interrupt on this endpoint */
- out_be16(&fhci->regs->usb_mask, mask);
+ out_be16(&fhci->regs->usb_usbmr, mask);
/* enable the USB controller */
- mode = in_8(&fhci->regs->usb_mod);
- out_8(&fhci->regs->usb_mod, mode | USB_MODE_EN);
+ mode = in_8(&fhci->regs->usb_usmod);
+ out_8(&fhci->regs->usb_usmod, mode | USB_MODE_EN);
fhci_start_sof_timer(fhci);
fhci_dbg(fhci, "<- %s\n", __func__);
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index a42ef380e91..95ca5986e67 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -1,7 +1,7 @@
/*
* Freescale QUICC Engine USB Host Controller Driver
*
- * Copyright (c) Freescale Semicondutor, Inc. 2006.
+ * Copyright (c) Freescale Semicondutor, Inc. 2006, 2011.
* Shlomi Gridish <gridish@freescale.com>
* Jerry Huang <Chang-Ming.Huang@freescale.com>
* Copyright (c) Logic Product Development, Inc. 2007
@@ -132,8 +132,8 @@ void fhci_flush_all_transmissions(struct fhci_usb *usb)
u8 mode;
struct td *td;
- mode = in_8(&usb->fhci->regs->usb_mod);
- clrbits8(&usb->fhci->regs->usb_mod, USB_MODE_EN);
+ mode = in_8(&usb->fhci->regs->usb_usmod);
+ clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);
fhci_flush_bds(usb);
@@ -147,9 +147,9 @@ void fhci_flush_all_transmissions(struct fhci_usb *usb)
usb->actual_frame->frame_status = FRAME_END_TRANSMISSION;
/* reset the event register */
- out_be16(&usb->fhci->regs->usb_event, 0xffff);
+ out_be16(&usb->fhci->regs->usb_usber, 0xffff);
/* enable the USB controller */
- out_8(&usb->fhci->regs->usb_mod, mode | USB_MODE_EN);
+ out_8(&usb->fhci->regs->usb_usmod, mode | USB_MODE_EN);
}
/*
@@ -261,8 +261,7 @@ static void move_head_to_tail(struct list_head *list)
struct list_head *node = list->next;
if (!list_empty(list)) {
- list_del(node);
- list_add_tail(node, list);
+ list_move_tail(node, list);
}
}
@@ -414,7 +413,7 @@ static void sof_interrupt(struct fhci_hcd *fhci)
usb->port_status = FHCI_PORT_FULL;
/* Disable IDLE */
usb->saved_msk &= ~USB_E_IDLE_MASK;
- out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
+ out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
}
gtm_set_exact_timer16(fhci->timer, usb->max_frame_usage, false);
@@ -433,14 +432,14 @@ void fhci_device_disconnected_interrupt(struct fhci_hcd *fhci)
fhci_dbg(fhci, "-> %s\n", __func__);
fhci_usb_disable_interrupt(usb);
- clrbits8(&usb->fhci->regs->usb_mod, USB_MODE_LSS);
+ clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_LSS);
usb->port_status = FHCI_PORT_DISABLED;
fhci_stop_sof_timer(fhci);
/* Enable IDLE since we want to know if something comes along */
usb->saved_msk |= USB_E_IDLE_MASK;
- out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
+ out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
usb->vroot_hub->port.wPortStatus &= ~USB_PORT_STAT_CONNECTION;
usb->vroot_hub->port.wPortChange |= USB_PORT_STAT_C_CONNECTION;
@@ -473,7 +472,7 @@ void fhci_device_connected_interrupt(struct fhci_hcd *fhci)
}
usb->port_status = FHCI_PORT_LOW;
- setbits8(&usb->fhci->regs->usb_mod, USB_MODE_LSS);
+ setbits8(&usb->fhci->regs->usb_usmod, USB_MODE_LSS);
usb->vroot_hub->port.wPortStatus |=
(USB_PORT_STAT_LOW_SPEED |
USB_PORT_STAT_CONNECTION);
@@ -491,7 +490,7 @@ void fhci_device_connected_interrupt(struct fhci_hcd *fhci)
}
usb->port_status = FHCI_PORT_FULL;
- clrbits8(&usb->fhci->regs->usb_mod, USB_MODE_LSS);
+ clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_LSS);
usb->vroot_hub->port.wPortStatus &=
~USB_PORT_STAT_LOW_SPEED;
usb->vroot_hub->port.wPortStatus |=
@@ -535,7 +534,7 @@ static void abort_transmission(struct fhci_usb *usb)
/* issue stop Tx command */
qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB, EP_ZERO, 0);
/* flush Tx FIFOs */
- out_8(&usb->fhci->regs->usb_comm, USB_CMD_FLUSH_FIFO | EP_ZERO);
+ out_8(&usb->fhci->regs->usb_uscom, USB_CMD_FLUSH_FIFO | EP_ZERO);
udelay(1000);
/* reset Tx BDs */
fhci_flush_bds(usb);
@@ -555,11 +554,11 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd)
usb = fhci->usb_lld;
- usb_er |= in_be16(&usb->fhci->regs->usb_event) &
- in_be16(&usb->fhci->regs->usb_mask);
+ usb_er |= in_be16(&usb->fhci->regs->usb_usber) &
+ in_be16(&usb->fhci->regs->usb_usbmr);
/* clear event bits for next time */
- out_be16(&usb->fhci->regs->usb_event, usb_er);
+ out_be16(&usb->fhci->regs->usb_usber, usb_er);
fhci_dbg_isr(fhci, usb_er);
@@ -573,7 +572,7 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd)
/* Turn on IDLE since we want to disconnect */
usb->saved_msk |= USB_E_IDLE_MASK;
- out_be16(&usb->fhci->regs->usb_event,
+ out_be16(&usb->fhci->regs->usb_usber,
usb->saved_msk);
} else if (usb->port_status == FHCI_PORT_DISABLED) {
if (fhci_ioports_check_bus_state(fhci) == 1)
@@ -611,7 +610,7 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd)
/* XXX usb->port_status = FHCI_PORT_WAITING; */
/* Disable IDLE */
usb->saved_msk &= ~USB_E_IDLE_MASK;
- out_be16(&usb->fhci->regs->usb_mask,
+ out_be16(&usb->fhci->regs->usb_usbmr,
usb->saved_msk);
} else {
fhci_dbg_isr(fhci, -1);
@@ -740,9 +739,13 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
}
/* for ISO transfer calculate start frame index */
- if (ed->mode == FHCI_TF_ISO && urb->transfer_flags & URB_ISO_ASAP)
- urb->start_frame = ed->td_head ? ed->last_iso + 1 :
+ if (ed->mode == FHCI_TF_ISO) {
+ /* Ignore the possibility of underruns */
+ urb->start_frame = ed->td_head ? ed->next_iso :
get_frame_num(fhci);
+ ed->next_iso = (urb->start_frame + urb->interval *
+ urb->number_of_packets) & 0x07ff;
+ }
/*
* OHCI handles the DATA toggle itself,we just use the USB
@@ -810,9 +813,11 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
ed->dev_addr = usb_pipedevice(urb->pipe);
ed->max_pkt_size = usb_maxpacket(urb->dev, urb->pipe,
usb_pipeout(urb->pipe));
+ /* setup stage */
td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++, FHCI_TA_SETUP,
USB_TD_TOGGLE_DATA0, urb->setup_packet, 8, 0, 0, true);
+ /* data stage */
if (data_len > 0) {
td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
@@ -820,9 +825,18 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
USB_TD_TOGGLE_DATA1, data, data_len, 0, 0,
true);
}
- td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
- usb_pipeout(urb->pipe) ? FHCI_TA_IN : FHCI_TA_OUT,
- USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+
+ /* status stage */
+ if (data_len > 0)
+ td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
+ (usb_pipeout(urb->pipe) ? FHCI_TA_IN :
+ FHCI_TA_OUT),
+ USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+ else
+ td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
+ FHCI_TA_IN,
+ USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+
urb_state = US_CTRL_SETUP;
break;
case FHCI_TF_ISO:
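
Editorial note, not part of the patch: the new ISO queuing code above keeps ed->next_iso one interval * number_of_packets past the frame it just scheduled, masked with 0x07ff because USB frame numbers are 11 bits wide (0..2047). A worked example under those assumptions: with start_frame = 2040, interval = 4 and number_of_packets = 5, next_iso = (2040 + 4 * 5) & 0x07ff = 2060 & 0x07ff = 12, i.e. the counter wraps past 2047 back to 12.
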
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index 7be548ca218..1498061f0ae 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -40,7 +40,7 @@
#define TD_RXER 0x0020 /* Rx error or not */
#define TD_NAK 0x0010 /* No ack. */
-#define TD_STAL 0x0008 /* Stall recieved */
+#define TD_STAL 0x0008 /* Stall received */
#define TD_TO 0x0004 /* time out */
#define TD_UN 0x0002 /* underrun */
#define TD_NO 0x0010 /* Rx Non Octet Aligned Packet */
@@ -155,7 +155,7 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
struct endpoint *ep;
struct usb_td __iomem *td;
unsigned long ep_offset;
- char *err_for = "enpoint PRAM";
+ char *err_for = "endpoint PRAM";
int ep_mem_size;
u32 i;
@@ -249,7 +249,7 @@ void fhci_init_ep_registers(struct fhci_usb *usb, struct endpoint *ep,
u8 rt;
/* set the endpoint registers according to the endpoint */
- out_be16(&usb->fhci->regs->usb_ep[0],
+ out_be16(&usb->fhci->regs->usb_usep[0],
USB_TRANS_CTR | USB_EP_MF | USB_EP_RTE);
out_be16(&usb->fhci->pram->ep_ptr[0],
cpm_muram_offset(ep->ep_pram_ptr));
@@ -271,10 +271,10 @@ void fhci_init_ep_registers(struct fhci_usb *usb, struct endpoint *ep,
/*
* Collect the submitted frames and inform the application about them
- * It is also prepearing the TDs for new frames. If the Tx interrupts
- * are diabled, the application should call that routine to get
+ * It is also preparing the TDs for new frames. If the Tx interrupts
+ * are disabled, the application should call that routine to get
* confirmation about the submitted frames. Otherwise, the routine is
- * called frome the interrupt service routine during the Tx interrupt.
+ * called from the interrupt service routine during the Tx interrupt.
* In that case the application is informed by calling the application
* specific 'fhci_transaction_confirm' routine
*/
@@ -337,7 +337,7 @@ static void fhci_td_transaction_confirm(struct fhci_usb *usb)
pkt->status = USB_TD_RX_ER_NONOCT;
else
fhci_err(usb->fhci, "illegal error "
- "occured\n");
+ "occurred\n");
} else if (td_status & TD_NAK)
pkt->status = USB_TD_TX_ER_NAK;
else if (td_status & TD_TO)
@@ -347,7 +347,7 @@ static void fhci_td_transaction_confirm(struct fhci_usb *usb)
else if (td_status & TD_STAL)
pkt->status = USB_TD_TX_ER_STALL;
else
- fhci_err(usb->fhci, "illegal error occured\n");
+ fhci_err(usb->fhci, "illegal error occurred\n");
} else if ((extra_data & TD_TOK_IN) &&
pkt->len > td_length - CRC_SIZE) {
pkt->status = USB_TD_RX_DATA_UNDERUN;
@@ -463,7 +463,7 @@ u32 fhci_host_transaction(struct fhci_usb *usb,
cq_put(&ep->conf_frame_Q, pkt);
if (cq_howmany(&ep->conf_frame_Q) == 1)
- out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO);
+ out_8(&usb->fhci->regs->usb_uscom, USB_CMD_STR_FIFO);
return 0;
}
@@ -535,8 +535,8 @@ void fhci_flush_actual_frame(struct fhci_usb *usb)
struct endpoint *ep = usb->ep0;
/* disable the USB controller */
- mode = in_8(&usb->fhci->regs->usb_mod);
- out_8(&usb->fhci->regs->usb_mod, mode & ~USB_MODE_EN);
+ mode = in_8(&usb->fhci->regs->usb_usmod);
+ out_8(&usb->fhci->regs->usb_usmod, mode & ~USB_MODE_EN);
tb_ptr = in_be16(&ep->ep_pram_ptr->tx_bd_ptr);
td = cpm_muram_addr(tb_ptr);
@@ -571,9 +571,9 @@ void fhci_flush_actual_frame(struct fhci_usb *usb)
usb->actual_frame->frame_status = FRAME_TIMER_END_TRANSMISSION;
/* reset the event register */
- out_be16(&usb->fhci->regs->usb_event, 0xffff);
+ out_be16(&usb->fhci->regs->usb_usber, 0xffff);
/* enable the USB controller */
- out_8(&usb->fhci->regs->usb_mod, mode | USB_MODE_EN);
+ out_8(&usb->fhci->regs->usb_usmod, mode | USB_MODE_EN);
}
/* handles Tx confirm and Tx error interrupt */
@@ -613,7 +613,7 @@ void fhci_host_transmit_actual_frame(struct fhci_usb *usb)
/* start transmit only if we have something in the TDs */
if (in_be16(&td->status) & TD_R)
- out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO);
+ out_8(&usb->fhci->regs->usb_uscom, USB_CMD_STR_FIFO);
if (in_be32(&ep->conf_td->buf_ptr) == DUMMY_BD_BUFFER) {
out_be32(&old_td->buf_ptr, 0);
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 71c3caaea4c..154e6a00772 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -28,6 +28,7 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <asm/qe.h>
+#include <asm/immap_qe.h>
#define USB_CLOCK 48000000
@@ -82,7 +83,7 @@
#define USB_TD_RX_ER_NONOCT 0x40000000 /* Tx Non Octet Aligned Packet */
#define USB_TD_RX_ER_BITSTUFF 0x20000000 /* Frame Aborted-Received pkt */
#define USB_TD_RX_ER_CRC 0x10000000 /* CRC error */
-#define USB_TD_RX_ER_OVERUN 0x08000000 /* Over - run occured */
+#define USB_TD_RX_ER_OVERUN 0x08000000 /* Over - run occurred */
#define USB_TD_RX_ER_PID 0x04000000 /* wrong PID received */
#define USB_TD_RX_DATA_UNDERUN 0x02000000 /* shorter than expected */
#define USB_TD_RX_DATA_OVERUN 0x01000000 /* longer than expected */
@@ -173,25 +174,6 @@
#define USB_E_TXB_MASK 0x0002
#define USB_E_RXB_MASK 0x0001
-/* Freescale USB Host controller registers */
-struct fhci_regs {
- u8 usb_mod; /* mode register */
- u8 usb_addr; /* address register */
- u8 usb_comm; /* command register */
- u8 reserved1[1];
- __be16 usb_ep[4]; /* endpoint register */
- u8 reserved2[4];
- __be16 usb_event; /* event register */
- u8 reserved3[2];
- __be16 usb_mask; /* mask register */
- u8 reserved4[1];
- u8 usb_status; /* status register */
- __be16 usb_sof_tmr; /* Start Of Frame timer */
- u8 reserved5[2];
- __be16 usb_frame_num; /* frame number register */
- u8 reserved6[1];
-};
-
/* Freescale USB HOST */
struct fhci_pram {
__be16 ep_ptr[4]; /* Endpoint porter reg */
@@ -267,7 +249,7 @@ struct fhci_hcd {
int gpios[NUM_GPIOS];
bool alow_gpios[NUM_GPIOS];
- struct fhci_regs __iomem *regs; /* I/O memory used to communicate */
+ struct qe_usb_ctlr __iomem *regs; /* I/O memory used to communicate */
struct fhci_pram __iomem *pram; /* Parameter RAM */
struct gtm_timer *timer;
@@ -356,14 +338,14 @@ struct ed {
/* read only parameters, should be cleared upon initialization */
u8 toggle_carry; /* toggle carry from the last TD submitted */
- u32 last_iso; /* time stamp of last queued ISO transfer */
+ u16 next_iso; /* time stamp of next queued ISO transfer */
struct td *td_head; /* a pointer to the current TD handled */
};
struct td {
void *data; /* a pointer to the data buffer */
unsigned int len; /* length of the data to be submitted */
- unsigned int actual_len; /* actual bytes transfered on this td */
+ unsigned int actual_len; /* actual bytes transferred on this td */
enum fhci_ta_type type; /* transaction type */
u8 toggle; /* toggle for next trans. within this TD */
u16 iso_index; /* ISO transaction index */
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
new file mode 100644
index 00000000000..98a89d16cc3
--- /dev/null
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -0,0 +1,5981 @@
+/*
+ * Faraday FOTG210 EHCI-like driver
+ *
+ * Copyright (c) 2013 Faraday Technology Corporation
+ *
+ * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
+ * Feng-Hsin Chiang <john453@faraday-tech.com>
+ * Po-Yu Chuang <ratbert.chuang@gmail.com>
+ *
+ * Most of code borrowed from the Linux-3.7 EHCI driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+/*-------------------------------------------------------------------------*/
+#define DRIVER_AUTHOR "Yuan-Hsin Chen"
+#define DRIVER_DESC "FOTG210 Host Controller (EHCI) Driver"
+
+static const char hcd_name[] = "fotg210_hcd";
+
+#undef FOTG210_URB_TRACE
+
+#define FOTG210_STATS
+
+/* magic numbers that can affect system performance */
+#define FOTG210_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
+#define FOTG210_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
+#define FOTG210_TUNE_RL_TT 0
+#define FOTG210_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
+#define FOTG210_TUNE_MULT_TT 1
+/*
+ * Some drivers think it's safe to schedule isochronous transfers more than
+ * 256 ms into the future (partly as a result of an old bug in the scheduling
+ * code). In an attempt to avoid trouble, we will use a minimum scheduling
+ * length of 512 frames instead of 256.
+ */
+#define FOTG210_TUNE_FLS 1 /* (medium) 512-frame schedule */
+
+/* Initial IRQ latency: faster than hw default */
+static int log2_irq_thresh; /* 0 to 6 */
+module_param(log2_irq_thresh, int, S_IRUGO);
+MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
+
+/* initial park setting: slower than hw default */
+static unsigned park;
+module_param(park, uint, S_IRUGO);
+MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
+
+/* for link power management(LPM) feature */
+static unsigned int hird;
+module_param(hird, int, S_IRUGO);
+MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
+
+#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+#include "fotg210.h"
+
+/*-------------------------------------------------------------------------*/
+
+#define fotg210_dbg(fotg210, fmt, args...) \
+ dev_dbg(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+#define fotg210_err(fotg210, fmt, args...) \
+ dev_err(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+#define fotg210_info(fotg210, fmt, args...) \
+ dev_info(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+#define fotg210_warn(fotg210, fmt, args...) \
+ dev_warn(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+
+/* check the values in the HCSPARAMS register
+ * (host controller _Structural_ parameters)
+ * see EHCI spec, Table 2-4 for each value
+ */
+static void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label)
+{
+ u32 params = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
+
+ fotg210_dbg(fotg210,
+ "%s hcs_params 0x%x ports=%d\n",
+ label, params,
+ HCS_N_PORTS(params)
+ );
+}
+
+/* check the values in the HCCPARAMS register
+ * (host controller _Capability_ parameters)
+ * see EHCI Spec, Table 2-5 for each value
+ * */
+static void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label)
+{
+ u32 params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+
+ fotg210_dbg(fotg210,
+ "%s hcc_params %04x uframes %s%s\n",
+ label,
+ params,
+ HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+ HCC_CANPARK(params) ? " park" : "");
+}
+
+static void __maybe_unused
+dbg_qtd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd)
+{
+ fotg210_dbg(fotg210, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
+ hc32_to_cpup(fotg210, &qtd->hw_next),
+ hc32_to_cpup(fotg210, &qtd->hw_alt_next),
+ hc32_to_cpup(fotg210, &qtd->hw_token),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[0]));
+ if (qtd->hw_buf[1])
+ fotg210_dbg(fotg210, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
+ hc32_to_cpup(fotg210, &qtd->hw_buf[1]),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[2]),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[3]),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[4]));
+}
+
+static void __maybe_unused
+dbg_qh(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label,
+ qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
+ dbg_qtd("overlay", fotg210, (struct fotg210_qtd *) &hw->hw_qtd_next);
+}
+
+static void __maybe_unused
+dbg_itd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
+{
+ fotg210_dbg(fotg210, "%s[%d] itd %p, next %08x, urb %p\n",
+ label, itd->frame, itd, hc32_to_cpu(fotg210, itd->hw_next),
+ itd->urb);
+ fotg210_dbg(fotg210,
+ " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ hc32_to_cpu(fotg210, itd->hw_transaction[0]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[1]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[2]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[3]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[4]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[5]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[6]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[7]));
+ fotg210_dbg(fotg210,
+ " buf: %08x %08x %08x %08x %08x %08x %08x\n",
+ hc32_to_cpu(fotg210, itd->hw_bufp[0]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[1]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[2]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[3]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[4]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[5]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[6]));
+ fotg210_dbg(fotg210, " index: %d %d %d %d %d %d %d %d\n",
+ itd->index[0], itd->index[1], itd->index[2],
+ itd->index[3], itd->index[4], itd->index[5],
+ itd->index[6], itd->index[7]);
+}
+
+static int __maybe_unused
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{
+ return scnprintf(buf, len,
+ "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+ label, label[0] ? " " : "", status,
+ (status & STS_ASS) ? " Async" : "",
+ (status & STS_PSS) ? " Periodic" : "",
+ (status & STS_RECL) ? " Recl" : "",
+ (status & STS_HALT) ? " Halt" : "",
+ (status & STS_IAA) ? " IAA" : "",
+ (status & STS_FATAL) ? " FATAL" : "",
+ (status & STS_FLR) ? " FLR" : "",
+ (status & STS_PCD) ? " PCD" : "",
+ (status & STS_ERR) ? " ERR" : "",
+ (status & STS_INT) ? " INT" : ""
+ );
+}
+
+static int __maybe_unused
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{
+ return scnprintf(buf, len,
+ "%s%sintrenable %02x%s%s%s%s%s%s",
+ label, label[0] ? " " : "", enable,
+ (enable & STS_IAA) ? " IAA" : "",
+ (enable & STS_FATAL) ? " FATAL" : "",
+ (enable & STS_FLR) ? " FLR" : "",
+ (enable & STS_PCD) ? " PCD" : "",
+ (enable & STS_ERR) ? " ERR" : "",
+ (enable & STS_INT) ? " INT" : ""
+ );
+}
+
+static const char *const fls_strings[] = { "1024", "512", "256", "??" };
+
+static int
+dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
+{
+ return scnprintf(buf, len,
+ "%s%scommand %07x %s=%d ithresh=%d%s%s%s "
+ "period=%s%s %s",
+ label, label[0] ? " " : "", command,
+ (command & CMD_PARK) ? " park" : "(park)",
+ CMD_PARK_CNT(command),
+ (command >> 16) & 0x3f,
+ (command & CMD_IAAD) ? " IAAD" : "",
+ (command & CMD_ASE) ? " Async" : "",
+ (command & CMD_PSE) ? " Periodic" : "",
+ fls_strings[(command >> 2) & 0x3],
+ (command & CMD_RESET) ? " Reset" : "",
+ (command & CMD_RUN) ? "RUN" : "HALT"
+ );
+}
+
+static char
+*dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
+{
+ char *sig;
+
+ /* signaling state */
+ switch (status & (3 << 10)) {
+ case 0 << 10:
+ sig = "se0";
+ break;
+ case 1 << 10:
+ sig = "k";
+ break; /* low speed */
+ case 2 << 10:
+ sig = "j";
+ break;
+ default:
+ sig = "?";
+ break;
+ }
+
+ scnprintf(buf, len,
+ "%s%sport:%d status %06x %d "
+ "sig=%s%s%s%s%s%s%s%s",
+ label, label[0] ? " " : "", port, status,
+ status>>25,/*device address */
+ sig,
+ (status & PORT_RESET) ? " RESET" : "",
+ (status & PORT_SUSPEND) ? " SUSPEND" : "",
+ (status & PORT_RESUME) ? " RESUME" : "",
+ (status & PORT_PEC) ? " PEC" : "",
+ (status & PORT_PE) ? " PE" : "",
+ (status & PORT_CSC) ? " CSC" : "",
+ (status & PORT_CONNECT) ? " CONNECT" : "");
+ return buf;
+}
+
+/* functions have the "wrong" filename when they're output... */
+#define dbg_status(fotg210, label, status) { \
+ char _buf[80]; \
+ dbg_status_buf(_buf, sizeof(_buf), label, status); \
+ fotg210_dbg(fotg210, "%s\n", _buf); \
+}
+
+#define dbg_cmd(fotg210, label, command) { \
+ char _buf[80]; \
+ dbg_command_buf(_buf, sizeof(_buf), label, command); \
+ fotg210_dbg(fotg210, "%s\n", _buf); \
+}
+
+#define dbg_port(fotg210, label, port, status) { \
+ char _buf[80]; \
+ fotg210_dbg(fotg210, "%s\n", \
+ dbg_port_buf(_buf, sizeof(_buf), label, port, status)); \
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* troubleshooting help: expose state in debugfs */
+
+static int debug_async_open(struct inode *, struct file *);
+static int debug_periodic_open(struct inode *, struct file *);
+static int debug_registers_open(struct inode *, struct file *);
+
+static ssize_t debug_output(struct file *, char __user *, size_t, loff_t *);
+static int debug_close(struct inode *, struct file *);
+
+static const struct file_operations debug_async_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_async_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+static const struct file_operations debug_periodic_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_periodic_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+static const struct file_operations debug_registers_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_registers_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+
+static struct dentry *fotg210_debug_root;
+
+struct debug_buffer {
+ ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
+ struct usb_bus *bus;
+ struct mutex mutex; /* protect filling of buffer */
+ size_t count; /* number of characters filled into buffer */
+ char *output_buf;
+ size_t alloc_size;
+};
+
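+/* map the endpoint speed bits in a QH's hw_info1 to a single character: f/l/h */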
+#define speed_char(info1)({ char tmp; \
+ switch (info1 & (3 << 12)) { \
+ case QH_FULL_SPEED: \
+ tmp = 'f'; break; \
+ case QH_LOW_SPEED: \
+ tmp = 'l'; break; \
+ case QH_HIGH_SPEED: \
+ tmp = 'h'; break; \
+ default: \
+ tmp = '?'; break; \
+ } tmp; })
+
+static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token)
+{
+ __u32 v = hc32_to_cpu(fotg210, token);
+
+ if (v & QTD_STS_ACTIVE)
+ return '*';
+ if (v & QTD_STS_HALT)
+ return '-';
+ if (!IS_SHORT_READ(v))
+ return ' ';
+ /* tries to advance through hw_alt_next */
+ return '/';
+}
+
+static void qh_lines(
+ struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh,
+ char **nextp,
+ unsigned *sizep
+)
+{
+ u32 scratch;
+ u32 hw_curr;
+ struct fotg210_qtd *td;
+ unsigned temp;
+ unsigned size = *sizep;
+ char *next = *nextp;
+ char mark;
+ __le32 list_end = FOTG210_LIST_END(fotg210);
+ struct fotg210_qh_hw *hw = qh->hw;
+
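+ /* one-character queue state: '@' empty, '*' active, '-' halted,
+ * '/' short read via hw_alt_next, '#' blocked, '.' advance via hw_qtd_next
+ */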
+ if (hw->hw_qtd_next == list_end) /* NEC does this */
+ mark = '@';
+ else
+ mark = token_mark(fotg210, hw->hw_token);
+ if (mark == '/') { /* qh_alt_next controls qh advance? */
+ if ((hw->hw_alt_next & QTD_MASK(fotg210))
+ == fotg210->async->hw->hw_alt_next)
+ mark = '#'; /* blocked */
+ else if (hw->hw_alt_next == list_end)
+ mark = '.'; /* use hw_qtd_next */
+ /* else alt_next points to some other qtd */
+ }
+ scratch = hc32_to_cpup(fotg210, &hw->hw_info1);
+ hw_curr = (mark == '*') ? hc32_to_cpup(fotg210, &hw->hw_current) : 0;
+ temp = scnprintf(next, size,
+ "qh/%p dev%d %cs ep%d %08x %08x(%08x%c %s nak%d)",
+ qh, scratch & 0x007f,
+ speed_char(scratch),
+ (scratch >> 8) & 0x000f,
+ scratch, hc32_to_cpup(fotg210, &hw->hw_info2),
+ hc32_to_cpup(fotg210, &hw->hw_token), mark,
+ (cpu_to_hc32(fotg210, QTD_TOGGLE) & hw->hw_token)
+ ? "data1" : "data0",
+ (hc32_to_cpup(fotg210, &hw->hw_alt_next) >> 1) & 0x0f);
+ size -= temp;
+ next += temp;
+
+ /* hc may be modifying the list as we read it ... */
+ list_for_each_entry(td, &qh->qtd_list, qtd_list) {
+ scratch = hc32_to_cpup(fotg210, &td->hw_token);
+ mark = ' ';
+ if (hw_curr == td->qtd_dma)
+ mark = '*';
+ else if (hw->hw_qtd_next == cpu_to_hc32(fotg210, td->qtd_dma))
+ mark = '+';
+ else if (QTD_LENGTH(scratch)) {
+ if (td->hw_alt_next == fotg210->async->hw->hw_alt_next)
+ mark = '#';
+ else if (td->hw_alt_next != list_end)
+ mark = '/';
+ }
+ temp = snprintf(next, size,
+ "\n\t%p%c%s len=%d %08x urb %p",
+ td, mark, ({ char *tmp;
+ switch ((scratch>>8)&0x03) {
+ case 0:
+ tmp = "out";
+ break;
+ case 1:
+ tmp = "in";
+ break;
+ case 2:
+ tmp = "setup";
+ break;
+ default:
+ tmp = "?";
+ break;
+ } tmp; }),
+ (scratch >> 16) & 0x7fff,
+ scratch,
+ td->urb);
+ if (size < temp)
+ temp = size;
+ size -= temp;
+ next += temp;
+ if (temp == size)
+ goto done;
+ }
+
+ temp = snprintf(next, size, "\n");
+ if (size < temp)
+ temp = size;
+ size -= temp;
+ next += temp;
+
+done:
+ *sizep = size;
+ *nextp = next;
+}
+
+static ssize_t fill_async_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fotg210_hcd *fotg210;
+ unsigned long flags;
+ unsigned temp, size;
+ char *next;
+ struct fotg210_qh *qh;
+
+ hcd = bus_to_hcd(buf->bus);
+ fotg210 = hcd_to_fotg210(hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ *next = 0;
+
+ /* dumps a snapshot of the async schedule.
+ * usually empty except for long-term bulk reads, or head.
+ * one QH per line, and TDs we know about
+ */
+ spin_lock_irqsave(&fotg210->lock, flags);
+ for (qh = fotg210->async->qh_next.qh; size > 0 && qh;
+ qh = qh->qh_next.qh)
+ qh_lines(fotg210, qh, &next, &size);
+ if (fotg210->async_unlink && size > 0) {
+ temp = scnprintf(next, size, "\nunlink =\n");
+ size -= temp;
+ next += temp;
+
+ for (qh = fotg210->async_unlink; size > 0 && qh;
+ qh = qh->unlink_next)
+ qh_lines(fotg210, qh, &next, &size);
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+
+ return strlen(buf->output_buf);
+}
+
+#define DBG_SCHED_LIMIT 64
+static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fotg210_hcd *fotg210;
+ unsigned long flags;
+ union fotg210_shadow p, *seen;
+ unsigned temp, size, seen_count;
+ char *next;
+ unsigned i;
+ __hc32 tag;
+
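+ /* "seen" records QHs already dumped; shared interrupt QHs recur
+ * across frames, so repeats are shortened to "..."
+ */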
+ seen = kmalloc(DBG_SCHED_LIMIT * sizeof(*seen), GFP_ATOMIC);
+ if (!seen)
+ return 0;
+ seen_count = 0;
+
+ hcd = bus_to_hcd(buf->bus);
+ fotg210 = hcd_to_fotg210(hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ temp = scnprintf(next, size, "size = %d\n", fotg210->periodic_size);
+ size -= temp;
+ next += temp;
+
+ /* dump a snapshot of the periodic schedule.
+ * iso changes, interrupt usually doesn't.
+ */
+ spin_lock_irqsave(&fotg210->lock, flags);
+ for (i = 0; i < fotg210->periodic_size; i++) {
+ p = fotg210->pshadow[i];
+ if (likely(!p.ptr))
+ continue;
+ tag = Q_NEXT_TYPE(fotg210, fotg210->periodic[i]);
+
+ temp = scnprintf(next, size, "%4d: ", i);
+ size -= temp;
+ next += temp;
+
+ do {
+ struct fotg210_qh_hw *hw;
+
+ switch (hc32_to_cpu(fotg210, tag)) {
+ case Q_TYPE_QH:
+ hw = p.qh->hw;
+ temp = scnprintf(next, size, " qh%d-%04x/%p",
+ p.qh->period,
+ hc32_to_cpup(fotg210,
+ &hw->hw_info2)
+ /* uframe masks */
+ & (QH_CMASK | QH_SMASK),
+ p.qh);
+ size -= temp;
+ next += temp;
+ /* don't repeat what follows this qh */
+ for (temp = 0; temp < seen_count; temp++) {
+ if (seen[temp].ptr != p.ptr)
+ continue;
+ if (p.qh->qh_next.ptr) {
+ temp = scnprintf(next, size,
+ " ...");
+ size -= temp;
+ next += temp;
+ }
+ break;
+ }
+ /* show more info the first time around */
+ if (temp == seen_count) {
+ u32 scratch = hc32_to_cpup(fotg210,
+ &hw->hw_info1);
+ struct fotg210_qtd *qtd;
+ char *type = "";
+
+ /* count tds, get ep direction */
+ temp = 0;
+ list_for_each_entry(qtd,
+ &p.qh->qtd_list,
+ qtd_list) {
+ temp++;
+ switch (0x03 & (hc32_to_cpu(
+ fotg210,
+ qtd->hw_token) >> 8)) {
+ case 0:
+ type = "out";
+ continue;
+ case 1:
+ type = "in";
+ continue;
+ }
+ }
+
+ temp = scnprintf(next, size,
+ "(%c%d ep%d%s "
+ "[%d/%d] q%d p%d)",
+ speed_char(scratch),
+ scratch & 0x007f,
+ (scratch >> 8) & 0x000f, type,
+ p.qh->usecs, p.qh->c_usecs,
+ temp,
+ 0x7ff & (scratch >> 16));
+
+ if (seen_count < DBG_SCHED_LIMIT)
+ seen[seen_count++].qh = p.qh;
+ } else
+ temp = 0;
+ tag = Q_NEXT_TYPE(fotg210, hw->hw_next);
+ p = p.qh->qh_next;
+ break;
+ case Q_TYPE_FSTN:
+ temp = scnprintf(next, size,
+ " fstn-%8x/%p", p.fstn->hw_prev,
+ p.fstn);
+ tag = Q_NEXT_TYPE(fotg210, p.fstn->hw_next);
+ p = p.fstn->fstn_next;
+ break;
+ case Q_TYPE_ITD:
+ temp = scnprintf(next, size,
+ " itd/%p", p.itd);
+ tag = Q_NEXT_TYPE(fotg210, p.itd->hw_next);
+ p = p.itd->itd_next;
+ break;
+ }
+ size -= temp;
+ next += temp;
+ } while (p.ptr);
+
+ temp = scnprintf(next, size, "\n");
+ size -= temp;
+ next += temp;
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ kfree(seen);
+
+ return buf->alloc_size - size;
+}
+#undef DBG_SCHED_LIMIT
+
+static const char *rh_state_string(struct fotg210_hcd *fotg210)
+{
+ switch (fotg210->rh_state) {
+ case FOTG210_RH_HALTED:
+ return "halted";
+ case FOTG210_RH_SUSPENDED:
+ return "suspended";
+ case FOTG210_RH_RUNNING:
+ return "running";
+ case FOTG210_RH_STOPPING:
+ return "stopping";
+ }
+ return "?";
+}
+
+static ssize_t fill_registers_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fotg210_hcd *fotg210;
+ unsigned long flags;
+ unsigned temp, size, i;
+ char *next, scratch[80];
+ static const char fmt[] = "%*s\n";
+ static const char label[] = "";
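+ /* "%*s" re-uses the length returned by the dbg_*_buf helpers as the field width */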
+
+ hcd = bus_to_hcd(buf->bus);
+ fotg210 = hcd_to_fotg210(hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
+ size = scnprintf(next, size,
+ "bus %s, device %s\n"
+ "%s\n"
+ "SUSPENDED(no register access)\n",
+ hcd->self.controller->bus->name,
+ dev_name(hcd->self.controller),
+ hcd->product_desc);
+ goto done;
+ }
+
+ /* Capability Registers */
+ i = HC_VERSION(fotg210, fotg210_readl(fotg210,
+ &fotg210->caps->hc_capbase));
+ temp = scnprintf(next, size,
+ "bus %s, device %s\n"
+ "%s\n"
+ "EHCI %x.%02x, rh state %s\n",
+ hcd->self.controller->bus->name,
+ dev_name(hcd->self.controller),
+ hcd->product_desc,
+ i >> 8, i & 0x0ff, rh_state_string(fotg210));
+ size -= temp;
+ next += temp;
+
+ /* FIXME interpret both types of params */
+ i = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
+ temp = scnprintf(next, size, "structural params 0x%08x\n", i);
+ size -= temp;
+ next += temp;
+
+ i = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+ temp = scnprintf(next, size, "capability params 0x%08x\n", i);
+ size -= temp;
+ next += temp;
+
+ /* Operational Registers */
+ temp = dbg_status_buf(scratch, sizeof(scratch), label,
+ fotg210_readl(fotg210, &fotg210->regs->status));
+ temp = scnprintf(next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = dbg_command_buf(scratch, sizeof(scratch), label,
+ fotg210_readl(fotg210, &fotg210->regs->command));
+ temp = scnprintf(next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = dbg_intr_buf(scratch, sizeof(scratch), label,
+ fotg210_readl(fotg210, &fotg210->regs->intr_enable));
+ temp = scnprintf(next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf(next, size, "uframe %04x\n",
+ fotg210_read_frame_index(fotg210));
+ size -= temp;
+ next += temp;
+
+ if (fotg210->async_unlink) {
+ temp = scnprintf(next, size, "async unlink qh %p\n",
+ fotg210->async_unlink);
+ size -= temp;
+ next += temp;
+ }
+
+#ifdef FOTG210_STATS
+ temp = scnprintf(next, size,
+ "irq normal %ld err %ld iaa %ld(lost %ld)\n",
+ fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa,
+ fotg210->stats.lost_iaa);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf(next, size, "complete %ld unlink %ld\n",
+ fotg210->stats.complete, fotg210->stats.unlink);
+ size -= temp;
+ next += temp;
+#endif
+
+done:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+
+ return buf->alloc_size - size;
+}
+
+static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
+ ssize_t (*fill_func)(struct debug_buffer *))
+{
+ struct debug_buffer *buf;
+
+ buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
+
+ if (buf) {
+ buf->bus = bus;
+ buf->fill_func = fill_func;
+ mutex_init(&buf->mutex);
+ buf->alloc_size = PAGE_SIZE;
+ }
+
+ return buf;
+}
+
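+/* allocate the output buffer on first use, then run the per-file fill method */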
+static int fill_buffer(struct debug_buffer *buf)
+{
+ int ret = 0;
+
+ if (!buf->output_buf)
+ buf->output_buf = vmalloc(buf->alloc_size);
+
+ if (!buf->output_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = buf->fill_func(buf);
+
+ if (ret >= 0) {
+ buf->count = ret;
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
+static ssize_t debug_output(struct file *file, char __user *user_buf,
+ size_t len, loff_t *offset)
+{
+ struct debug_buffer *buf = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&buf->mutex);
+ if (buf->count == 0) {
+ ret = fill_buffer(buf);
+ if (ret != 0) {
+ mutex_unlock(&buf->mutex);
+ goto out;
+ }
+ }
+ mutex_unlock(&buf->mutex);
+
+ ret = simple_read_from_buffer(user_buf, len, offset,
+ buf->output_buf, buf->count);
+
+out:
+ return ret;
+
+}
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+ struct debug_buffer *buf = file->private_data;
+
+ if (buf) {
+ vfree(buf->output_buf);
+ kfree(buf);
+ }
+
+ return 0;
+}
+
+static int debug_async_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
+static int debug_periodic_open(struct inode *inode, struct file *file)
+{
+ struct debug_buffer *buf;
+
+ buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
+ if (!buf)
+ return -ENOMEM;
+
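+ /* the periodic dump can be much larger than one page; scale the buffer with pointer width */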
+ buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
+ file->private_data = buf;
+ return 0;
+}
+
+static int debug_registers_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private,
+ fill_registers_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
+static inline void create_debug_files(struct fotg210_hcd *fotg210)
+{
+ struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
+
+ fotg210->debug_dir = debugfs_create_dir(bus->bus_name,
+ fotg210_debug_root);
+ if (!fotg210->debug_dir)
+ return;
+
+ if (!debugfs_create_file("async", S_IRUGO, fotg210->debug_dir, bus,
+ &debug_async_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("periodic", S_IRUGO, fotg210->debug_dir, bus,
+ &debug_periodic_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("registers", S_IRUGO, fotg210->debug_dir, bus,
+ &debug_registers_fops))
+ goto file_error;
+
+ return;
+
+file_error:
+ debugfs_remove_recursive(fotg210->debug_dir);
+}
+
+static inline void remove_debug_files(struct fotg210_hcd *fotg210)
+{
+ debugfs_remove_recursive(fotg210->debug_dir);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done). There are two failure modes: "usec" microseconds have
+ * passed (major hardware flakeout), or the register reads as all-ones
+ * (hardware removed).
+ *
+ * That last failure should only happen in cases like physical cardbus eject
+ * before driver shutdown. But it also seems to be caused by bugs in cardbus
+ * bridge shutdown: shutting down the bridge before the devices using it.
+ */
+static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
+{
+ u32 result;
+
+ do {
+ result = fotg210_readl(fotg210, ptr);
+ if (result == ~(u32)0) /* card removed */
+ return -ENODEV;
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay(1);
+ usec--;
+ } while (usec > 0);
+ return -ETIMEDOUT;
+}
+
+/*
+ * Force HC to halt state from unknown (EHCI spec section 2.3).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fotg210_halt(struct fotg210_hcd *fotg210)
+{
+ u32 temp;
+
+ spin_lock_irq(&fotg210->lock);
+
+ /* disable any irqs left enabled by previous code */
+ fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
+
+ /*
+ * This routine gets called during probe before fotg210->command
+ * has been initialized, so we can't rely on its value.
+ */
+ fotg210->command &= ~CMD_RUN;
+ temp = fotg210_readl(fotg210, &fotg210->regs->command);
+ temp &= ~(CMD_RUN | CMD_IAAD);
+ fotg210_writel(fotg210, temp, &fotg210->regs->command);
+
+ spin_unlock_irq(&fotg210->lock);
+ synchronize_irq(fotg210_to_hcd(fotg210)->irq);
+
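+ /* wait up to 16 microframes (16 * 125 usec = 2 msec) for STS_HALT */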
+ return handshake(fotg210, &fotg210->regs->status,
+ STS_HALT, STS_HALT, 16 * 125);
+}
+
+/*
+ * Reset a non-running (STS_HALT == 1) controller.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fotg210_reset(struct fotg210_hcd *fotg210)
+{
+ int retval;
+ u32 command = fotg210_readl(fotg210, &fotg210->regs->command);
+
+ /* If the EHCI debug controller is active, special care must be
+ * taken before and after a host controller reset */
+ if (fotg210->debug && !dbgp_reset_prep(fotg210_to_hcd(fotg210)))
+ fotg210->debug = NULL;
+
+ command |= CMD_RESET;
+ dbg_cmd(fotg210, "reset", command);
+ fotg210_writel(fotg210, command, &fotg210->regs->command);
+ fotg210->rh_state = FOTG210_RH_HALTED;
+ fotg210->next_statechange = jiffies;
+ retval = handshake(fotg210, &fotg210->regs->command,
+ CMD_RESET, 0, 250 * 1000);
+
+ if (retval)
+ return retval;
+
+ if (fotg210->debug)
+ dbgp_external_startup(fotg210_to_hcd(fotg210));
+
+ fotg210->port_c_suspend = fotg210->suspended_ports =
+ fotg210->resuming_ports = 0;
+ return retval;
+}
+
+/*
+ * Idle the controller (turn off the schedules).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fotg210_quiesce(struct fotg210_hcd *fotg210)
+{
+ u32 temp;
+
+ if (fotg210->rh_state != FOTG210_RH_RUNNING)
+ return;
+
+ /* wait for any schedule enables/disables to take effect */
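+ /* CMD_ASE/CMD_PSE shifted left by 10 bits line up with STS_ASS/STS_PSS */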
+ temp = (fotg210->command << 10) & (STS_ASS | STS_PSS);
+ handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, temp,
+ 16 * 125);
+
+ /* then disable anything that's still active */
+ spin_lock_irq(&fotg210->lock);
+ fotg210->command &= ~(CMD_ASE | CMD_PSE);
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+ spin_unlock_irq(&fotg210->lock);
+
+ /* hardware can take 16 microframes to turn off ... */
+ handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, 0,
+ 16 * 125);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void end_unlink_async(struct fotg210_hcd *fotg210);
+static void unlink_empty_async(struct fotg210_hcd *fotg210);
+static void fotg210_work(struct fotg210_hcd *fotg210);
+static void start_unlink_intr(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh);
+static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
+
+/*-------------------------------------------------------------------------*/
+
+/* Set a bit in the USBCMD register */
+static void fotg210_set_command_bit(struct fotg210_hcd *fotg210, u32 bit)
+{
+ fotg210->command |= bit;
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+
+ /* unblock posted write */
+ fotg210_readl(fotg210, &fotg210->regs->command);
+}
+
+/* Clear a bit in the USBCMD register */
+static void fotg210_clear_command_bit(struct fotg210_hcd *fotg210, u32 bit)
+{
+ fotg210->command &= ~bit;
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+
+ /* unblock posted write */
+ fotg210_readl(fotg210, &fotg210->regs->command);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI timer support... Now using hrtimers.
+ *
+ * Lots of different events are triggered from fotg210->hrtimer. Whenever
+ * the timer routine runs, it checks each possible event; events that are
+ * currently enabled and whose expiration time has passed get handled.
+ * The set of enabled events is stored as a collection of bitflags in
+ * fotg210->enabled_hrtimer_events, and they are numbered in order of
+ * increasing delay values (ranging between 1 ms and 100 ms).
+ *
+ * Rather than implementing a sorted list or tree of all pending events,
+ * we keep track only of the lowest-numbered pending event, in
+ * fotg210->next_hrtimer_event. Whenever fotg210->hrtimer gets restarted, its
+ * expiration time is set to the timeout value for this event.
+ *
+ * As a result, events might not get handled right away; the actual delay
+ * could be anywhere up to twice the requested delay. This doesn't
+ * matter, because none of the events are especially time-critical. The
+ * ones that matter most all have a delay of 1 ms, so they will be
+ * handled after 2 ms at most, which is okay. In addition to this, we
+ * allow for an expiration range of 1 ms.
+ */
+
+/*
+ * Delay lengths for the hrtimer event types.
+ * Keep this list sorted by delay length, in the same order as
+ * the event types indexed by enum fotg210_hrtimer_event in fotg210.h.
+ */
+static unsigned event_delays_ns[] = {
+ 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_ASS */
+ 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_PSS */
+ 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_DEAD */
+ 1125 * NSEC_PER_USEC, /* FOTG210_HRTIMER_UNLINK_INTR */
+ 2 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_FREE_ITDS */
+ 6 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_ASYNC_UNLINKS */
+ 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IAA_WATCHDOG */
+ 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_PERIODIC */
+ 15 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_ASYNC */
+ 100 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IO_WATCHDOG */
+};
+
+/* Enable a pending hrtimer event */
+static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event,
+ bool resched)
+{
+ ktime_t *timeout = &fotg210->hr_timeouts[event];
+
+ if (resched)
+ *timeout = ktime_add(ktime_get(),
+ ktime_set(0, event_delays_ns[event]));
+ fotg210->enabled_hrtimer_events |= (1 << event);
+
+ /* Track only the lowest-numbered pending event */
+ if (event < fotg210->next_hrtimer_event) {
+ fotg210->next_hrtimer_event = event;
+ hrtimer_start_range_ns(&fotg210->hrtimer, *timeout,
+ NSEC_PER_MSEC, HRTIMER_MODE_ABS);
+ }
+}
+
+
+/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
+static void fotg210_poll_ASS(struct fotg210_hcd *fotg210)
+{
+ unsigned actual, want;
+
+ /* Don't enable anything if the controller isn't running (e.g., died) */
+ if (fotg210->rh_state != FOTG210_RH_RUNNING)
+ return;
+
+ want = (fotg210->command & CMD_ASE) ? STS_ASS : 0;
+ actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_ASS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 20 ms */
+ if (fotg210->ASS_poll_count++ < 20) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_ASS,
+ true);
+ return;
+ }
+ fotg210_dbg(fotg210, "Waited too long for the async schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ fotg210->ASS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (fotg210->async_count > 0)
+ fotg210_set_command_bit(fotg210, CMD_ASE);
+
+ } else { /* Running */
+ if (fotg210->async_count == 0) {
+
+ /* Turn off the schedule after a while */
+ fotg210_enable_event(fotg210,
+ FOTG210_HRTIMER_DISABLE_ASYNC,
+ true);
+ }
+ }
+}
+
+/* Turn off the async schedule after a brief delay */
+static void fotg210_disable_ASE(struct fotg210_hcd *fotg210)
+{
+ fotg210_clear_command_bit(fotg210, CMD_ASE);
+}
+
+
+/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
+static void fotg210_poll_PSS(struct fotg210_hcd *fotg210)
+{
+ unsigned actual, want;
+
+ /* Don't do anything if the controller isn't running (e.g., died) */
+ if (fotg210->rh_state != FOTG210_RH_RUNNING)
+ return;
+
+ want = (fotg210->command & CMD_PSE) ? STS_PSS : 0;
+ actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_PSS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 20 ms */
+ if (fotg210->PSS_poll_count++ < 20) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_PSS,
+ true);
+ return;
+ }
+ fotg210_dbg(fotg210, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ fotg210->PSS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (fotg210->periodic_count > 0)
+ fotg210_set_command_bit(fotg210, CMD_PSE);
+
+ } else { /* Running */
+ if (fotg210->periodic_count == 0) {
+
+ /* Turn off the schedule after a while */
+ fotg210_enable_event(fotg210,
+ FOTG210_HRTIMER_DISABLE_PERIODIC,
+ true);
+ }
+ }
+}
+
+/* Turn off the periodic schedule after a brief delay */
+static void fotg210_disable_PSE(struct fotg210_hcd *fotg210)
+{
+ fotg210_clear_command_bit(fotg210, CMD_PSE);
+}
+
+
+/* Poll the STS_HALT status bit; see when a dead controller stops */
+static void fotg210_handle_controller_death(struct fotg210_hcd *fotg210)
+{
+ if (!(fotg210_readl(fotg210, &fotg210->regs->status) & STS_HALT)) {
+
+ /* Give up after a few milliseconds */
+ if (fotg210->died_poll_count++ < 5) {
+ /* Try again later */
+ fotg210_enable_event(fotg210,
+ FOTG210_HRTIMER_POLL_DEAD, true);
+ return;
+ }
+ fotg210_warn(fotg210, "Waited too long for the controller to stop, giving up\n");
+ }
+
+ /* Clean up the mess */
+ fotg210->rh_state = FOTG210_RH_HALTED;
+ fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
+ fotg210_work(fotg210);
+ end_unlink_async(fotg210);
+
+ /* Not in process context, so don't try to reset the controller */
+}
+
+
+/* Handle unlinked interrupt QHs once they are gone from the hardware */
+static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210)
+{
+ bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
+
+ /*
+ * Process all the QHs on the intr_unlink list that were added
+ * before the current unlink cycle began. The list is in
+ * temporal order, so stop when we reach the first entry in the
+ * current cycle. But if the root hub isn't running then
+ * process all the QHs on the list.
+ */
+ fotg210->intr_unlinking = true;
+ while (fotg210->intr_unlink) {
+ struct fotg210_qh *qh = fotg210->intr_unlink;
+
+ if (!stopped && qh->unlink_cycle == fotg210->intr_unlink_cycle)
+ break;
+ fotg210->intr_unlink = qh->unlink_next;
+ qh->unlink_next = NULL;
+ end_unlink_intr(fotg210, qh);
+ }
+
+ /* Handle remaining entries later */
+ if (fotg210->intr_unlink) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
+ true);
+ ++fotg210->intr_unlink_cycle;
+ }
+ fotg210->intr_unlinking = false;
+}
+
+
+/* Start another free-iTDs/siTDs cycle */
+static void start_free_itds(struct fotg210_hcd *fotg210)
+{
+ if (!(fotg210->enabled_hrtimer_events &
+ BIT(FOTG210_HRTIMER_FREE_ITDS))) {
+ fotg210->last_itd_to_free = list_entry(
+ fotg210->cached_itd_list.prev,
+ struct fotg210_itd, itd_list);
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_FREE_ITDS, true);
+ }
+}
+
+/* Wait for controller to stop using old iTDs and siTDs */
+static void end_free_itds(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_itd *itd, *n;
+
+ if (fotg210->rh_state < FOTG210_RH_RUNNING)
+ fotg210->last_itd_to_free = NULL;
+
+ list_for_each_entry_safe(itd, n, &fotg210->cached_itd_list, itd_list) {
+ list_del(&itd->itd_list);
+ dma_pool_free(fotg210->itd_pool, itd, itd->itd_dma);
+ if (itd == fotg210->last_itd_to_free)
+ break;
+ }
+
+ if (!list_empty(&fotg210->cached_itd_list))
+ start_free_itds(fotg210);
+}
+
+
+/* Handle lost (or very late) IAA interrupts */
+static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210)
+{
+ if (fotg210->rh_state != FOTG210_RH_RUNNING)
+ return;
+
+ /*
+ * Lost IAA irqs wedge things badly; seen first with a vt8235.
+ * So we need this watchdog, but must protect it against both
+ * (a) SMP races against real IAA firing and retriggering, and
+ * (b) clean HC shutdown, when IAA watchdog was pending.
+ */
+ if (fotg210->async_iaa) {
+ u32 cmd, status;
+
+ /* If we get here, IAA is *REALLY* late. It's barely
+ * conceivable that the system is so busy that CMD_IAAD
+ * is still legitimately set, so let's be sure it's
+ * clear before we read STS_IAA. (The HC should clear
+ * CMD_IAAD when it sets STS_IAA.)
+ */
+ cmd = fotg210_readl(fotg210, &fotg210->regs->command);
+
+ /*
+ * If IAA is set here it either legitimately triggered
+ * after the watchdog timer expired (_way_ late, so we'll
+ * still count it as lost) ... or a silicon erratum:
+ * - VIA seems to set IAA without triggering the IRQ;
+ * - IAAD potentially cleared without setting IAA.
+ */
+ status = fotg210_readl(fotg210, &fotg210->regs->status);
+ if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
+ COUNT(fotg210->stats.lost_iaa);
+ fotg210_writel(fotg210, STS_IAA,
+ &fotg210->regs->status);
+ }
+
+ fotg210_dbg(fotg210, "IAA watchdog: status %x cmd %x\n",
+ status, cmd);
+ end_unlink_async(fotg210);
+ }
+}
+
+
+/* Enable the I/O watchdog, if appropriate */
+static void turn_on_io_watchdog(struct fotg210_hcd *fotg210)
+{
+ /* Not needed if the controller isn't running or it's already enabled */
+ if (fotg210->rh_state != FOTG210_RH_RUNNING ||
+ (fotg210->enabled_hrtimer_events &
+ BIT(FOTG210_HRTIMER_IO_WATCHDOG)))
+ return;
+
+ /*
+ * Isochronous transfers always need the watchdog.
+ * For other sorts we use it only if the flag is set.
+ */
+ if (fotg210->isoc_count > 0 || (fotg210->need_io_watchdog &&
+ fotg210->async_count + fotg210->intr_count > 0))
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_IO_WATCHDOG,
+ true);
+}
+
+
+/*
+ * Handler functions for the hrtimer event types.
+ * Keep this array in the same order as the event types indexed by
+ * enum fotg210_hrtimer_event in fotg210.h.
+ */
+static void (*event_handlers[])(struct fotg210_hcd *) = {
+ fotg210_poll_ASS, /* FOTG210_HRTIMER_POLL_ASS */
+ fotg210_poll_PSS, /* FOTG210_HRTIMER_POLL_PSS */
+ fotg210_handle_controller_death, /* FOTG210_HRTIMER_POLL_DEAD */
+ fotg210_handle_intr_unlinks, /* FOTG210_HRTIMER_UNLINK_INTR */
+ end_free_itds, /* FOTG210_HRTIMER_FREE_ITDS */
+ unlink_empty_async, /* FOTG210_HRTIMER_ASYNC_UNLINKS */
+ fotg210_iaa_watchdog, /* FOTG210_HRTIMER_IAA_WATCHDOG */
+ fotg210_disable_PSE, /* FOTG210_HRTIMER_DISABLE_PERIODIC */
+ fotg210_disable_ASE, /* FOTG210_HRTIMER_DISABLE_ASYNC */
+ fotg210_work, /* FOTG210_HRTIMER_IO_WATCHDOG */
+};
+
+static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
+{
+ struct fotg210_hcd *fotg210 =
+ container_of(t, struct fotg210_hcd, hrtimer);
+ ktime_t now;
+ unsigned long events;
+ unsigned long flags;
+ unsigned e;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ events = fotg210->enabled_hrtimer_events;
+ fotg210->enabled_hrtimer_events = 0;
+ fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
+
+ /*
+ * Check each pending event. If its time has expired, handle
+ * the event; otherwise re-enable it.
+ */
+ now = ktime_get();
+ for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) {
+ if (now.tv64 >= fotg210->hr_timeouts[e].tv64)
+ event_handlers[e](fotg210);
+ else
+ fotg210_enable_event(fotg210, e, false);
+ }
+
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return HRTIMER_NORESTART;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define fotg210_bus_suspend NULL
+#define fotg210_bus_resume NULL
+
+/*-------------------------------------------------------------------------*/
+
+static int check_reset_complete(
+ struct fotg210_hcd *fotg210,
+ int index,
+ u32 __iomem *status_reg,
+ int port_status
+) {
+ if (!(port_status & PORT_CONNECT))
+ return port_status;
+
+ /* if reset finished and it's still not enabled -- handoff */
+ if (!(port_status & PORT_PE)) {
+ /* with integrated TT, there's nobody to hand it to! */
+ fotg210_dbg(fotg210,
+ "Failed to enable port %d on root hub TT\n",
+ index+1);
+ return port_status;
+ } else {
+ fotg210_dbg(fotg210, "port %d reset complete, port enabled\n",
+ index + 1);
+ }
+
+ return port_status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+
+/* build "status change" packet (one or two bytes) from HC registers */
+
+static int
+fotg210_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 temp, status;
+ u32 mask;
+ int retval = 1;
+ unsigned long flags;
+
+ /* init status to no-changes */
+ buf[0] = 0;
+
+ /* Inform the core about resumes-in-progress by returning
+ * a non-zero value even if there are no status changes.
+ */
+ status = fotg210->resuming_ports;
+
+ mask = PORT_CSC | PORT_PEC;
+ /* PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND */
+
+ /* no hub change reports (bit 0) for now (power, ...) */
+
+ /* port N changes (bit N)? */
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ temp = fotg210_readl(fotg210, &fotg210->regs->port_status);
+
+ /*
+ * Return status information even for ports with OWNER set.
+ * Otherwise khubd wouldn't see the disconnect event when a
+ * high-speed device is switched over to the companion
+ * controller by the user.
+ */
+
+ if ((temp & mask) != 0 || test_bit(0, &fotg210->port_c_suspend)
+ || (fotg210->reset_done[0] && time_after_eq(
+ jiffies, fotg210->reset_done[0]))) {
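+ /* single-port controller: the port 1 change is reported in bit 1 */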
+ buf[0] |= 1 << 1;
+ status = STS_PCD;
+ }
+ /* FIXME autosuspend idle root hubs */
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return status ? retval : 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+fotg210_hub_descriptor(
+ struct fotg210_hcd *fotg210,
+ struct usb_hub_descriptor *desc
+) {
+ int ports = HCS_N_PORTS(fotg210->hcs_params);
+ u16 temp;
+
+ desc->bDescriptorType = 0x29;
+ desc->bPwrOn2PwrGood = 10; /* fotg210 1.0, 2.3.9 says 20ms max */
+ desc->bHubContrCurrent = 0;
+
+ desc->bNbrPorts = ports;
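+ /* 7-byte header plus two variable-length port bitmaps of (1 + ports/8) bytes each */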
+ temp = 1 + (ports / 8);
+ desc->bDescLength = 7 + 2 * temp;
+
+ /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
+
+ temp = 0x0008; /* per-port overcurrent reporting */
+ temp |= 0x0002; /* no power switching */
+ desc->wHubCharacteristics = cpu_to_le16(temp);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int fotg210_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+) {
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ int ports = HCS_N_PORTS(fotg210->hcs_params);
+ u32 __iomem *status_reg = &fotg210->regs->port_status;
+ u32 temp, temp1, status;
+ unsigned long flags;
+ int retval = 0;
+ unsigned selector;
+
+ /*
+ * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
+ * HCS_INDICATOR may say we can change LEDs to off/amber/green.
+ * (track current state ourselves) ... blink for diagnostics,
+ * power, "this is the one", etc. EHCI spec supports this.
+ */
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ switch (typeReq) {
+ case ClearHubFeature:
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* no hub-wide feature/status flags */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case ClearPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ temp = fotg210_readl(fotg210, status_reg);
+ temp &= ~PORT_RWC_BITS;
+
+ /*
+ * Even if OWNER is set, so the port is owned by the
+ * companion controller, khubd needs to be able to clear
+ * the port-change status bits (especially
+ * USB_PORT_STAT_C_CONNECTION).
+ */
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ fotg210_writel(fotg210, temp & ~PORT_PE, status_reg);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ fotg210_writel(fotg210, temp | PORT_PEC, status_reg);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ if (temp & PORT_RESET)
+ goto error;
+ if (!(temp & PORT_SUSPEND))
+ break;
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
+ /* resume signaling for 20 msec */
+ fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
+ fotg210->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(wIndex, &fotg210->port_c_suspend);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ fotg210_writel(fotg210, temp | PORT_CSC, status_reg);
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ fotg210_writel(fotg210, temp | OTGISR_OVC,
+ &fotg210->regs->otgisr);
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ /* GetPortStatus clears reset */
+ break;
+ default:
+ goto error;
+ }
+ fotg210_readl(fotg210, &fotg210->regs->command);
+ break;
+ case GetHubDescriptor:
+ fotg210_hub_descriptor(fotg210, (struct usb_hub_descriptor *)
+ buf);
+ break;
+ case GetHubStatus:
+ /* no hub-wide feature/status flags */
+ memset(buf, 0, 4);
+ /*cpu_to_le32s ((u32 *) buf); */
+ break;
+ case GetPortStatus:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ status = 0;
+ temp = fotg210_readl(fotg210, status_reg);
+
+ /* wPortChange bits */
+ if (temp & PORT_CSC)
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
+ if (temp & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+
+ temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
+ if (temp1 & OTGISR_OVC)
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+ /* whoever resumes must GetPortStatus to complete it!! */
+ if (temp & PORT_RESUME) {
+
+ /* Remote Wakeup received? */
+ if (!fotg210->reset_done[wIndex]) {
+ /* resume signaling for 20 msec */
+ fotg210->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
+ /* check the port again */
+ mod_timer(&fotg210_to_hcd(fotg210)->rh_timer,
+ fotg210->reset_done[wIndex]);
+ }
+
+ /* resume completed? */
+ else if (time_after_eq(jiffies,
+ fotg210->reset_done[wIndex])) {
+ clear_bit(wIndex, &fotg210->suspended_ports);
+ set_bit(wIndex, &fotg210->port_c_suspend);
+ fotg210->reset_done[wIndex] = 0;
+
+ /* stop resume signaling */
+ temp = fotg210_readl(fotg210, status_reg);
+ fotg210_writel(fotg210,
+ temp & ~(PORT_RWC_BITS | PORT_RESUME),
+ status_reg);
+ clear_bit(wIndex, &fotg210->resuming_ports);
+ retval = handshake(fotg210, status_reg,
+ PORT_RESUME, 0, 2000 /* 2msec */);
+ if (retval != 0) {
+ fotg210_err(fotg210,
+ "port %d resume error %d\n",
+ wIndex + 1, retval);
+ goto error;
+ }
+ temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
+ }
+ }
+
+ /* whoever resets must GetPortStatus to complete it!! */
+ if ((temp & PORT_RESET)
+ && time_after_eq(jiffies,
+ fotg210->reset_done[wIndex])) {
+ status |= USB_PORT_STAT_C_RESET << 16;
+ fotg210->reset_done[wIndex] = 0;
+ clear_bit(wIndex, &fotg210->resuming_ports);
+
+ /* force reset to complete */
+ fotg210_writel(fotg210,
+ temp & ~(PORT_RWC_BITS | PORT_RESET),
+ status_reg);
+ /* REVISIT: some hardware needs 550+ usec to clear
+ * this bit; seems too long to spin routinely...
+ */
+ retval = handshake(fotg210, status_reg,
+ PORT_RESET, 0, 1000);
+ if (retval != 0) {
+ fotg210_err(fotg210, "port %d reset error %d\n",
+ wIndex + 1, retval);
+ goto error;
+ }
+
+ /* see what we found out */
+ temp = check_reset_complete(fotg210, wIndex, status_reg,
+ fotg210_readl(fotg210, status_reg));
+ }
+
+ if (!(temp & (PORT_RESUME|PORT_RESET))) {
+ fotg210->reset_done[wIndex] = 0;
+ clear_bit(wIndex, &fotg210->resuming_ports);
+ }
+
+ /* transfer dedicated ports to the companion hc */
+ if ((temp & PORT_CONNECT) &&
+ test_bit(wIndex, &fotg210->companion_ports)) {
+ temp &= ~PORT_RWC_BITS;
+ fotg210_writel(fotg210, temp, status_reg);
+ fotg210_dbg(fotg210, "port %d --> companion\n",
+ wIndex + 1);
+ temp = fotg210_readl(fotg210, status_reg);
+ }
+
+ /*
+ * Even if OWNER is set, there's no harm letting khubd
+ * see the wPortStatus values (they should all be 0 except
+ * for PORT_POWER anyway).
+ */
+
+ if (temp & PORT_CONNECT) {
+ status |= USB_PORT_STAT_CONNECTION;
+ status |= fotg210_port_speed(fotg210, temp);
+ }
+ if (temp & PORT_PE)
+ status |= USB_PORT_STAT_ENABLE;
+
+ /* maybe the port was unsuspended without our knowledge */
+ if (temp & (PORT_SUSPEND|PORT_RESUME)) {
+ status |= USB_PORT_STAT_SUSPEND;
+ } else if (test_bit(wIndex, &fotg210->suspended_ports)) {
+ clear_bit(wIndex, &fotg210->suspended_ports);
+ clear_bit(wIndex, &fotg210->resuming_ports);
+ fotg210->reset_done[wIndex] = 0;
+ if (temp & PORT_PE)
+ set_bit(wIndex, &fotg210->port_c_suspend);
+ }
+
+ temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
+ if (temp1 & OTGISR_OVC)
+ status |= USB_PORT_STAT_OVERCURRENT;
+ if (temp & PORT_RESET)
+ status |= USB_PORT_STAT_RESET;
+ if (test_bit(wIndex, &fotg210->port_c_suspend))
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
+
+ if (status & ~0xffff) /* only if wPortChange is interesting */
+ dbg_port(fotg210, "GetStatus", wIndex + 1, temp);
+ put_unaligned_le32(status, buf);
+ break;
+ case SetHubFeature:
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* no hub-wide feature/status flags */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case SetPortFeature:
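+ /* the high byte of wIndex carries the test selector for USB_PORT_FEAT_TEST */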
+ selector = wIndex >> 8;
+ wIndex &= 0xff;
+
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ temp = fotg210_readl(fotg210, status_reg);
+ temp &= ~PORT_RWC_BITS;
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ if ((temp & PORT_PE) == 0
+ || (temp & PORT_RESET) != 0)
+ goto error;
+
+ /* After the above check the port must be connected.
+ * Setting the suspend bit can also put the phy into
+ * low power mode if the hostpc feature is present.
+ */
+ fotg210_writel(fotg210, temp | PORT_SUSPEND,
+ status_reg);
+ set_bit(wIndex, &fotg210->suspended_ports);
+ break;
+ case USB_PORT_FEAT_RESET:
+ if (temp & PORT_RESUME)
+ goto error;
+ /* line status bits may report this as low speed,
+ * which can be fine if this root hub has a
+ * transaction translator built in.
+ */
+ fotg210_dbg(fotg210, "port %d reset\n", wIndex + 1);
+ temp |= PORT_RESET;
+ temp &= ~PORT_PE;
+
+ /*
+ * caller must wait, then call GetPortStatus
+ * usb 2.0 spec says 50 ms resets on root
+ */
+ fotg210->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(50);
+ fotg210_writel(fotg210, temp, status_reg);
+ break;
+
+ /* For downstream facing ports (these): one hub port is put
+ * into test mode according to USB2 11.24.2.13, then the hub
+ * must be reset (which for root hub now means rmmod+modprobe,
+ * or else system reboot). See EHCI 2.3.9 and 4.14 for info
+ * about the EHCI-specific stuff.
+ */
+ case USB_PORT_FEAT_TEST:
+ if (!selector || selector > 5)
+ goto error;
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ fotg210_quiesce(fotg210);
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ /* Put all enabled ports into suspend */
+ temp = fotg210_readl(fotg210, status_reg) &
+ ~PORT_RWC_BITS;
+ if (temp & PORT_PE)
+ fotg210_writel(fotg210, temp | PORT_SUSPEND,
+ status_reg);
+
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ fotg210_halt(fotg210);
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ temp = fotg210_readl(fotg210, status_reg);
+ temp |= selector << 16;
+ fotg210_writel(fotg210, temp, status_reg);
+ break;
+
+ default:
+ goto error;
+ }
+ fotg210_readl(fotg210, &fotg210->regs->command);
+ break;
+
+ default:
+error:
+ /* "stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return retval;
+}
+
+static void __maybe_unused fotg210_relinquish_port(struct usb_hcd *hcd,
+ int portnum)
+{
+ return;
+}
+
+static int __maybe_unused fotg210_port_handed_over(struct usb_hcd *hcd,
+ int portnum)
+{
+ return 0;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * There are basically three types of memory:
+ * - data used only by the HCD ... kmalloc is fine
+ * - async and periodic schedules, shared by HC and HCD ... these
+ * need to use dma_pool or dma_alloc_coherent
+ * - driver buffers, read/written by HC ... single shot DMA mapped
+ *
+ * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
+ * No memory seen by this driver is pageable.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/* Allocate the key transfer structures from the previously allocated pool */
+
+static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210,
+ struct fotg210_qtd *qtd, dma_addr_t dma)
+{
+ memset(qtd, 0, sizeof(*qtd));
+ qtd->qtd_dma = dma;
+ qtd->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
+ qtd->hw_next = FOTG210_LIST_END(fotg210);
+ qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
+ INIT_LIST_HEAD(&qtd->qtd_list);
+}
+
+static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210,
+ gfp_t flags)
+{
+ struct fotg210_qtd *qtd;
+ dma_addr_t dma;
+
+ qtd = dma_pool_alloc(fotg210->qtd_pool, flags, &dma);
+ if (qtd != NULL)
+ fotg210_qtd_init(fotg210, qtd, dma);
+
+ return qtd;
+}
+
+static inline void fotg210_qtd_free(struct fotg210_hcd *fotg210,
+ struct fotg210_qtd *qtd)
+{
+ dma_pool_free(fotg210->qtd_pool, qtd, qtd->qtd_dma);
+}
+
+
+static void qh_destroy(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ /* clean qtds first, and know this is not linked */
+ if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
+ fotg210_dbg(fotg210, "unused qh not empty!\n");
+ BUG();
+ }
+ if (qh->dummy)
+ fotg210_qtd_free(fotg210, qh->dummy);
+ dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
+ kfree(qh);
+}
+
+static struct fotg210_qh *fotg210_qh_alloc(struct fotg210_hcd *fotg210,
+ gfp_t flags)
+{
+ struct fotg210_qh *qh;
+ dma_addr_t dma;
+
+ qh = kzalloc(sizeof(*qh), GFP_ATOMIC);
+ if (!qh)
+ goto done;
+ qh->hw = (struct fotg210_qh_hw *)
+ dma_pool_alloc(fotg210->qh_pool, flags, &dma);
+ if (!qh->hw)
+ goto fail;
+ memset(qh->hw, 0, sizeof(*qh->hw));
+ qh->qh_dma = dma;
+ INIT_LIST_HEAD(&qh->qtd_list);
+
+ /* dummy td enables safe urb queuing */
+ qh->dummy = fotg210_qtd_alloc(fotg210, flags);
+ if (qh->dummy == NULL) {
+ fotg210_dbg(fotg210, "no dummy td\n");
+ goto fail1;
+ }
+done:
+ return qh;
+fail1:
+ dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
+fail:
+ kfree(qh);
+ return NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* The queue heads and transfer descriptors are managed from pools tied
+ * to each of the "per device" structures.
+ * This is the initialisation and cleanup code.
+ */
+
+static void fotg210_mem_cleanup(struct fotg210_hcd *fotg210)
+{
+ if (fotg210->async)
+ qh_destroy(fotg210, fotg210->async);
+ fotg210->async = NULL;
+
+ if (fotg210->dummy)
+ qh_destroy(fotg210, fotg210->dummy);
+ fotg210->dummy = NULL;
+
+ /* DMA consistent memory and pools */
+ if (fotg210->qtd_pool)
+ dma_pool_destroy(fotg210->qtd_pool);
+ fotg210->qtd_pool = NULL;
+
+ if (fotg210->qh_pool) {
+ dma_pool_destroy(fotg210->qh_pool);
+ fotg210->qh_pool = NULL;
+ }
+
+ if (fotg210->itd_pool)
+ dma_pool_destroy(fotg210->itd_pool);
+ fotg210->itd_pool = NULL;
+
+ if (fotg210->periodic)
+ dma_free_coherent(fotg210_to_hcd(fotg210)->self.controller,
+ fotg210->periodic_size * sizeof(u32),
+ fotg210->periodic, fotg210->periodic_dma);
+ fotg210->periodic = NULL;
+
+ /* shadow periodic table */
+ kfree(fotg210->pshadow);
+ fotg210->pshadow = NULL;
+}
+
+/* remember to add cleanup code (above) if you add anything here */
+static int fotg210_mem_init(struct fotg210_hcd *fotg210, gfp_t flags)
+{
+ int i;
+
+ /* QTDs for control/bulk/intr transfers */
+ fotg210->qtd_pool = dma_pool_create("fotg210_qtd",
+ fotg210_to_hcd(fotg210)->self.controller,
+ sizeof(struct fotg210_qtd),
+ 32 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fotg210->qtd_pool)
+ goto fail;
+
+ /* QHs for control/bulk/intr transfers */
+ fotg210->qh_pool = dma_pool_create("fotg210_qh",
+ fotg210_to_hcd(fotg210)->self.controller,
+ sizeof(struct fotg210_qh_hw),
+ 32 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fotg210->qh_pool)
+ goto fail;
+
+ fotg210->async = fotg210_qh_alloc(fotg210, flags);
+ if (!fotg210->async)
+ goto fail;
+
+ /* ITD for high speed ISO transfers */
+ fotg210->itd_pool = dma_pool_create("fotg210_itd",
+ fotg210_to_hcd(fotg210)->self.controller,
+ sizeof(struct fotg210_itd),
+ 64 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fotg210->itd_pool)
+ goto fail;
+
+ /* Hardware periodic table */
+ fotg210->periodic = (__le32 *)
+ dma_alloc_coherent(fotg210_to_hcd(fotg210)->self.controller,
+ fotg210->periodic_size * sizeof(__le32),
+ &fotg210->periodic_dma, 0);
+ if (fotg210->periodic == NULL)
+ goto fail;
+
+ for (i = 0; i < fotg210->periodic_size; i++)
+ fotg210->periodic[i] = FOTG210_LIST_END(fotg210);
+
+ /* software shadow of hardware table */
+ fotg210->pshadow = kcalloc(fotg210->periodic_size, sizeof(void *),
+ flags);
+ if (fotg210->pshadow != NULL)
+ return 0;
+
+fail:
+ fotg210_dbg(fotg210, "couldn't init memory\n");
+ fotg210_mem_cleanup(fotg210);
+ return -ENOMEM;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
+ *
+ * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
+ * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
+ * buffers needed for the larger number). We use one QH per endpoint, queue
+ * multiple urbs (all three types) per endpoint. URBs may need several qtds.
+ *
+ * ISO traffic uses "ISO TD" (itd) records, and (along with
+ * interrupts) needs careful scheduling. Performance improvements can be
+ * an ongoing challenge. That's in "ehci-sched.c".
+ *
+ * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
+ * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
+ * (b) special fields in qh entries or (c) split iso entries. TTs will
+ * buffer low/full speed data so the host collects it at high speed.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/* fill a qtd, returning how much of the buffer we were able to queue up */
+
+static int
+qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd, dma_addr_t buf,
+ size_t len, int token, int maxpacket)
+{
+ int i, count;
+ u64 addr = buf;
+
+ /* one buffer entry per 4K ... first might be short or unaligned */
+ qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)addr);
+ qtd->hw_buf_hi[0] = cpu_to_hc32(fotg210, (u32)(addr >> 32));
+ count = 0x1000 - (buf & 0x0fff); /* rest of that page */
+ if (likely(len < count)) /* ... iff needed */
+ count = len;
+ else {
+ buf += 0x1000;
+ buf &= ~0x0fff;
+
+ /* per-qtd limit: from 16K to 20K (best alignment) */
+ for (i = 1; count < len && i < 5; i++) {
+ addr = buf;
+ qtd->hw_buf[i] = cpu_to_hc32(fotg210, (u32)addr);
+ qtd->hw_buf_hi[i] = cpu_to_hc32(fotg210,
+ (u32)(addr >> 32));
+ buf += 0x1000;
+ if ((count + 0x1000) < len)
+ count += 0x1000;
+ else
+ count = len;
+ }
+
+ /* short packets may only terminate transfers */
+ if (count != len)
+ count -= (count % maxpacket);
+ }
+ qtd->hw_token = cpu_to_hc32(fotg210, (count << 16) | token);
+ qtd->length = count;
+
+ return count;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+qh_update(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
+ struct fotg210_qtd *qtd)
+{
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ /* writes to an active overlay are unsafe */
+ BUG_ON(qh->qh_state != QH_STATE_IDLE);
+
+ hw->hw_qtd_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+ hw->hw_alt_next = FOTG210_LIST_END(fotg210);
+
+ /* Except for control endpoints, we make hardware maintain data
+ * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
+ * and set the pseudo-toggle in udev. Only usb_clear_halt() will
+ * ever clear it.
+ */
+ if (!(hw->hw_info1 & cpu_to_hc32(fotg210, QH_TOGGLE_CTL))) {
+ unsigned is_out, epnum;
+
+ is_out = qh->is_out;
+ epnum = (hc32_to_cpup(fotg210, &hw->hw_info1) >> 8) & 0x0f;
+ if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
+ hw->hw_token &= ~cpu_to_hc32(fotg210, QTD_TOGGLE);
+ usb_settoggle(qh->dev, epnum, is_out, 1);
+ }
+ }
+
+ hw->hw_token &= cpu_to_hc32(fotg210, QTD_TOGGLE | QTD_STS_PING);
+}
+
+/* if it weren't for a common silicon quirk (writing the dummy into the qh
+ * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
+ * recovery (including urb dequeue) would need software changes to a QH...
+ */
+static void
+qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ struct fotg210_qtd *qtd;
+
+ if (list_empty(&qh->qtd_list))
+ qtd = qh->dummy;
+ else {
+ qtd = list_entry(qh->qtd_list.next,
+ struct fotg210_qtd, qtd_list);
+ /*
+ * first qtd may already be partially processed.
+ * If we come here during unlink, the QH overlay region
+ * might have reference to the just unlinked qtd. The
+ * qtd is updated in qh_completions(). Update the QH
+ * overlay here.
+ */
+ if (cpu_to_hc32(fotg210, qtd->qtd_dma) == qh->hw->hw_current) {
+ qh->hw->hw_qtd_next = qtd->hw_next;
+ qtd = NULL;
+ }
+ }
+
+ if (qtd)
+ qh_update(fotg210, qh, qtd);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
+
+static void fotg210_clear_tt_buffer_complete(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_qh *qh = ep->hcpriv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ qh->clearing_tt = 0;
+ if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
+ && fotg210->rh_state == FOTG210_RH_RUNNING)
+ qh_link_async(fotg210, qh);
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+}
+
+static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh,
+ struct urb *urb, u32 token)
+{
+
+ /* If an async split transaction gets an error or is unlinked,
+ * the TT buffer may be left in an indeterminate state. We
+ * have to clear the TT buffer.
+ *
+ * Note: this routine is never called for Isochronous transfers.
+ */
+ if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
+ struct usb_device *tt = urb->dev->tt->hub;
+ dev_dbg(&tt->dev,
+ "clear tt buffer port %d, a%d ep%d t%08x\n",
+ urb->dev->ttport, urb->dev->devnum,
+ usb_pipeendpoint(urb->pipe), token);
+
+ if (urb->dev->tt->hub !=
+ fotg210_to_hcd(fotg210)->self.root_hub) {
+ if (usb_hub_clear_tt_buffer(urb) == 0)
+ qh->clearing_tt = 1;
+ }
+ }
+}
+
+static int qtd_copy_status(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ size_t length,
+ u32 token
+)
+{
+ int status = -EINPROGRESS;
+
+ /* count IN/OUT bytes, not SETUP (even short packets) */
+ if (likely(QTD_PID(token) != 2))
+ urb->actual_length += length - QTD_LENGTH(token);
+
+ /* don't modify error codes */
+ if (unlikely(urb->unlinked))
+ return status;
+
+ /* force cleanup after short read; not always an error */
+ if (unlikely(IS_SHORT_READ(token)))
+ status = -EREMOTEIO;
+
+ /* serious "can't proceed" faults reported by the hardware */
+ if (token & QTD_STS_HALT) {
+ if (token & QTD_STS_BABBLE) {
+ /* FIXME "must" disable babbling device's port too */
+ status = -EOVERFLOW;
+ /* CERR nonzero + halt --> stall */
+ } else if (QTD_CERR(token)) {
+ status = -EPIPE;
+
+ /* In theory, more than one of the following bits can be set
+ * since they are sticky and the transaction is retried.
+ * Which to test first is rather arbitrary.
+ */
+ } else if (token & QTD_STS_MMF) {
+ /* fs/ls interrupt xfer missed the complete-split */
+ status = -EPROTO;
+ } else if (token & QTD_STS_DBE) {
+ status = (QTD_PID(token) == 1) /* IN ? */
+ ? -ENOSR /* hc couldn't read data */
+ : -ECOMM; /* hc couldn't write data */
+ } else if (token & QTD_STS_XACT) {
+ /* timeout, bad CRC, wrong PID, etc */
+ fotg210_dbg(fotg210, "devpath %s ep%d%s 3strikes\n",
+ urb->dev->devpath,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out");
+ status = -EPROTO;
+ } else { /* unknown */
+ status = -EPROTO;
+ }
+
+ fotg210_dbg(fotg210,
+ "dev%d ep%d%s qtd token %08x --> status %d\n",
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ token, status);
+ }
+
+ return status;
+}
+
+static void
+fotg210_urb_done(struct fotg210_hcd *fotg210, struct urb *urb, int status)
+__releases(fotg210->lock)
+__acquires(fotg210->lock)
+{
+ if (likely(urb->hcpriv != NULL)) {
+ struct fotg210_qh *qh = (struct fotg210_qh *) urb->hcpriv;
+
+ /* S-mask in a QH means it's an interrupt urb */
+ if ((qh->hw->hw_info2 & cpu_to_hc32(fotg210, QH_SMASK)) != 0) {
+
+ /* ... update hc-wide periodic stats (for usbfs) */
+ fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs--;
+ }
+ }
+
+ if (unlikely(urb->unlinked)) {
+ COUNT(fotg210->stats.unlink);
+ } else {
+ /* report non-error and short read status as zero */
+ if (status == -EINPROGRESS || status == -EREMOTEIO)
+ status = 0;
+ COUNT(fotg210->stats.complete);
+ }
+
+#ifdef FOTG210_URB_TRACE
+ fotg210_dbg(fotg210,
+ "%s %s urb %p ep%d%s status %d len %d/%d\n",
+ __func__, urb->dev->devpath, urb,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ status,
+ urb->actual_length, urb->transfer_buffer_length);
+#endif
+
+ /* complete() can reenter this HCD */
+ usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+ spin_unlock(&fotg210->lock);
+ usb_hcd_giveback_urb(fotg210_to_hcd(fotg210), urb, status);
+ spin_lock(&fotg210->lock);
+}
+
+static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
+
+/*
+ * Process and free completed qtds for a qh, returning URBs to drivers.
+ * Chases up to qh->hw_current. Returns number of completions called,
+ * indicating how much "real" work we did.
+ */
+static unsigned
+qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ struct fotg210_qtd *last, *end = qh->dummy;
+ struct list_head *entry, *tmp;
+ int last_status;
+ int stopped;
+ unsigned count = 0;
+ u8 state;
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ if (unlikely(list_empty(&qh->qtd_list)))
+ return count;
+
+ /* completions (or tasks on other cpus) must never clobber HALT
+ * till we've gone through and cleaned everything up, even when
+ * they add urbs to this qh's queue or mark them for unlinking.
+ *
+ * NOTE: unlinking expects to be done in queue order.
+ *
+ * It's a bug for qh->qh_state to be anything other than
+ * QH_STATE_IDLE, unless our caller is scan_async() or
+ * scan_intr().
+ */
+ state = qh->qh_state;
+ qh->qh_state = QH_STATE_COMPLETING;
+ stopped = (state == QH_STATE_IDLE);
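+ /* an IDLE qh is already off the hardware schedule, so the scan
+ * below treats the queue as stopped and handles unlinks too
+ */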
+
+ rescan:
+ last = NULL;
+ last_status = -EINPROGRESS;
+ qh->needs_rescan = 0;
+
+ /* remove de-activated QTDs from front of queue.
+ * after faults (including short reads), cleanup this urb
+ * then let the queue advance.
+ * if queue is stopped, handles unlinks.
+ */
+ list_for_each_safe(entry, tmp, &qh->qtd_list) {
+ struct fotg210_qtd *qtd;
+ struct urb *urb;
+ u32 token = 0;
+
+ qtd = list_entry(entry, struct fotg210_qtd, qtd_list);
+ urb = qtd->urb;
+
+ /* clean up any state from previous QTD ... */
+ if (last) {
+ if (likely(last->urb != urb)) {
+ fotg210_urb_done(fotg210, last->urb,
+ last_status);
+ count++;
+ last_status = -EINPROGRESS;
+ }
+ fotg210_qtd_free(fotg210, last);
+ last = NULL;
+ }
+
+ /* ignore urbs submitted during completions we reported */
+ if (qtd == end)
+ break;
+
+ /* hardware copies qtd out of qh overlay */
+ rmb();
+ token = hc32_to_cpu(fotg210, qtd->hw_token);
+
+ /* always clean up qtds the hc de-activated */
+ retry_xacterr:
+ if ((token & QTD_STS_ACTIVE) == 0) {
+
+ /* Report Data Buffer Error: non-fatal but useful */
+ if (token & QTD_STS_DBE)
+ fotg210_dbg(fotg210,
+ "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ urb,
+ usb_endpoint_num(&urb->ep->desc),
+ usb_endpoint_dir_in(&urb->ep->desc)
+ ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd,
+ qh);
+
+ /* on STALL, error, and short reads this urb must
+ * complete and all its qtds must be recycled.
+ */
+ if ((token & QTD_STS_HALT) != 0) {
+
+ /* retry transaction errors until we
+ * reach the software xacterr limit
+ */
+ if ((token & QTD_STS_XACT) &&
+ QTD_CERR(token) == 0 &&
+ ++qh->xacterrs < QH_XACTERR_MAX &&
+ !urb->unlinked) {
+ fotg210_dbg(fotg210,
+ "detected XactErr len %zu/%zu retry %d\n",
+ qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
+
+ /* reset the token in the qtd and the
+ * qh overlay (which still contains
+ * the qtd) so that we pick up from
+ * where we left off
+ */
+ token &= ~QTD_STS_HALT;
+ token |= QTD_STS_ACTIVE |
+ (FOTG210_TUNE_CERR << 10);
+ qtd->hw_token = cpu_to_hc32(fotg210,
+ token);
+ wmb();
+ hw->hw_token = cpu_to_hc32(fotg210,
+ token);
+ goto retry_xacterr;
+ }
+ stopped = 1;
+
+ /* magic dummy for some short reads; qh won't advance.
+ * that silicon quirk can kick in with this dummy too.
+ *
+ * other short reads won't stop the queue, including
+ * control transfers (status stage handles that) or
+ * most other single-qtd reads ... the queue stops if
+ * URB_SHORT_NOT_OK was set so the driver submitting
+ * the urbs could clean it up.
+ */
+ } else if (IS_SHORT_READ(token)
+ && !(qtd->hw_alt_next
+ & FOTG210_LIST_END(fotg210))) {
+ stopped = 1;
+ }
+
+ /* stop scanning when we reach qtds the hc is using */
+ } else if (likely(!stopped
+ && fotg210->rh_state >= FOTG210_RH_RUNNING)) {
+ break;
+
+ /* scan the whole queue for unlinks whenever it stops */
+ } else {
+ stopped = 1;
+
+ /* cancel everything if we halt, suspend, etc */
+ if (fotg210->rh_state < FOTG210_RH_RUNNING)
+ last_status = -ESHUTDOWN;
+
+ /* this qtd is active; skip it unless a previous qtd
+ * for its urb faulted, or its urb was canceled.
+ */
+ else if (last_status == -EINPROGRESS && !urb->unlinked)
+ continue;
+
+ /* qh unlinked; token in overlay may be most current */
+ if (state == QH_STATE_IDLE
+ && cpu_to_hc32(fotg210, qtd->qtd_dma)
+ == hw->hw_current) {
+ token = hc32_to_cpu(fotg210, hw->hw_token);
+
+ /* An unlink may leave an incomplete
+ * async transaction in the TT buffer.
+ * We have to clear it.
+ */
+ fotg210_clear_tt_buffer(fotg210, qh, urb,
+ token);
+ }
+ }
+
+ /* unless we already know the urb's status, collect qtd status
+ * and update count of bytes transferred. in common short read
+ * cases with only one data qtd (including control transfers),
+ * queue processing won't halt. but with two or more qtds (for
+ * example, with a 32 KB transfer), when the first qtd gets a
+ * short read the second must be removed by hand.
+ */
+ if (last_status == -EINPROGRESS) {
+ last_status = qtd_copy_status(fotg210, urb,
+ qtd->length, token);
+ if (last_status == -EREMOTEIO
+ && (qtd->hw_alt_next
+ & FOTG210_LIST_END(fotg210)))
+ last_status = -EINPROGRESS;
+
+ /* As part of low/full-speed endpoint-halt processing
+ * we must clear the TT buffer (11.17.5).
+ */
+ if (unlikely(last_status != -EINPROGRESS &&
+ last_status != -EREMOTEIO)) {
+ /* The TTs in some hubs malfunction when they
+ * receive this request following a STALL (they
+ * stop sending isochronous packets). Since a
+ * STALL can't leave the TT buffer in a busy
+ * state (if you believe Figures 11-48 - 11-51
+ * in the USB 2.0 spec), we won't clear the TT
+ * buffer in this case. Strictly speaking this
+ * is a violation of the spec.
+ */
+ if (last_status != -EPIPE)
+ fotg210_clear_tt_buffer(fotg210, qh,
+ urb, token);
+ }
+ }
+
+ /* if we're removing something not at the queue head,
+ * patch the hardware queue pointer.
+ */
+ if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
+ last = list_entry(qtd->qtd_list.prev,
+ struct fotg210_qtd, qtd_list);
+ last->hw_next = qtd->hw_next;
+ }
+
+ /* remove qtd; it's recycled after possible urb completion */
+ list_del(&qtd->qtd_list);
+ last = qtd;
+
+ /* reinit the xacterr counter for the next qtd */
+ qh->xacterrs = 0;
+ }
+
+ /* last urb's completion might still need calling */
+ if (likely(last != NULL)) {
+ fotg210_urb_done(fotg210, last->urb, last_status);
+ count++;
+ fotg210_qtd_free(fotg210, last);
+ }
+
+ /* Do we need to rescan for URBs dequeued during a giveback? */
+ if (unlikely(qh->needs_rescan)) {
+ /* If the QH is already unlinked, do the rescan now. */
+ if (state == QH_STATE_IDLE)
+ goto rescan;
+
+ /* Otherwise we have to wait until the QH is fully unlinked.
+ * Our caller will start an unlink if qh->needs_rescan is
+ * set. But if an unlink has already started, nothing needs
+ * to be done.
+ */
+ if (state != QH_STATE_LINKED)
+ qh->needs_rescan = 0;
+ }
+
+ /* restore original state; caller must unlink or relink */
+ qh->qh_state = state;
+
+ /* be sure the hardware's done with the qh before refreshing
+ * it after fault cleanup, or recovering from silicon wrongly
+ * overlaying the dummy qtd (which reduces DMA chatter).
+ */
+ if (stopped != 0 || hw->hw_qtd_next == FOTG210_LIST_END(fotg210)) {
+ switch (state) {
+ case QH_STATE_IDLE:
+ qh_refresh(fotg210, qh);
+ break;
+ case QH_STATE_LINKED:
+ /* We won't refresh a QH that's linked (after the HC
+ * stopped the queue). That avoids a race:
+ * - HC reads first part of QH;
+ * - CPU updates that first part and the token;
+ * - HC reads rest of that QH, including token
+ * Result: HC gets an inconsistent image, and then
+ * DMAs to/from the wrong memory (corrupting it).
+ *
+ * That should be rare for interrupt transfers,
+ * except maybe high bandwidth ...
+ */
+
+ /* Tell the caller to start an unlink */
+ qh->needs_rescan = 1;
+ break;
+ /* otherwise, unlink already started */
+ }
+ }
+
+ return count;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+/* ... and packet size, for any kind of endpoint descriptor */
+#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
+
+/*
+ * reverse of qh_urb_transaction: free a list of TDs.
+ * used for cleanup after errors, before HC sees an URB's TDs.
+ */
+static void qtd_list_free(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *qtd_list
+) {
+ struct list_head *entry, *temp;
+
+ list_for_each_safe(entry, temp, qtd_list) {
+ struct fotg210_qtd *qtd;
+
+ qtd = list_entry(entry, struct fotg210_qtd, qtd_list);
+ list_del(&qtd->qtd_list);
+ fotg210_qtd_free(fotg210, qtd);
+ }
+}
+
+/*
+ * create a list of filled qtds for this URB; won't link into qh.
+ */
+static struct list_head *
+qh_urb_transaction(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *head,
+ gfp_t flags
+) {
+ struct fotg210_qtd *qtd, *qtd_prev;
+ dma_addr_t buf;
+ int len, this_sg_len, maxpacket;
+ int is_input;
+ u32 token;
+ int i;
+ struct scatterlist *sg;
+
+ /*
+ * URBs map to sequences of QTDs: one logical transaction
+ */
+ qtd = fotg210_qtd_alloc(fotg210, flags);
+ if (unlikely(!qtd))
+ return NULL;
+ list_add_tail(&qtd->qtd_list, head);
+ qtd->urb = urb;
+
+ token = QTD_STS_ACTIVE;
+ token |= (FOTG210_TUNE_CERR << 10);
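+ /* CERR sits in bits 11:10 of the token; every qtd starts out with
+ * FOTG210_TUNE_CERR transaction-error retries allowed
+ */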
+ /* for split transactions, SplitXState initialized to zero */
+
+ len = urb->transfer_buffer_length;
+ is_input = usb_pipein(urb->pipe);
+ if (usb_pipecontrol(urb->pipe)) {
+ /* SETUP pid */
+ qtd_fill(fotg210, qtd, urb->setup_dma,
+ sizeof(struct usb_ctrlrequest),
+ token | (2 /* "setup" */ << 8), 8);
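+ /* SETUP is always 8 bytes and always DATA0; the data and status
+ * stages start on DATA1, hence the toggle flip just below
+ */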
+
+ /* ... and always at least one more pid */
+ token ^= QTD_TOGGLE;
+ qtd_prev = qtd;
+ qtd = fotg210_qtd_alloc(fotg210, flags);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+
+ /* for zero length DATA stages, STATUS is always IN */
+ if (len == 0)
+ token |= (1 /* "in" */ << 8);
+ }
+
+ /*
+ * data transfer stage: buffer setup
+ */
+ i = urb->num_mapped_sgs;
+ if (len > 0 && i > 0) {
+ sg = urb->sg;
+ buf = sg_dma_address(sg);
+
+ /* urb->transfer_buffer_length may be smaller than the
+ * size of the scatterlist (or vice versa)
+ */
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ } else {
+ sg = NULL;
+ buf = urb->transfer_dma;
+ this_sg_len = len;
+ }
+
+ if (is_input)
+ token |= (1 /* "in" */ << 8);
+ /* else it's already initted to "out" pid (0 << 8) */
+
+ maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+
+ /*
+ * buffer gets wrapped in one or more qtds;
+ * last one may be "short" (including zero len)
+ * and may serve as a control status ack
+ */
+ for (;;) {
+ int this_qtd_len;
+
+ this_qtd_len = qtd_fill(fotg210, qtd, buf, this_sg_len, token,
+ maxpacket);
+ this_sg_len -= this_qtd_len;
+ len -= this_qtd_len;
+ buf += this_qtd_len;
+
+ /*
+ * short reads advance to a "magic" dummy instead of the next
+ * qtd ... that forces the queue to stop, for manual cleanup.
+ * (this will usually be overridden later.)
+ */
+ if (is_input)
+ qtd->hw_alt_next = fotg210->async->hw->hw_alt_next;
+
+ /* qh makes control packets use qtd toggle; maybe switch it */
+ if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
+ token ^= QTD_TOGGLE;
+
+ if (likely(this_sg_len <= 0)) {
+ if (--i <= 0 || len <= 0)
+ break;
+ sg = sg_next(sg);
+ buf = sg_dma_address(sg);
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ }
+
+ qtd_prev = qtd;
+ qtd = fotg210_qtd_alloc(fotg210, flags);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+ }
+
+ /*
+ * unless the caller requires manual cleanup after short reads,
+ * have the alt_next mechanism keep the queue running after the
+ * last data qtd (the only one, for control and most other cases).
+ */
+ if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
+ || usb_pipecontrol(urb->pipe)))
+ qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
+
+ /*
+ * control requests may need a terminating data "status" ack;
+ * other OUT ones may need a terminating short packet
+ * (zero length).
+ */
+ if (likely(urb->transfer_buffer_length != 0)) {
+ int one_more = 0;
+
+ if (usb_pipecontrol(urb->pipe)) {
+ one_more = 1;
+ token ^= 0x0100; /* "in" <--> "out" */
+ token |= QTD_TOGGLE; /* force DATA1 */
+ } else if (usb_pipeout(urb->pipe)
+ && (urb->transfer_flags & URB_ZERO_PACKET)
+ && !(urb->transfer_buffer_length % maxpacket)) {
+ one_more = 1;
+ }
+ if (one_more) {
+ qtd_prev = qtd;
+ qtd = fotg210_qtd_alloc(fotg210, flags);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+
+ /* never any data in such packets */
+ qtd_fill(fotg210, qtd, 0, 0, token, 0);
+ }
+ }
+
+ /* by default, enable interrupt on urb completion */
+ if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
+ qtd->hw_token |= cpu_to_hc32(fotg210, QTD_IOC);
+ return head;
+
+cleanup:
+ qtd_list_free(fotg210, urb, head);
+ return NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+/*
+ * Would be best to create all qh's from config descriptors,
+ * when each interface/altsetting is established. Unlink
+ * any previous qh and cancel its urbs first; endpoints are
+ * implicitly reset then (data toggle too).
+ * That'd mean updating how usbcore talks to HCDs. (2.7?)
+*/
+
+
+/*
+ * Each QH holds a qtd list; a QH is used for everything except iso.
+ *
+ * For interrupt urbs, the scheduler must set the microframe scheduling
+ * mask(s) each time the QH gets scheduled. For highspeed, that's
+ * just one microframe in the s-mask. For split interrupt transactions
+ * there are additional complications: c-mask, maybe FSTNs.
+ */
+static struct fotg210_qh *
+qh_make(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ gfp_t flags
+) {
+ struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
+ u32 info1 = 0, info2 = 0;
+ int is_input, type;
+ int maxp = 0;
+ struct usb_tt *tt = urb->dev->tt;
+ struct fotg210_qh_hw *hw;
+
+ if (!qh)
+ return qh;
+
+ /*
+ * init endpoint/device data for this QH
+ */
+ info1 |= usb_pipeendpoint(urb->pipe) << 8;
+ info1 |= usb_pipedevice(urb->pipe) << 0;
+
+ is_input = usb_pipein(urb->pipe);
+ type = usb_pipetype(urb->pipe);
+ maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+
+ /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
+ * acts like up to 3KB, but is built from smaller packets.
+ */
+ if (max_packet(maxp) > 1024) {
+ fotg210_dbg(fotg210, "bogus qh maxpacket %d\n",
+ max_packet(maxp));
+ goto done;
+ }
+
+ /* Compute interrupt scheduling parameters just once, and save.
+ * - allowing for high bandwidth, how many nsec/uframe are used?
+ * - split transactions need a second CSPLIT uframe; same question
+ * - splits also need a schedule gap (for full/low speed I/O)
+ * - qh has a polling interval
+ *
+ * For control/bulk requests, the HC or TT handles these.
+ */
+ if (type == PIPE_INTERRUPT) {
+ qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+ is_input, 0,
+ hb_mult(maxp) * max_packet(maxp)));
+ qh->start = NO_FRAME;
+
+ if (urb->dev->speed == USB_SPEED_HIGH) {
+ qh->c_usecs = 0;
+ qh->gap_uf = 0;
+
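+ /* high-speed URBs express the interval in uframes;
+ * qh->period is kept in frames
+ */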
+ qh->period = urb->interval >> 3;
+ if (qh->period == 0 && urb->interval != 1) {
+ /* NOTE interval 2 or 4 uframes could work.
+ * But interval 1 scheduling is simpler, and
+ * includes high bandwidth.
+ */
+ urb->interval = 1;
+ } else if (qh->period > fotg210->periodic_size) {
+ qh->period = fotg210->periodic_size;
+ urb->interval = qh->period << 3;
+ }
+ } else {
+ int think_time;
+
+ /* gap is f(FS/LS transfer times) */
+ qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
+ is_input, 0, maxp) / (125 * 1000);
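+ /* usb_calc_bus_time() returns ns; one uframe is 125 * 1000 ns */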
+
+ /* FIXME this just approximates SPLIT/CSPLIT times */
+ if (is_input) { /* SPLIT, gap, CSPLIT+DATA */
+ qh->c_usecs = qh->usecs + HS_USECS(0);
+ qh->usecs = HS_USECS(1);
+ } else { /* SPLIT+DATA, gap, CSPLIT */
+ qh->usecs += HS_USECS(1);
+ qh->c_usecs = HS_USECS(0);
+ }
+
+ think_time = tt ? tt->think_time : 0;
+ qh->tt_usecs = NS_TO_US(think_time +
+ usb_calc_bus_time(urb->dev->speed,
+ is_input, 0, max_packet(maxp)));
+ qh->period = urb->interval;
+ if (qh->period > fotg210->periodic_size) {
+ qh->period = fotg210->periodic_size;
+ urb->interval = qh->period;
+ }
+ }
+ }
+
+ /* support for tt scheduling, and access to toggles */
+ qh->dev = urb->dev;
+
+ /* using TT? */
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ info1 |= QH_LOW_SPEED;
+ /* FALL THROUGH */
+
+ case USB_SPEED_FULL:
+ /* EPS 0 means "full" */
+ if (type != PIPE_INTERRUPT)
+ info1 |= (FOTG210_TUNE_RL_TT << 28);
+ if (type == PIPE_CONTROL) {
+ info1 |= QH_CONTROL_EP; /* for TT */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
+ }
+ info1 |= maxp << 16;
+
+ info2 |= (FOTG210_TUNE_MULT_TT << 30);
+
+ /* Some Freescale processors have an erratum in which the
+ * port number in the queue head was 0..N-1 instead of 1..N.
+ */
+ if (fotg210_has_fsl_portno_bug(fotg210))
+ info2 |= (urb->dev->ttport-1) << 23;
+ else
+ info2 |= urb->dev->ttport << 23;
+
+ /* set the address of the TT; for TDI's integrated
+ * root hub tt, leave it zeroed.
+ */
+ if (tt && tt->hub != fotg210_to_hcd(fotg210)->self.root_hub)
+ info2 |= tt->hub->devnum << 16;
+
+ /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
+
+ break;
+
+ case USB_SPEED_HIGH: /* no TT involved */
+ info1 |= QH_HIGH_SPEED;
+ if (type == PIPE_CONTROL) {
+ info1 |= (FOTG210_TUNE_RL_HS << 28);
+ info1 |= 64 << 16; /* usb2 fixed maxpacket */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
+ info2 |= (FOTG210_TUNE_MULT_HS << 30);
+ } else if (type == PIPE_BULK) {
+ info1 |= (FOTG210_TUNE_RL_HS << 28);
+ /* The USB spec says that high speed bulk endpoints
+ * always use 512 byte maxpacket. But some device
+ * vendors decided to ignore that, and MSFT is happy
+ * to help them do so. So now people expect to use
+ * such nonconformant devices with Linux too; sigh.
+ */
+ info1 |= max_packet(maxp) << 16;
+ info2 |= (FOTG210_TUNE_MULT_HS << 30);
+ } else { /* PIPE_INTERRUPT */
+ info1 |= max_packet(maxp) << 16;
+ info2 |= hb_mult(maxp) << 30;
+ }
+ break;
+ default:
+ fotg210_dbg(fotg210, "bogus dev %p speed %d\n", urb->dev,
+ urb->dev->speed);
+done:
+ qh_destroy(fotg210, qh);
+ return NULL;
+ }
+
+ /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
+
+ /* init as live, toggle clear, advance to dummy */
+ qh->qh_state = QH_STATE_IDLE;
+ hw = qh->hw;
+ hw->hw_info1 = cpu_to_hc32(fotg210, info1);
+ hw->hw_info2 = cpu_to_hc32(fotg210, info2);
+ qh->is_out = !is_input;
+ usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
+ qh_refresh(fotg210, qh);
+ return qh;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void enable_async(struct fotg210_hcd *fotg210)
+{
+ if (fotg210->async_count++)
+ return;
+
+ /* Stop waiting to turn off the async schedule */
+ fotg210->enabled_hrtimer_events &= ~BIT(FOTG210_HRTIMER_DISABLE_ASYNC);
+
+ /* Don't start the schedule until ASS is 0 */
+ fotg210_poll_ASS(fotg210);
+ turn_on_io_watchdog(fotg210);
+}
+
+static void disable_async(struct fotg210_hcd *fotg210)
+{
+ if (--fotg210->async_count)
+ return;
+
+ /* The async schedule and async_unlink list are supposed to be empty */
+ WARN_ON(fotg210->async->qh_next.qh || fotg210->async_unlink);
+
+ /* Don't turn off the schedule until ASS is 1 */
+ fotg210_poll_ASS(fotg210);
+}
+
+/* move qh (and its qtds) onto async queue; maybe enable queue. */
+
+static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ __hc32 dma = QH_NEXT(fotg210, qh->qh_dma);
+ struct fotg210_qh *head;
+
+ /* Don't link a QH if there's a Clear-TT-Buffer pending */
+ if (unlikely(qh->clearing_tt))
+ return;
+
+ WARN_ON(qh->qh_state != QH_STATE_IDLE);
+
+ /* clear halt and/or toggle; and maybe recover from silicon quirk */
+ qh_refresh(fotg210, qh);
+
+ /* splice right after start */
+ head = fotg210->async;
+ qh->qh_next = head->qh_next;
+ qh->hw->hw_next = head->hw->hw_next;
+ wmb();
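+ /* the qh's own link pointers must be visible in memory before
+ * the HC can reach the qh through the list head below
+ */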
+
+ head->qh_next.qh = qh;
+ head->hw->hw_next = dma;
+
+ qh->xacterrs = 0;
+ qh->qh_state = QH_STATE_LINKED;
+ /* qtd completions reported later by interrupt */
+
+ enable_async(fotg210);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * For control/bulk/interrupt, return QH with these TDs appended.
+ * Allocates and initializes the QH if necessary.
+ * Returns null if it can't allocate a QH it needs to.
+ * If the QH has TDs (urbs) already, that's great.
+ */
+static struct fotg210_qh *qh_append_tds(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ int epnum,
+ void **ptr
+)
+{
+ struct fotg210_qh *qh = NULL;
+ __hc32 qh_addr_mask = cpu_to_hc32(fotg210, 0x7f);
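+ /* the device address lives in the low 7 bits of hw_info1 */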
+
+ qh = (struct fotg210_qh *) *ptr;
+ if (unlikely(qh == NULL)) {
+ /* can't sleep here, we have fotg210->lock... */
+ qh = qh_make(fotg210, urb, GFP_ATOMIC);
+ *ptr = qh;
+ }
+ if (likely(qh != NULL)) {
+ struct fotg210_qtd *qtd;
+
+ if (unlikely(list_empty(qtd_list)))
+ qtd = NULL;
+ else
+ qtd = list_entry(qtd_list->next, struct fotg210_qtd,
+ qtd_list);
+
+ /* control qh may need patching ... */
+ if (unlikely(epnum == 0)) {
+ /* usb_reset_device() briefly reverts to address 0 */
+ if (usb_pipedevice(urb->pipe) == 0)
+ qh->hw->hw_info1 &= ~qh_addr_mask;
+ }
+
+ /* just one way to queue requests: swap with the dummy qtd.
+ * only hc or qh_refresh() ever modify the overlay.
+ */
+ if (likely(qtd != NULL)) {
+ struct fotg210_qtd *dummy;
+ dma_addr_t dma;
+ __hc32 token;
+
+ /* to avoid racing the HC, use the dummy td instead of
+ * the first td of our list (becomes new dummy). both
+ * tds stay deactivated until we're done, when the
+ * HC is allowed to fetch the old dummy (4.10.2).
+ */
+ token = qtd->hw_token;
+ qtd->hw_token = HALT_BIT(fotg210);
+
+ dummy = qh->dummy;
+
+ dma = dummy->qtd_dma;
+ *dummy = *qtd;
+ dummy->qtd_dma = dma;
+
+ list_del(&qtd->qtd_list);
+ list_add(&dummy->qtd_list, qtd_list);
+ list_splice_tail(qtd_list, &qh->qtd_list);
+
+ fotg210_qtd_init(fotg210, qtd, qtd->qtd_dma);
+ qh->dummy = qtd;
+
+ /* hc must see the new dummy at list end */
+ dma = qtd->qtd_dma;
+ qtd = list_entry(qh->qtd_list.prev,
+ struct fotg210_qtd, qtd_list);
+ qtd->hw_next = QTD_NEXT(fotg210, dma);
+
+ /* let the hc process these next qtds */
+ wmb();
+ dummy->hw_token = token;
+
+ urb->hcpriv = qh;
+ }
+ }
+ return qh;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+submit_async(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ gfp_t mem_flags
+) {
+ int epnum;
+ unsigned long flags;
+ struct fotg210_qh *qh = NULL;
+ int rc;
+
+ epnum = urb->ep->desc.bEndpointAddress;
+
+#ifdef FOTG210_URB_TRACE
+ {
+ struct fotg210_qtd *qtd;
+ qtd = list_entry(qtd_list->next, struct fotg210_qtd, qtd_list);
+ fotg210_dbg(fotg210,
+ "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ __func__, urb->dev->devpath, urb,
+ epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd, urb->ep->hcpriv);
+ }
+#endif
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
+ rc = -ESHUTDOWN;
+ goto done;
+ }
+ rc = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
+ if (unlikely(rc))
+ goto done;
+
+ qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
+ if (unlikely(qh == NULL)) {
+ usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ /* Control/bulk operations through TTs don't need scheduling;
+ * the HC and TT handle it when the TT has a buffer ready.
+ */
+ if (likely(qh->qh_state == QH_STATE_IDLE))
+ qh_link_async(fotg210, qh);
+ done:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ if (unlikely(qh == NULL))
+ qtd_list_free(fotg210, urb, qtd_list);
+ return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void single_unlink_async(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh)
+{
+ struct fotg210_qh *prev;
+
+ /* Add to the end of the list of QHs waiting for the next IAAD */
+ qh->qh_state = QH_STATE_UNLINK;
+ if (fotg210->async_unlink)
+ fotg210->async_unlink_last->unlink_next = qh;
+ else
+ fotg210->async_unlink = qh;
+ fotg210->async_unlink_last = qh;
+
+ /* Unlink it from the schedule */
+ prev = fotg210->async;
+ while (prev->qh_next.qh != qh)
+ prev = prev->qh_next.qh;
+
+ prev->hw->hw_next = qh->hw->hw_next;
+ prev->qh_next = qh->qh_next;
+ if (fotg210->qh_scan_next == qh)
+ fotg210->qh_scan_next = qh->qh_next.qh;
+}
+
+static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested)
+{
+ /*
+ * Do nothing if an IAA cycle is already running or
+ * if one will be started shortly.
+ */
+ if (fotg210->async_iaa || fotg210->async_unlinking)
+ return;
+
+ /* Do all the waiting QHs at once */
+ fotg210->async_iaa = fotg210->async_unlink;
+ fotg210->async_unlink = NULL;
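+ /* QHs unlinked from here on go onto a fresh async_unlink list
+ * and wait for the following IAA cycle
+ */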
+
+ /* If the controller isn't running, we don't have to wait for it */
+ if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING)) {
+ if (!nested) /* Avoid recursion */
+ end_unlink_async(fotg210);
+
+ /* Otherwise start a new IAA cycle */
+ } else if (likely(fotg210->rh_state == FOTG210_RH_RUNNING)) {
+ /* Make sure the unlinks are all visible to the hardware */
+ wmb();
+
+ fotg210_writel(fotg210, fotg210->command | CMD_IAAD,
+ &fotg210->regs->command);
+ fotg210_readl(fotg210, &fotg210->regs->command);
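+ /* the read back flushes the posted IAAD write before the
+ * watchdog timer is armed
+ */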
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_IAA_WATCHDOG,
+ true);
+ }
+}
+
+/* the async qh for the qtds being unlinked is now gone from the HC */
+
+static void end_unlink_async(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_qh *qh;
+
+ /* Process the idle QHs */
+ restart:
+ fotg210->async_unlinking = true;
+ while (fotg210->async_iaa) {
+ qh = fotg210->async_iaa;
+ fotg210->async_iaa = qh->unlink_next;
+ qh->unlink_next = NULL;
+
+ qh->qh_state = QH_STATE_IDLE;
+ qh->qh_next.qh = NULL;
+
+ qh_completions(fotg210, qh);
+ if (!list_empty(&qh->qtd_list) &&
+ fotg210->rh_state == FOTG210_RH_RUNNING)
+ qh_link_async(fotg210, qh);
+ disable_async(fotg210);
+ }
+ fotg210->async_unlinking = false;
+
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (fotg210->async_unlink) {
+ start_iaa_cycle(fotg210, true);
+ if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING))
+ goto restart;
+ }
+}
+
+static void unlink_empty_async(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_qh *qh, *next;
+ bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
+ bool check_unlinks_later = false;
+
+ /* Unlink all the async QHs that have been empty for a timer cycle */
+ next = fotg210->async->qh_next.qh;
+ while (next) {
+ qh = next;
+ next = qh->qh_next.qh;
+
+ if (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED) {
+ if (!stopped && qh->unlink_cycle ==
+ fotg210->async_unlink_cycle)
+ check_unlinks_later = true;
+ else
+ single_unlink_async(fotg210, qh);
+ }
+ }
+
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (fotg210->async_unlink)
+ start_iaa_cycle(fotg210, false);
+
+ /* QHs that haven't been empty for long enough will be handled later */
+ if (check_unlinks_later) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_ASYNC_UNLINKS,
+ true);
+ ++fotg210->async_unlink_cycle;
+ }
+}
+
+/* makes sure the async qh will become idle */
+/* caller must own fotg210->lock */
+
+static void start_unlink_async(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh)
+{
+ /*
+ * If the QH isn't linked then there's nothing we can do
+ * unless we were called during a giveback, in which case
+ * qh_completions() has to deal with it.
+ */
+ if (qh->qh_state != QH_STATE_LINKED) {
+ if (qh->qh_state == QH_STATE_COMPLETING)
+ qh->needs_rescan = 1;
+ return;
+ }
+
+ single_unlink_async(fotg210, qh);
+ start_iaa_cycle(fotg210, false);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_async(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_qh *qh;
+ bool check_unlinks_later = false;
+
+ fotg210->qh_scan_next = fotg210->async->qh_next.qh;
+ while (fotg210->qh_scan_next) {
+ qh = fotg210->qh_scan_next;
+ fotg210->qh_scan_next = qh->qh_next.qh;
+ rescan:
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why fotg210->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then fotg210->qh_scan_next is adjusted
+ * in single_unlink_async().
+ */
+ temp = qh_completions(fotg210, qh);
+ if (qh->needs_rescan) {
+ start_unlink_async(fotg210, qh);
+ } else if (list_empty(&qh->qtd_list)
+ && qh->qh_state == QH_STATE_LINKED) {
+ qh->unlink_cycle = fotg210->async_unlink_cycle;
+ check_unlinks_later = true;
+ } else if (temp != 0)
+ goto rescan;
+ }
+ }
+
+ /*
+ * Unlink empty entries, reducing DMA usage as well
+ * as HCD schedule-scanning costs. Delay for any qh
+ * we just scanned; there's a not-unusual case where it
+ * doesn't stay idle for long.
+ */
+ if (check_unlinks_later && fotg210->rh_state == FOTG210_RH_RUNNING &&
+ !(fotg210->enabled_hrtimer_events &
+ BIT(FOTG210_HRTIMER_ASYNC_UNLINKS))) {
+ fotg210_enable_event(fotg210,
+ FOTG210_HRTIMER_ASYNC_UNLINKS, true);
+ ++fotg210->async_unlink_cycle;
+ }
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * EHCI scheduled transaction support: interrupt, iso, split iso
+ * These are called "periodic" transactions in the EHCI spec.
+ *
+ * Note that for interrupt transfers, the QH/QTD manipulation is shared
+ * with the "asynchronous" transaction support (control/bulk transfers).
+ * The only real difference is in how interrupt transfers are scheduled.
+ *
+ * For ISO, we make an "iso_stream" head to serve the same role as a QH.
+ * It keeps track of every ITD (or SITD) that's linked, and holds enough
+ * pre-calculated schedule data to make appending to the queue be quick.
+ */
+
+static int fotg210_get_frame(struct usb_hcd *hcd);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * periodic_next_shadow - return "next" pointer on shadow list
+ * @periodic: host pointer to qh/itd
+ * @tag: hardware tag for type of this record
+ */
+static union fotg210_shadow *
+periodic_next_shadow(struct fotg210_hcd *fotg210,
+ union fotg210_shadow *periodic, __hc32 tag)
+{
+ switch (hc32_to_cpu(fotg210, tag)) {
+ case Q_TYPE_QH:
+ return &periodic->qh->qh_next;
+ case Q_TYPE_FSTN:
+ return &periodic->fstn->fstn_next;
+ default:
+ return &periodic->itd->itd_next;
+ }
+}
+
+static __hc32 *
+shadow_next_periodic(struct fotg210_hcd *fotg210,
+ union fotg210_shadow *periodic, __hc32 tag)
+{
+ switch (hc32_to_cpu(fotg210, tag)) {
+ /* our fotg210_shadow.qh is actually software part */
+ case Q_TYPE_QH:
+ return &periodic->qh->hw->hw_next;
+ /* others are hw parts */
+ default:
+ return periodic->hw_next;
+ }
+}
+
+/* caller must hold fotg210->lock */
+static void periodic_unlink(struct fotg210_hcd *fotg210, unsigned frame,
+ void *ptr)
+{
+ union fotg210_shadow *prev_p = &fotg210->pshadow[frame];
+ __hc32 *hw_p = &fotg210->periodic[frame];
+ union fotg210_shadow here = *prev_p;
+
+ /* find predecessor of "ptr"; hw and shadow lists are in sync */
+ while (here.ptr && here.ptr != ptr) {
+ prev_p = periodic_next_shadow(fotg210, prev_p,
+ Q_NEXT_TYPE(fotg210, *hw_p));
+ hw_p = shadow_next_periodic(fotg210, &here,
+ Q_NEXT_TYPE(fotg210, *hw_p));
+ here = *prev_p;
+ }
+ /* an interrupt entry (at list end) could have been shared */
+ if (!here.ptr)
+ return;
+
+ /* update shadow and hardware lists ... the old "next" pointers
+ * from ptr may still be in use, the caller updates them.
+ */
+ *prev_p = *periodic_next_shadow(fotg210, &here,
+ Q_NEXT_TYPE(fotg210, *hw_p));
+
+ *hw_p = *shadow_next_periodic(fotg210, &here,
+ Q_NEXT_TYPE(fotg210, *hw_p));
+}
+
+/* how many of the uframe's 125 usecs are allocated? */
+static unsigned short
+periodic_usecs(struct fotg210_hcd *fotg210, unsigned frame, unsigned uframe)
+{
+ __hc32 *hw_p = &fotg210->periodic[frame];
+ union fotg210_shadow *q = &fotg210->pshadow[frame];
+ unsigned usecs = 0;
+ struct fotg210_qh_hw *hw;
+
+ while (q->ptr) {
+ switch (hc32_to_cpu(fotg210, Q_NEXT_TYPE(fotg210, *hw_p))) {
+ case Q_TYPE_QH:
+ hw = q->qh->hw;
+ /* is it in the S-mask? */
+ if (hw->hw_info2 & cpu_to_hc32(fotg210, 1 << uframe))
+ usecs += q->qh->usecs;
+ /* ... or C-mask? */
+ if (hw->hw_info2 & cpu_to_hc32(fotg210,
+ 1 << (8 + uframe)))
+ usecs += q->qh->c_usecs;
+ hw_p = &hw->hw_next;
+ q = &q->qh->qh_next;
+ break;
+ /* case Q_TYPE_FSTN: */
+ default:
+ /* for "save place" FSTNs, count the relevant INTR
+ * bandwidth from the previous frame
+ */
+ if (q->fstn->hw_prev != FOTG210_LIST_END(fotg210))
+ fotg210_dbg(fotg210, "ignoring FSTN cost ...\n");
+
+ hw_p = &q->fstn->hw_next;
+ q = &q->fstn->fstn_next;
+ break;
+ case Q_TYPE_ITD:
+ if (q->itd->hw_transaction[uframe])
+ usecs += q->itd->stream->usecs;
+ hw_p = &q->itd->hw_next;
+ q = &q->itd->itd_next;
+ break;
+ }
+ }
+ if (usecs > fotg210->uframe_periodic_max)
+ fotg210_err(fotg210, "uframe %d sched overrun: %d usecs\n",
+ frame * 8 + uframe, usecs);
+ return usecs;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int same_tt(struct usb_device *dev1, struct usb_device *dev2)
+{
+ if (!dev1->tt || !dev2->tt)
+ return 0;
+ if (dev1->tt != dev2->tt)
+ return 0;
+ if (dev1->tt->multi)
+ return dev1->ttport == dev2->ttport;
+ else
+ return 1;
+}
+
+/* return true iff the device's transaction translator is available
+ * for a periodic transfer starting at the specified frame, using
+ * all the uframes in the mask.
+ */
+static int tt_no_collision(
+ struct fotg210_hcd *fotg210,
+ unsigned period,
+ struct usb_device *dev,
+ unsigned frame,
+ u32 uf_mask
+)
+{
+ if (period == 0) /* error */
+ return 0;
+
+ /* note bandwidth wastage: split never follows csplit
+ * (different dev or endpoint) until the next uframe.
+ * calling convention doesn't make that distinction.
+ */
+ for (; frame < fotg210->periodic_size; frame += period) {
+ union fotg210_shadow here;
+ __hc32 type;
+ struct fotg210_qh_hw *hw;
+
+ here = fotg210->pshadow[frame];
+ type = Q_NEXT_TYPE(fotg210, fotg210->periodic[frame]);
+ while (here.ptr) {
+ switch (hc32_to_cpu(fotg210, type)) {
+ case Q_TYPE_ITD:
+ type = Q_NEXT_TYPE(fotg210, here.itd->hw_next);
+ here = here.itd->itd_next;
+ continue;
+ case Q_TYPE_QH:
+ hw = here.qh->hw;
+ if (same_tt(dev, here.qh->dev)) {
+ u32 mask;
+
+ mask = hc32_to_cpu(fotg210,
+ hw->hw_info2);
+ /* "knows" no gap is needed */
+ mask |= mask >> 8;
+ if (mask & uf_mask)
+ break;
+ }
+ type = Q_NEXT_TYPE(fotg210, hw->hw_next);
+ here = here.qh->qh_next;
+ continue;
+ /* case Q_TYPE_FSTN: */
+ default:
+ fotg210_dbg(fotg210,
+ "periodic frame %d bogus type %d\n",
+ frame, type);
+ }
+
+ /* collision or error */
+ return 0;
+ }
+ }
+
+ /* no collision */
+ return 1;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void enable_periodic(struct fotg210_hcd *fotg210)
+{
+ if (fotg210->periodic_count++)
+ return;
+
+ /* Stop waiting to turn off the periodic schedule */
+ fotg210->enabled_hrtimer_events &=
+ ~BIT(FOTG210_HRTIMER_DISABLE_PERIODIC);
+
+ /* Don't start the schedule until PSS is 0 */
+ fotg210_poll_PSS(fotg210);
+ turn_on_io_watchdog(fotg210);
+}
+
+static void disable_periodic(struct fotg210_hcd *fotg210)
+{
+ if (--fotg210->periodic_count)
+ return;
+
+ /* Don't turn off the schedule until PSS is 1 */
+ fotg210_poll_PSS(fotg210);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* periodic schedule slots have iso tds (normal or split) first, then a
+ * sparse tree for active interrupt transfers.
+ *
+ * this just links in a qh; caller guarantees uframe masks are set right.
+ * no FSTN support (yet; fotg210 0.96+)
+ */
+static void qh_link_periodic(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ unsigned i;
+ unsigned period = qh->period;
+
+ dev_dbg(&qh->dev->dev,
+ "link qh%d-%04x/%p start %d [%d/%d us]\n",
+ period, hc32_to_cpup(fotg210, &qh->hw->hw_info2)
+ & (QH_CMASK | QH_SMASK),
+ qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* high bandwidth, or otherwise every microframe */
+ if (period == 0)
+ period = 1;
+
+ for (i = qh->start; i < fotg210->periodic_size; i += period) {
+ union fotg210_shadow *prev = &fotg210->pshadow[i];
+ __hc32 *hw_p = &fotg210->periodic[i];
+ union fotg210_shadow here = *prev;
+ __hc32 type = 0;
+
+ /* skip the iso nodes at list head */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE(fotg210, *hw_p);
+ if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
+ break;
+ prev = periodic_next_shadow(fotg210, prev, type);
+ hw_p = shadow_next_periodic(fotg210, &here, type);
+ here = *prev;
+ }
+
+ /* sorting each branch by period (slow-->fast)
+ * enables sharing interior tree nodes
+ */
+ while (here.ptr && qh != here.qh) {
+ if (qh->period > here.qh->period)
+ break;
+ prev = &here.qh->qh_next;
+ hw_p = &here.qh->hw->hw_next;
+ here = *prev;
+ }
+ /* link in this qh, unless some earlier pass did that */
+ if (qh != here.qh) {
+ qh->qh_next = here;
+ if (here.qh)
+ qh->hw->hw_next = *hw_p;
+ wmb();
+ prev->qh = qh;
+ *hw_p = QH_NEXT(fotg210, qh->qh_dma);
+ }
+ }
+ qh->qh_state = QH_STATE_LINKED;
+ qh->xacterrs = 0;
+
+ /* update per-qh bandwidth for usbfs */
+ fotg210_to_hcd(fotg210)->self.bandwidth_allocated += qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ list_add(&qh->intr_node, &fotg210->intr_qh_list);
+
+ /* maybe enable periodic schedule processing */
+ ++fotg210->intr_count;
+ enable_periodic(fotg210);
+}
+
+static void qh_unlink_periodic(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh)
+{
+ unsigned i;
+ unsigned period;
+
+ /*
+ * If qh is for a low/full-speed device, simply unlinking it
+ * could interfere with an ongoing split transaction. To unlink
+ * it safely would require setting the QH_INACTIVATE bit and
+ * waiting at least one frame, as described in EHCI 4.12.2.5.
+ *
+ * We won't bother with any of this. Instead, we assume that the
+ * only reason for unlinking an interrupt QH while the current URB
+ * is still active is to dequeue all the URBs (flush the whole
+ * endpoint queue).
+ *
+ * If rebalancing the periodic schedule is ever implemented, this
+ * approach will no longer be valid.
+ */
+
+ /* high bandwidth, or otherwise part of every microframe */
+ period = qh->period;
+ if (!period)
+ period = 1;
+
+ for (i = qh->start; i < fotg210->periodic_size; i += period)
+ periodic_unlink(fotg210, i, qh);
+
+ /* update per-qh bandwidth for usbfs */
+ fotg210_to_hcd(fotg210)->self.bandwidth_allocated -= qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ dev_dbg(&qh->dev->dev,
+ "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+ qh->period,
+ hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
+ (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* qh->qh_next still "live" to HC */
+ qh->qh_state = QH_STATE_UNLINK;
+ qh->qh_next.ptr = NULL;
+
+ if (fotg210->qh_scan_next == qh)
+ fotg210->qh_scan_next = list_entry(qh->intr_node.next,
+ struct fotg210_qh, intr_node);
+ list_del(&qh->intr_node);
+}
+
+static void start_unlink_intr(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh)
+{
+ /* If the QH isn't linked then there's nothing we can do
+ * unless we were called during a giveback, in which case
+ * qh_completions() has to deal with it.
+ */
+ if (qh->qh_state != QH_STATE_LINKED) {
+ if (qh->qh_state == QH_STATE_COMPLETING)
+ qh->needs_rescan = 1;
+ return;
+ }
+
+ qh_unlink_periodic(fotg210, qh);
+
+ /* Make sure the unlinks are visible before starting the timer */
+ wmb();
+
+ /*
+ * The EHCI spec doesn't say how long it takes the controller to
+ * stop accessing an unlinked interrupt QH. The timer delay is
+ * 9 uframes; presumably that will be long enough.
+ */
+ qh->unlink_cycle = fotg210->intr_unlink_cycle;
+
+ /* New entries go at the end of the intr_unlink list */
+ if (fotg210->intr_unlink)
+ fotg210->intr_unlink_last->unlink_next = qh;
+ else
+ fotg210->intr_unlink = qh;
+ fotg210->intr_unlink_last = qh;
+
+ if (fotg210->intr_unlinking)
+ ; /* Avoid recursive calls */
+ else if (fotg210->rh_state < FOTG210_RH_RUNNING)
+ fotg210_handle_intr_unlinks(fotg210);
+ else if (fotg210->intr_unlink == qh) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
+ true);
+ ++fotg210->intr_unlink_cycle;
+ }
+}
+
+static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ struct fotg210_qh_hw *hw = qh->hw;
+ int rc;
+
+ qh->qh_state = QH_STATE_IDLE;
+ hw->hw_next = FOTG210_LIST_END(fotg210);
+
+ qh_completions(fotg210, qh);
+
+ /* reschedule QH iff another request is queued */
+ if (!list_empty(&qh->qtd_list) &&
+ fotg210->rh_state == FOTG210_RH_RUNNING) {
+ rc = qh_schedule(fotg210, qh);
+
+ /* An error here likely indicates handshake failure
+ * or no space left in the schedule. Neither fault
+ * should happen often ...
+ *
+ * FIXME kill the now-dysfunctional queued urbs
+ */
+ if (rc != 0)
+ fotg210_err(fotg210, "can't reschedule qh %p, err %d\n",
+ qh, rc);
+ }
+
+ /* maybe turn off periodic schedule */
+ --fotg210->intr_count;
+ disable_periodic(fotg210);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int check_period(
+ struct fotg210_hcd *fotg210,
+ unsigned frame,
+ unsigned uframe,
+ unsigned period,
+ unsigned usecs
+) {
+ int claimed;
+
+ /* complete split running into next frame?
+ * given FSTN support, we could sometimes check...
+ */
+ if (uframe >= 8)
+ return 0;
+
+ /* convert "usecs we need" to "max already claimed" */
+ usecs = fotg210->uframe_periodic_max - usecs;
+
+ /* we "know" 2 and 4 uframe intervals were rejected; so
+ * for period 0, check _every_ microframe in the schedule.
+ */
+ if (unlikely(period == 0)) {
+ do {
+ for (uframe = 0; uframe < 7; uframe++) {
+ claimed = periodic_usecs(fotg210, frame,
+ uframe);
+ if (claimed > usecs)
+ return 0;
+ }
+ } while ((frame += 1) < fotg210->periodic_size);
+
+ /* just check the specified uframe, at that period */
+ } else {
+ do {
+ claimed = periodic_usecs(fotg210, frame, uframe);
+ if (claimed > usecs)
+ return 0;
+ } while ((frame += period) < fotg210->periodic_size);
+ }
+
+ /* success! */
+ return 1;
+}
+
+static int check_intr_schedule(
+ struct fotg210_hcd *fotg210,
+ unsigned frame,
+ unsigned uframe,
+ const struct fotg210_qh *qh,
+ __hc32 *c_maskp
+)
+{
+ int retval = -ENOSPC;
+ u8 mask = 0;
+
+ if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
+ goto done;
+
+ if (!check_period(fotg210, frame, uframe, qh->period, qh->usecs))
+ goto done;
+ if (!qh->c_usecs) {
+ retval = 0;
+ *c_maskp = 0;
+ goto done;
+ }
+
+ /* Make sure this tt's buffer is also available for CSPLITs.
+ * We pessimize a bit; probably the typical full speed case
+ * doesn't need the second CSPLIT.
+ *
+ * NOTE: both SPLIT and CSPLIT could be checked in just
+ * one smart pass...
+ */
+ mask = 0x03 << (uframe + qh->gap_uf);
+ *c_maskp = cpu_to_hc32(fotg210, mask << 8);
+
+ mask |= 1 << uframe;
+ if (tt_no_collision(fotg210, qh->period, qh->dev, frame, mask)) {
+ if (!check_period(fotg210, frame, uframe + qh->gap_uf + 1,
+ qh->period, qh->c_usecs))
+ goto done;
+ if (!check_period(fotg210, frame, uframe + qh->gap_uf,
+ qh->period, qh->c_usecs))
+ goto done;
+ retval = 0;
+ }
+done:
+ return retval;
+}
+
+/* "first fit" scheduling policy used the first time through,
+ * or when the previous schedule slot can't be re-used.
+ */
+static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ int status;
+ unsigned uframe;
+ __hc32 c_mask;
+ unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ qh_refresh(fotg210, qh);
+ hw->hw_next = FOTG210_LIST_END(fotg210);
+ frame = qh->start;
+
+ /* reuse the previous schedule slots, if we can */
+ if (frame < qh->period) {
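+ /* ffs() is 1-based, hence the --uframe in the call below */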
+ uframe = ffs(hc32_to_cpup(fotg210, &hw->hw_info2) & QH_SMASK);
+ status = check_intr_schedule(fotg210, frame, --uframe,
+ qh, &c_mask);
+ } else {
+ uframe = 0;
+ c_mask = 0;
+ status = -ENOSPC;
+ }
+
+ /* else scan the schedule to find a group of slots such that all
+ * uframes have enough periodic bandwidth available.
+ */
+ if (status) {
+ /* "normal" case, uframing flexible except with splits */
+ if (qh->period) {
+ int i;
+
+ for (i = qh->period; status && i > 0; --i) {
+ frame = ++fotg210->random_frame % qh->period;
+ for (uframe = 0; uframe < 8; uframe++) {
+ status = check_intr_schedule(fotg210,
+ frame, uframe, qh,
+ &c_mask);
+ if (status == 0)
+ break;
+ }
+ }
+
+ /* qh->period == 0 means every uframe */
+ } else {
+ frame = 0;
+ status = check_intr_schedule(fotg210, 0, 0, qh,
+ &c_mask);
+ }
+ if (status)
+ goto done;
+ qh->start = frame;
+
+ /* reset S-frame and (maybe) C-frame masks */
+ hw->hw_info2 &= cpu_to_hc32(fotg210, ~(QH_CMASK | QH_SMASK));
+ hw->hw_info2 |= qh->period
+ ? cpu_to_hc32(fotg210, 1 << uframe)
+ : cpu_to_hc32(fotg210, QH_SMASK);
+ hw->hw_info2 |= c_mask;
+ } else
+ fotg210_dbg(fotg210, "reused qh %p schedule\n", qh);
+
+ /* stuff into the periodic schedule */
+ qh_link_periodic(fotg210, qh);
+done:
+ return status;
+}
+
+static int intr_submit(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ gfp_t mem_flags
+) {
+ unsigned epnum;
+ unsigned long flags;
+ struct fotg210_qh *qh;
+ int status;
+ struct list_head empty;
+
+ /* get endpoint and transfer/schedule data */
+ epnum = urb->ep->desc.bEndpointAddress;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
+ status = -ESHUTDOWN;
+ goto done_not_linked;
+ }
+ status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
+ if (unlikely(status))
+ goto done_not_linked;
+
+ /* get qh and force any scheduling errors */
+ INIT_LIST_HEAD(&empty);
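+ /* appending an empty list only creates (and, below, schedules)
+ * the QH; the real qtds are queued once scheduling succeeds
+ */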
+ qh = qh_append_tds(fotg210, urb, &empty, epnum, &urb->ep->hcpriv);
+ if (qh == NULL) {
+ status = -ENOMEM;
+ goto done;
+ }
+ if (qh->qh_state == QH_STATE_IDLE) {
+ status = qh_schedule(fotg210, qh);
+ if (status)
+ goto done;
+ }
+
+ /* then queue the urb's tds to the qh */
+ qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
+ BUG_ON(qh == NULL);
+
+ /* ... update usbfs periodic stats */
+ fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs++;
+
+done:
+ if (unlikely(status))
+ usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+done_not_linked:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ if (status)
+ qtd_list_free(fotg210, urb, qtd_list);
+
+ return status;
+}
+
+static void scan_intr(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_qh *qh;
+
+ list_for_each_entry_safe(qh, fotg210->qh_scan_next,
+ &fotg210->intr_qh_list, intr_node) {
+ rescan:
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why fotg210->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then fotg210->qh_scan_next is adjusted
+ * in qh_unlink_periodic().
+ */
+ temp = qh_completions(fotg210, qh);
+ if (unlikely(qh->needs_rescan ||
+ (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED)))
+ start_unlink_intr(fotg210, qh);
+ else if (temp != 0)
+ goto rescan;
+ }
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* fotg210_iso_stream ops work with both ITD and SITD */
+
+static struct fotg210_iso_stream *
+iso_stream_alloc(gfp_t mem_flags)
+{
+ struct fotg210_iso_stream *stream;
+
+ stream = kzalloc(sizeof(*stream), mem_flags);
+ if (likely(stream != NULL)) {
+ INIT_LIST_HEAD(&stream->td_list);
+ INIT_LIST_HEAD(&stream->free_list);
+ stream->next_uframe = -1;
+ }
+ return stream;
+}
+
+static void
+iso_stream_init(
+ struct fotg210_hcd *fotg210,
+ struct fotg210_iso_stream *stream,
+ struct usb_device *dev,
+ int pipe,
+ unsigned interval
+)
+{
+ u32 buf1;
+ unsigned epnum, maxp;
+ int is_input;
+ long bandwidth;
+ unsigned multi;
+
+ /*
+ * this might be a "high bandwidth" highspeed endpoint,
+ * as encoded in the ep descriptor's wMaxPacketSize field
+ */
+ epnum = usb_pipeendpoint(pipe);
+ is_input = usb_pipein(pipe) ? USB_DIR_IN : 0;
+ maxp = usb_maxpacket(dev, pipe, !is_input);
+ if (is_input)
+ buf1 = (1 << 11);
+ else
+ buf1 = 0;
+
+ maxp = max_packet(maxp);
+ multi = hb_mult(maxp);
+ buf1 |= maxp;
+ maxp *= multi;
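+ /* buf1 carries the per-transaction maxpacket; the bandwidth
+ * math below uses the total per-uframe payload (maxp * multi)
+ */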
+
+ stream->buf0 = cpu_to_hc32(fotg210, (epnum << 8) | dev->devnum);
+ stream->buf1 = cpu_to_hc32(fotg210, buf1);
+ stream->buf2 = cpu_to_hc32(fotg210, multi);
+
+ /* usbfs wants to report the average usecs per frame tied up
+ * when transfers on this endpoint are scheduled ...
+ */
+ if (dev->speed == USB_SPEED_FULL) {
+ interval <<= 3;
+ stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed,
+ is_input, 1, maxp));
+ stream->usecs /= 8;
+ } else {
+ stream->highspeed = 1;
+ stream->usecs = HS_USECS_ISO(maxp);
+ }
+ bandwidth = stream->usecs * 8;
+ bandwidth /= interval;
+
+ stream->bandwidth = bandwidth;
+ stream->udev = dev;
+ stream->bEndpointAddress = is_input | epnum;
+ stream->interval = interval;
+ stream->maxp = maxp;
+}
+
+static struct fotg210_iso_stream *
+iso_stream_find(struct fotg210_hcd *fotg210, struct urb *urb)
+{
+ unsigned epnum;
+ struct fotg210_iso_stream *stream;
+ struct usb_host_endpoint *ep;
+ unsigned long flags;
+
+ epnum = usb_pipeendpoint(urb->pipe);
+ if (usb_pipein(urb->pipe))
+ ep = urb->dev->ep_in[epnum];
+ else
+ ep = urb->dev->ep_out[epnum];
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ stream = ep->hcpriv;
+
+ if (unlikely(stream == NULL)) {
+ stream = iso_stream_alloc(GFP_ATOMIC);
+ if (likely(stream != NULL)) {
+ ep->hcpriv = stream;
+ stream->ep = ep;
+ iso_stream_init(fotg210, stream, urb->dev, urb->pipe,
+ urb->interval);
+ }
+
+ /* if dev->ep[epnum] is a QH, hw is set */
+ } else if (unlikely(stream->hw != NULL)) {
+ fotg210_dbg(fotg210, "dev %s ep%d%s, not iso??\n",
+ urb->dev->devpath, epnum,
+ usb_pipein(urb->pipe) ? "in" : "out");
+ stream = NULL;
+ }
+
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return stream;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* fotg210_iso_sched ops can be ITD-only or SITD-only */
+
+static struct fotg210_iso_sched *
+iso_sched_alloc(unsigned packets, gfp_t mem_flags)
+{
+ struct fotg210_iso_sched *iso_sched;
+ int size = sizeof(*iso_sched);
+
+ size += packets * sizeof(struct fotg210_iso_packet);
+ iso_sched = kzalloc(size, mem_flags);
+ if (likely(iso_sched != NULL))
+ INIT_LIST_HEAD(&iso_sched->td_list);
+
+ return iso_sched;
+}
+
+static inline void
+itd_sched_init(
+ struct fotg210_hcd *fotg210,
+ struct fotg210_iso_sched *iso_sched,
+ struct fotg210_iso_stream *stream,
+ struct urb *urb
+)
+{
+ unsigned i;
+ dma_addr_t dma = urb->transfer_dma;
+
+ /* how many uframes are needed for these transfers */
+ iso_sched->span = urb->number_of_packets * stream->interval;
+
+ /* figure out per-uframe itd fields that we'll need later
+ * when we fit new itds into the schedule.
+ */
+ for (i = 0; i < urb->number_of_packets; i++) {
+ struct fotg210_iso_packet *uframe = &iso_sched->packet[i];
+ unsigned length;
+ dma_addr_t buf;
+ u32 trans;
+
+ length = urb->iso_frame_desc[i].length;
+ buf = dma + urb->iso_frame_desc[i].offset;
+
+ trans = FOTG210_ISOC_ACTIVE;
+ trans |= buf & 0x0fff;
+ if (unlikely(((i + 1) == urb->number_of_packets))
+ && !(urb->transfer_flags & URB_NO_INTERRUPT))
+ trans |= FOTG210_ITD_IOC;
+ trans |= length << 16;
+ uframe->transaction = cpu_to_hc32(fotg210, trans);
+
+ /* might need to cross a buffer page within a uframe */
+ uframe->bufp = (buf & ~(u64)0x0fff);
+ buf += length;
+ if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
+ uframe->cross = 1;
+ }
+}
+
+static void
+iso_sched_free(
+ struct fotg210_iso_stream *stream,
+ struct fotg210_iso_sched *iso_sched
+)
+{
+ if (!iso_sched)
+ return;
+ /* caller must hold fotg210->lock! */
+ list_splice(&iso_sched->td_list, &stream->free_list);
+ kfree(iso_sched);
+}
+
+static int
+itd_urb_transaction(
+ struct fotg210_iso_stream *stream,
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ gfp_t mem_flags
+)
+{
+ struct fotg210_itd *itd;
+ dma_addr_t itd_dma;
+ int i;
+ unsigned num_itds;
+ struct fotg210_iso_sched *sched;
+ unsigned long flags;
+
+ sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
+ if (unlikely(sched == NULL))
+ return -ENOMEM;
+
+ itd_sched_init(fotg210, sched, stream, urb);
+
+ if (urb->interval < 8)
+ num_itds = 1 + (sched->span + 7) / 8;
+ else
+ num_itds = urb->number_of_packets;
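+ /* one iTD covers a whole frame (8 uframes); with interval >= 8
+ * uframes each packet lands in its own frame and needs its own iTD
+ */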
+
+ /* allocate/init ITDs */
+ spin_lock_irqsave(&fotg210->lock, flags);
+ for (i = 0; i < num_itds; i++) {
+
+ /*
+ * Use iTDs from the free list, but not iTDs that may
+ * still be in use by the hardware.
+ */
+ if (likely(!list_empty(&stream->free_list))) {
+ itd = list_first_entry(&stream->free_list,
+ struct fotg210_itd, itd_list);
+ if (itd->frame == fotg210->now_frame)
+ goto alloc_itd;
+ list_del(&itd->itd_list);
+ itd_dma = itd->itd_dma;
+ } else {
+ alloc_itd:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ itd = dma_pool_alloc(fotg210->itd_pool, mem_flags,
+ &itd_dma);
+ spin_lock_irqsave(&fotg210->lock, flags);
+ if (!itd) {
+ iso_sched_free(stream, sched);
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return -ENOMEM;
+ }
+ }
+
+ memset(itd, 0, sizeof(*itd));
+ itd->itd_dma = itd_dma;
+ list_add(&itd->itd_list, &sched->td_list);
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+
+ /* temporarily store schedule info in hcpriv */
+ urb->hcpriv = sched;
+ urb->error_count = 0;
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline int
+itd_slot_ok(
+ struct fotg210_hcd *fotg210,
+ u32 mod,
+ u32 uframe,
+ u8 usecs,
+ u32 period
+)
+{
+ uframe %= period;
+ do {
+ /* can't commit more than uframe_periodic_max usec */
+ if (periodic_usecs(fotg210, uframe >> 3, uframe & 0x7)
+ > (fotg210->uframe_periodic_max - usecs))
+ return 0;
+
+ /* we know urb->interval is 2^N uframes */
+ uframe += period;
+ } while (uframe < mod);
+ return 1;
+}
+
+/*
+ * This scheduler plans almost as far into the future as it has actual
+ * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
+ * "as small as possible" to be cache-friendlier.) That limits the size
+ * transfers you can stream reliably; avoid more than 64 msec per urb.
+ * Also avoid queue depths of less than fotg210's worst irq latency (affected
+ * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
+ * and other factors); or more than about 230 msec total (for portability,
+ * given FOTG210_TUNE_FLS and the slop). Or, write a smarter scheduler!
+ */
+
+#define SCHEDULE_SLOP 80 /* microframes */
+
+static int
+iso_stream_schedule(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct fotg210_iso_stream *stream
+)
+{
+ u32 now, next, start, period, span;
+ int status;
+ unsigned mod = fotg210->periodic_size << 3;
+ struct fotg210_iso_sched *sched = urb->hcpriv;
+
+ period = urb->interval;
+ span = sched->span;
+
+ if (span > mod - SCHEDULE_SLOP) {
+ fotg210_dbg(fotg210, "iso request %p too long\n", urb);
+ status = -EFBIG;
+ goto fail;
+ }
+
+ now = fotg210_read_frame_index(fotg210) & (mod - 1);
+
+ /* Typical case: reuse current schedule, stream is still active.
+ * Hopefully there are no gaps from the host falling behind
+ * (irq delays etc), but if there are we'll take the next
+ * slot in the schedule, implicitly assuming URB_ISO_ASAP.
+ */
+ if (likely(!list_empty(&stream->td_list))) {
+ u32 excess;
+
+ /* For high speed devices, allow scheduling within the
+ * isochronous scheduling threshold. For full speed devices
+ * and Intel PCI-based controllers, don't (workaround for the
+ * Intel ICH9 bug).
+ */
+ if (!stream->highspeed && fotg210->fs_i_thresh)
+ next = now + fotg210->i_thresh;
+ else
+ next = now;
+
+ /* Fell behind (by up to twice the slop amount)?
+ * We decide based on the time of the last currently-scheduled
+ * slot, not the time of the next available slot.
+ */
+ excess = (stream->next_uframe - period - next) & (mod - 1);
+ if (excess >= mod - 2 * SCHEDULE_SLOP)
+ start = next + excess - mod + period *
+ DIV_ROUND_UP(mod - excess, period);
+ else
+ start = next + excess + period;
+ if (start - now >= mod) {
+ fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now - period, period,
+ mod);
+ status = -EFBIG;
+ goto fail;
+ }
+ }
+
+ /* need to schedule; when's the next (u)frame we could start?
+ * this is bigger than fotg210->i_thresh allows; scheduling itself
+ * isn't free, the slop should handle reasonably slow cpus. it
+ * can also help high bandwidth if the dma and irq loads don't
+ * jump until after the queue is primed.
+ */
+ else {
+ int done = 0;
+ start = SCHEDULE_SLOP + (now & ~0x07);
+
+ /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
+
+ /* find a uframe slot with enough bandwidth.
+ * Early uframes are more precious because full-speed
+ * iso IN transfers can't use late uframes,
+ * and therefore they should be allocated last.
+ */
+ next = start;
+ start += period;
+ do {
+ start--;
+ /* check schedule: enough space? */
+ if (itd_slot_ok(fotg210, mod, start,
+ stream->usecs, period))
+ done = 1;
+ } while (start > next && !done);
+
+ /* no room in the schedule */
+ if (!done) {
+ fotg210_dbg(fotg210, "iso resched full %p (now %d max %d)\n",
+ urb, now, now + mod);
+ status = -ENOSPC;
+ goto fail;
+ }
+ }
+
+ /* Tried to schedule too far into the future? */
+ if (unlikely(start - now + span - period
+ >= mod - 2 * SCHEDULE_SLOP)) {
+ fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now, span - period,
+ mod - 2 * SCHEDULE_SLOP);
+ status = -EFBIG;
+ goto fail;
+ }
+
+ stream->next_uframe = start & (mod - 1);
+
+ /* report high speed start in uframes; full speed, in frames */
+ urb->start_frame = stream->next_uframe;
+ if (!stream->highspeed)
+ urb->start_frame >>= 3;
+
+ /* Make sure scan_isoc() sees these */
+ if (fotg210->isoc_count == 0)
+ fotg210->next_frame = now >> 3;
+ return 0;
+
+ fail:
+ iso_sched_free(stream, sched);
+ urb->hcpriv = NULL;
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+itd_init(struct fotg210_hcd *fotg210, struct fotg210_iso_stream *stream,
+ struct fotg210_itd *itd)
+{
+ int i;
+
+ /* it's been recently zeroed */
+ itd->hw_next = FOTG210_LIST_END(fotg210);
+ itd->hw_bufp[0] = stream->buf0;
+ itd->hw_bufp[1] = stream->buf1;
+ itd->hw_bufp[2] = stream->buf2;
+
+ for (i = 0; i < 8; i++)
+ itd->index[i] = -1;
+
+ /* All other fields are filled when scheduling */
+}
+
+static inline void
+itd_patch(
+ struct fotg210_hcd *fotg210,
+ struct fotg210_itd *itd,
+ struct fotg210_iso_sched *iso_sched,
+ unsigned index,
+ u16 uframe
+)
+{
+ struct fotg210_iso_packet *uf = &iso_sched->packet[index];
+ unsigned pg = itd->pg;
+
+ uframe &= 0x07;
+ itd->index[uframe] = index;
+
+ itd->hw_transaction[uframe] = uf->transaction;
+ itd->hw_transaction[uframe] |= cpu_to_hc32(fotg210, pg << 12);
+ itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, uf->bufp & ~(u32)0);
+ itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(uf->bufp >> 32));
+
+ /* iso_frame_desc[].offset must be strictly increasing */
+ if (unlikely(uf->cross)) {
+ u64 bufp = uf->bufp + 4096;
+
+ itd->pg = ++pg;
+ itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, bufp & ~(u32)0);
+ itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(bufp >> 32));
+ }
+}
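+
+/*
+ * Illustration of the cross-page case above (offsets are only an
+ * example): a 1 KB packet whose buffer starts 512 bytes before a 4096
+ * byte page boundary spills into the next page, so itd_sched_init()
+ * marks uf->cross and itd_patch() advances itd->pg so the remainder is
+ * addressed through the following buffer pointer.
+ */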
+
+static inline void
+itd_link(struct fotg210_hcd *fotg210, unsigned frame, struct fotg210_itd *itd)
+{
+ union fotg210_shadow *prev = &fotg210->pshadow[frame];
+ __hc32 *hw_p = &fotg210->periodic[frame];
+ union fotg210_shadow here = *prev;
+ __hc32 type = 0;
+
+ /* skip any iso nodes which might belong to previous microframes */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE(fotg210, *hw_p);
+ if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
+ break;
+ prev = periodic_next_shadow(fotg210, prev, type);
+ hw_p = shadow_next_periodic(fotg210, &here, type);
+ here = *prev;
+ }
+
+ itd->itd_next = here;
+ itd->hw_next = *hw_p;
+ prev->itd = itd;
+ itd->frame = frame;
+ wmb();
+ *hw_p = cpu_to_hc32(fotg210, itd->itd_dma | Q_TYPE_ITD);
+}
+
+/* fit urb's itds into the selected schedule slot; activate as needed */
+static void itd_link_urb(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ unsigned mod,
+ struct fotg210_iso_stream *stream
+)
+{
+ int packet;
+ unsigned next_uframe, uframe, frame;
+ struct fotg210_iso_sched *iso_sched = urb->hcpriv;
+ struct fotg210_itd *itd;
+
+ next_uframe = stream->next_uframe & (mod - 1);
+
+ if (unlikely(list_empty(&stream->td_list))) {
+ fotg210_to_hcd(fotg210)->self.bandwidth_allocated
+ += stream->bandwidth;
+ fotg210_dbg(fotg210,
+ "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
+ urb->dev->devpath, stream->bEndpointAddress & 0x0f,
+ (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
+ urb->interval,
+ next_uframe >> 3, next_uframe & 0x7);
+ }
+
+ /* fill iTDs uframe by uframe */
+ for (packet = 0, itd = NULL; packet < urb->number_of_packets;) {
+ if (itd == NULL) {
+ /* ASSERT: we have all necessary itds */
+
+ /* ASSERT: no itds for this endpoint in this uframe */
+
+ itd = list_entry(iso_sched->td_list.next,
+ struct fotg210_itd, itd_list);
+ list_move_tail(&itd->itd_list, &stream->td_list);
+ itd->stream = stream;
+ itd->urb = urb;
+ itd_init(fotg210, stream, itd);
+ }
+
+ uframe = next_uframe & 0x07;
+ frame = next_uframe >> 3;
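+ /* e.g. next_uframe == 83 selects frame 10, microframe 3 (83 >> 3, 83 & 7) */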
+
+ itd_patch(fotg210, itd, iso_sched, packet, uframe);
+
+ next_uframe += stream->interval;
+ next_uframe &= mod - 1;
+ packet++;
+
+ /* link completed itds into the schedule */
+ if (((next_uframe >> 3) != frame)
+ || packet == urb->number_of_packets) {
+ itd_link(fotg210, frame & (fotg210->periodic_size - 1),
+ itd);
+ itd = NULL;
+ }
+ }
+ stream->next_uframe = next_uframe;
+
+ /* don't need that schedule data any more */
+ iso_sched_free(stream, iso_sched);
+ urb->hcpriv = NULL;
+
+ ++fotg210->isoc_count;
+ enable_periodic(fotg210);
+}
+
+#define ISO_ERRS (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |\
+ FOTG210_ISOC_XACTERR)
+
+/* Process and recycle a completed ITD. Return true iff its urb completed,
+ * and hence its completion callback probably added things to the hardware
+ * schedule.
+ *
+ * Note that we carefully avoid recycling this descriptor until after any
+ * completion callback runs, so that it won't be reused quickly. That is,
+ * assuming (a) no more than two urbs per frame on this endpoint, and also
+ * (b) only this endpoint's completions submit URBs. It seems some silicon
+ * corrupts things if you reuse completed descriptors very quickly...
+ */
+static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
+{
+ struct urb *urb = itd->urb;
+ struct usb_iso_packet_descriptor *desc;
+ u32 t;
+ unsigned uframe;
+ int urb_index = -1;
+ struct fotg210_iso_stream *stream = itd->stream;
+ struct usb_device *dev;
+ bool retval = false;
+
+ /* for each uframe with a packet */
+ for (uframe = 0; uframe < 8; uframe++) {
+ if (likely(itd->index[uframe] == -1))
+ continue;
+ urb_index = itd->index[uframe];
+ desc = &urb->iso_frame_desc[urb_index];
+
+ t = hc32_to_cpup(fotg210, &itd->hw_transaction[uframe]);
+ itd->hw_transaction[uframe] = 0;
+
+ /* report transfer status */
+ if (unlikely(t & ISO_ERRS)) {
+ urb->error_count++;
+ if (t & FOTG210_ISOC_BUF_ERR)
+ desc->status = usb_pipein(urb->pipe)
+ ? -ENOSR /* hc couldn't read */
+ : -ECOMM; /* hc couldn't write */
+ else if (t & FOTG210_ISOC_BABBLE)
+ desc->status = -EOVERFLOW;
+ else /* (t & FOTG210_ISOC_XACTERR) */
+ desc->status = -EPROTO;
+
+ /* HC need not update length with this error */
+ if (!(t & FOTG210_ISOC_BABBLE)) {
+ desc->actual_length =
+ fotg210_itdlen(urb, desc, t);
+ urb->actual_length += desc->actual_length;
+ }
+ } else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) {
+ desc->status = 0;
+ desc->actual_length = fotg210_itdlen(urb, desc, t);
+ urb->actual_length += desc->actual_length;
+ } else {
+ /* URB was too late */
+ desc->status = -EXDEV;
+ }
+ }
+
+ /* handle completion now? */
+ if (likely((urb_index + 1) != urb->number_of_packets))
+ goto done;
+
+ /* ASSERT: it's really the last itd for this urb
+ list_for_each_entry (itd, &stream->td_list, itd_list)
+ BUG_ON (itd->urb == urb);
+ */
+
+ /* give urb back to the driver; completion often (re)submits */
+ dev = urb->dev;
+ fotg210_urb_done(fotg210, urb, 0);
+ retval = true;
+ urb = NULL;
+
+ --fotg210->isoc_count;
+ disable_periodic(fotg210);
+
+ if (unlikely(list_is_singular(&stream->td_list))) {
+ fotg210_to_hcd(fotg210)->self.bandwidth_allocated
+ -= stream->bandwidth;
+ fotg210_dbg(fotg210,
+ "deschedule devp %s ep%d%s-iso\n",
+ dev->devpath, stream->bEndpointAddress & 0x0f,
+ (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
+ }
+
+done:
+ itd->urb = NULL;
+
+ /* Add to the end of the free list for later reuse */
+ list_move_tail(&itd->itd_list, &stream->free_list);
+
+ /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
+ if (list_empty(&stream->td_list)) {
+ list_splice_tail_init(&stream->free_list,
+ &fotg210->cached_itd_list);
+ start_free_itds(fotg210);
+ }
+
+ return retval;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int status = -EINVAL;
+ unsigned long flags;
+ struct fotg210_iso_stream *stream;
+
+ /* Get iso_stream head */
+ stream = iso_stream_find(fotg210, urb);
+ if (unlikely(stream == NULL)) {
+ fotg210_dbg(fotg210, "can't get iso stream\n");
+ return -ENOMEM;
+ }
+ if (unlikely(urb->interval != stream->interval &&
+ fotg210_port_speed(fotg210, 0) ==
+ USB_PORT_STAT_HIGH_SPEED)) {
+ fotg210_dbg(fotg210, "can't change iso interval %d --> %d\n",
+ stream->interval, urb->interval);
+ goto done;
+ }
+
+#ifdef FOTG210_URB_TRACE
+ fotg210_dbg(fotg210,
+ "%s %s urb %p ep%d%s len %d, %d pkts %d uframes[%p]\n",
+ __func__, urb->dev->devpath, urb,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ urb->transfer_buffer_length,
+ urb->number_of_packets, urb->interval,
+ stream);
+#endif
+
+ /* allocate ITDs w/o locking anything */
+ status = itd_urb_transaction(stream, fotg210, urb, mem_flags);
+ if (unlikely(status < 0)) {
+ fotg210_dbg(fotg210, "can't init itds\n");
+ goto done;
+ }
+
+ /* schedule ... need to lock */
+ spin_lock_irqsave(&fotg210->lock, flags);
+ if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
+ status = -ESHUTDOWN;
+ goto done_not_linked;
+ }
+ status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
+ if (unlikely(status))
+ goto done_not_linked;
+ status = iso_stream_schedule(fotg210, urb, stream);
+ if (likely(status == 0))
+ itd_link_urb(fotg210, urb, fotg210->periodic_size << 3, stream);
+ else
+ usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+ done_not_linked:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ done:
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_isoc(struct fotg210_hcd *fotg210)
+{
+ unsigned uf, now_frame, frame;
+ unsigned fmask = fotg210->periodic_size - 1;
+ bool modified, live;
+
+ /*
+ * When running, scan from last scan point up to "now"
+ * else clean up by scanning everything that's left.
+ * Touches as few pages as possible: cache-friendly.
+ */
+ if (fotg210->rh_state >= FOTG210_RH_RUNNING) {
+ uf = fotg210_read_frame_index(fotg210);
+ now_frame = (uf >> 3) & fmask;
+ live = true;
+ } else {
+ now_frame = (fotg210->next_frame - 1) & fmask;
+ live = false;
+ }
+ fotg210->now_frame = now_frame;
+
+ frame = fotg210->next_frame;
+ for (;;) {
+ union fotg210_shadow q, *q_p;
+ __hc32 type, *hw_p;
+
+restart:
+ /* scan each element in frame's queue for completions */
+ q_p = &fotg210->pshadow[frame];
+ hw_p = &fotg210->periodic[frame];
+ q.ptr = q_p->ptr;
+ type = Q_NEXT_TYPE(fotg210, *hw_p);
+ modified = false;
+
+ while (q.ptr != NULL) {
+ switch (hc32_to_cpu(fotg210, type)) {
+ case Q_TYPE_ITD:
+ /* If this ITD is still active, leave it for
+ * later processing ... check the next entry.
+ * No need to check for activity unless the
+ * frame is current.
+ */
+ if (frame == now_frame && live) {
+ rmb();
+ for (uf = 0; uf < 8; uf++) {
+ if (q.itd->hw_transaction[uf] &
+ ITD_ACTIVE(fotg210))
+ break;
+ }
+ if (uf < 8) {
+ q_p = &q.itd->itd_next;
+ hw_p = &q.itd->hw_next;
+ type = Q_NEXT_TYPE(fotg210,
+ q.itd->hw_next);
+ q = *q_p;
+ break;
+ }
+ }
+
+ /* Take finished ITDs out of the schedule
+ * and process them: recycle, maybe report
+ * URB completion. HC won't cache the
+ * pointer for much longer, if at all.
+ */
+ *q_p = q.itd->itd_next;
+ *hw_p = q.itd->hw_next;
+ type = Q_NEXT_TYPE(fotg210, q.itd->hw_next);
+ wmb();
+ modified = itd_complete(fotg210, q.itd);
+ q = *q_p;
+ break;
+ default:
+ fotg210_dbg(fotg210, "corrupt type %d frame %d shadow %p\n",
+ type, frame, q.ptr);
+ /* FALL THROUGH */
+ case Q_TYPE_QH:
+ case Q_TYPE_FSTN:
+ /* End of the iTDs and siTDs */
+ q.ptr = NULL;
+ break;
+ }
+
+ /* assume completion callbacks modify the queue */
+ if (unlikely(modified && fotg210->isoc_count > 0))
+ goto restart;
+ }
+
+ /* Stop when we have reached the current frame */
+ if (frame == now_frame)
+ break;
+ frame = (frame + 1) & fmask;
+ }
+ fotg210->next_frame = now_frame;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * Display / Set uframe_periodic_max
+ */
+static ssize_t show_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fotg210_hcd *fotg210;
+ int n;
+
+ fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
+ n = scnprintf(buf, PAGE_SIZE, "%d\n", fotg210->uframe_periodic_max);
+ return n;
+}
+
+
+static ssize_t store_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fotg210_hcd *fotg210;
+ unsigned uframe_periodic_max;
+ unsigned frame, uframe;
+ unsigned short allocated_max;
+ unsigned long flags;
+ ssize_t ret;
+
+ fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
+ if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+ return -EINVAL;
+
+ if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
+ fotg210_info(fotg210, "rejecting invalid request for uframe_periodic_max=%u\n",
+ uframe_periodic_max);
+ return -EINVAL;
+ }
+
+ ret = -EINVAL;
+
+ /*
+ * lock, so that our checking does not race with possible periodic
+ * bandwidth allocation through submitting new urbs.
+ */
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ /*
+ * for request to decrease max periodic bandwidth, we have to check
+ * every microframe in the schedule to see whether the decrease is
+ * possible.
+ */
+ if (uframe_periodic_max < fotg210->uframe_periodic_max) {
+ allocated_max = 0;
+
+ for (frame = 0; frame < fotg210->periodic_size; ++frame)
+ for (uframe = 0; uframe < 7; ++uframe)
+ allocated_max = max(allocated_max,
+ periodic_usecs(fotg210, frame, uframe));
+
+ if (allocated_max > uframe_periodic_max) {
+ fotg210_info(fotg210,
+ "cannot decrease uframe_periodic_max becase "
+ "periodic bandwidth is already allocated "
+ "(%u > %u)\n",
+ allocated_max, uframe_periodic_max);
+ goto out_unlock;
+ }
+ }
+
+ /* increasing is always ok */
+
+ fotg210_info(fotg210, "setting max periodic bandwidth to %u%% (== %u usec/uframe)\n",
+ 100 * uframe_periodic_max/125, uframe_periodic_max);
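+ /* e.g. the default of 100 usec out of a 125 usec uframe reads back as 80% */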
+
+ if (uframe_periodic_max != 100)
+ fotg210_warn(fotg210, "max periodic bandwidth set is non-standard\n");
+
+ fotg210->uframe_periodic_max = uframe_periodic_max;
+ ret = count;
+
+out_unlock:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return ret;
+}
+
+static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max,
+ store_uframe_periodic_max);
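+
+/*
+ * Example usage from userspace (the sysfs path is only illustrative and
+ * depends on how the controller's platform device is named):
+ *
+ *   echo 105 > /sys/devices/platform/fotg210-hcd/uframe_periodic_max
+ *
+ * Values outside 100..124 usec are rejected by store_uframe_periodic_max(),
+ * and a decrease is refused while already-allocated periodic bandwidth
+ * exceeds the new limit.
+ */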
+
+static inline int create_sysfs_files(struct fotg210_hcd *fotg210)
+{
+ struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
+ int i = 0;
+
+ if (i)
+ goto out;
+
+ i = device_create_file(controller, &dev_attr_uframe_periodic_max);
+out:
+ return i;
+}
+
+static inline void remove_sysfs_files(struct fotg210_hcd *fotg210)
+{
+ struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
+
+ device_remove_file(controller, &dev_attr_uframe_periodic_max);
+}
+/*-------------------------------------------------------------------------*/
+
+/* On some systems, leaving remote wakeup enabled prevents system shutdown.
+ * The firmware seems to think that powering off is a wakeup event!
+ * This routine turns off remote wakeup and everything else, on all ports.
+ */
+static void fotg210_turn_off_all_ports(struct fotg210_hcd *fotg210)
+{
+ u32 __iomem *status_reg = &fotg210->regs->port_status;
+
+ fotg210_writel(fotg210, PORT_RWC_BITS, status_reg);
+}
+
+/*
+ * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fotg210_silence_controller(struct fotg210_hcd *fotg210)
+{
+ fotg210_halt(fotg210);
+
+ spin_lock_irq(&fotg210->lock);
+ fotg210->rh_state = FOTG210_RH_HALTED;
+ fotg210_turn_off_all_ports(fotg210);
+ spin_unlock_irq(&fotg210->lock);
+}
+
+/* fotg210_shutdown kicks in for silicon on any bus (not just pci, etc).
+ * This forcibly disables dma and IRQs, helping kexec and other cases
+ * where the next system software may expect clean state.
+ */
+static void fotg210_shutdown(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+
+ spin_lock_irq(&fotg210->lock);
+ fotg210->shutdown = true;
+ fotg210->rh_state = FOTG210_RH_STOPPING;
+ fotg210->enabled_hrtimer_events = 0;
+ spin_unlock_irq(&fotg210->lock);
+
+ fotg210_silence_controller(fotg210);
+
+ hrtimer_cancel(&fotg210->hrtimer);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * fotg210_work is called from some interrupts, timers, and so on.
+ * it calls driver completion functions, after dropping fotg210->lock.
+ */
+static void fotg210_work(struct fotg210_hcd *fotg210)
+{
+ /* another CPU may drop fotg210->lock during a schedule scan while
+ * it reports urb completions. this flag guards against bogus
+ * attempts at re-entrant schedule scanning.
+ */
+ if (fotg210->scanning) {
+ fotg210->need_rescan = true;
+ return;
+ }
+ fotg210->scanning = true;
+
+ rescan:
+ fotg210->need_rescan = false;
+ if (fotg210->async_count)
+ scan_async(fotg210);
+ if (fotg210->intr_count > 0)
+ scan_intr(fotg210);
+ if (fotg210->isoc_count > 0)
+ scan_isoc(fotg210);
+ if (fotg210->need_rescan)
+ goto rescan;
+ fotg210->scanning = false;
+
+ /* the IO watchdog guards against hardware or driver bugs that
+ * misplace IRQs, and should let us run completely without IRQs.
+ * such lossage has been observed on both VT6202 and VT8235.
+ */
+ turn_on_io_watchdog(fotg210);
+}
+
+/*
+ * Called when the fotg210_hcd module is removed.
+ */
+static void fotg210_stop(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+
+ fotg210_dbg(fotg210, "stop\n");
+
+ /* no more interrupts ... */
+
+ spin_lock_irq(&fotg210->lock);
+ fotg210->enabled_hrtimer_events = 0;
+ spin_unlock_irq(&fotg210->lock);
+
+ fotg210_quiesce(fotg210);
+ fotg210_silence_controller(fotg210);
+ fotg210_reset(fotg210);
+
+ hrtimer_cancel(&fotg210->hrtimer);
+ remove_sysfs_files(fotg210);
+ remove_debug_files(fotg210);
+
+ /* root hub is shut down separately (first, when possible) */
+ spin_lock_irq(&fotg210->lock);
+ end_free_itds(fotg210);
+ spin_unlock_irq(&fotg210->lock);
+ fotg210_mem_cleanup(fotg210);
+
+#ifdef FOTG210_STATS
+ fotg210_dbg(fotg210, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+ fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa,
+ fotg210->stats.lost_iaa);
+ fotg210_dbg(fotg210, "complete %ld unlink %ld\n",
+ fotg210->stats.complete, fotg210->stats.unlink);
+#endif
+
+ dbg_status(fotg210, "fotg210_stop completed",
+ fotg210_readl(fotg210, &fotg210->regs->status));
+}
+
+/* one-time init, only for memory state */
+static int hcd_fotg210_init(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 temp;
+ int retval;
+ u32 hcc_params;
+ struct fotg210_qh_hw *hw;
+
+ spin_lock_init(&fotg210->lock);
+
+ /*
+ * keep the io watchdog enabled by default; well-behaved HCDs can turn it off later
+ */
+ fotg210->need_io_watchdog = 1;
+
+ hrtimer_init(&fotg210->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ fotg210->hrtimer.function = fotg210_hrtimer_func;
+ fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
+
+ hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+
+ /*
+ * by default set standard 80% (== 100 usec/uframe) max periodic
+ * bandwidth as required by USB 2.0
+ */
+ fotg210->uframe_periodic_max = 100;
+
+ /*
+ * hw default: 1K periodic list heads, one per frame.
+ * periodic_size can shrink by USBCMD update if hcc_params allows.
+ */
+ fotg210->periodic_size = DEFAULT_I_TDPS;
+ INIT_LIST_HEAD(&fotg210->intr_qh_list);
+ INIT_LIST_HEAD(&fotg210->cached_itd_list);
+
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ switch (FOTG210_TUNE_FLS) {
+ case 0:
+ fotg210->periodic_size = 1024;
+ break;
+ case 1:
+ fotg210->periodic_size = 512;
+ break;
+ case 2:
+ fotg210->periodic_size = 256;
+ break;
+ default:
+ BUG();
+ }
+ }
+ retval = fotg210_mem_init(fotg210, GFP_KERNEL);
+ if (retval < 0)
+ return retval;
+
+ /* controllers may cache some of the periodic schedule ... */
+ fotg210->i_thresh = 2;
+
+ /*
+ * dedicate a qh for the async ring head, since we couldn't unlink
+ * a 'real' qh without stopping the async schedule [4.8]. use it
+ * as the 'reclamation list head' too.
+ * its dummy is used in hw_alt_next of many tds, to prevent the qh
+ * from automatically advancing to the next td after short reads.
+ */
+ fotg210->async->qh_next.qh = NULL;
+ hw = fotg210->async->hw;
+ hw->hw_next = QH_NEXT(fotg210, fotg210->async->qh_dma);
+ hw->hw_info1 = cpu_to_hc32(fotg210, QH_HEAD);
+ hw->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
+ hw->hw_qtd_next = FOTG210_LIST_END(fotg210);
+ fotg210->async->qh_state = QH_STATE_LINKED;
+ hw->hw_alt_next = QTD_NEXT(fotg210, fotg210->async->dummy->qtd_dma);
+
+ /* clear interrupt enables, set irq latency */
+ if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+ log2_irq_thresh = 0;
+ temp = 1 << (16 + log2_irq_thresh);
+ if (HCC_CANPARK(hcc_params)) {
+ /* HW default park == 3, on hardware that supports it (like
+ * NVidia and ALI silicon), maximizes throughput on the async
+ * schedule by avoiding QH fetches between transfers.
+ *
+ * With fast usb storage devices and NForce2, "park" seems to
+ * make problems: throughput reduction (!), data errors...
+ */
+ if (park) {
+ park = min_t(unsigned, park, 3);
+ temp |= CMD_PARK;
+ temp |= park << 8;
+ }
+ fotg210_dbg(fotg210, "park %d\n", park);
+ }
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ temp &= ~(3 << 2);
+ temp |= (FOTG210_TUNE_FLS << 2);
+ }
+ fotg210->command = temp;
+
+ /* Accept arbitrarily long scatter-gather lists */
+ if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+ hcd->self.sg_tablesize = ~0;
+ return 0;
+}
+
+/* start HC running; it's halted, hcd_fotg210_init() has been run (once) */
+static int fotg210_run(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 temp;
+ u32 hcc_params;
+
+ hcd->uses_new_polling = 1;
+
+ /* EHCI spec section 4.1 */
+
+ fotg210_writel(fotg210, fotg210->periodic_dma,
+ &fotg210->regs->frame_list);
+ fotg210_writel(fotg210, (u32)fotg210->async->qh_dma,
+ &fotg210->regs->async_next);
+
+ /*
+ * hcc_params controls whether fotg210->regs->segment must (!!!)
+ * be used; it constrains QH/ITD/SITD and QTD locations.
+ * pci_pool consistent memory always uses segment zero.
+ * streaming mappings for I/O buffers, like pci_map_single(),
+ * can return segments above 4GB, if the device allows.
+ *
+ * NOTE: the dma mask is visible through dma_supported(), so
+ * drivers can pass this info along ... like NETIF_F_HIGHDMA,
+ * Scsi_Host.highmem_io, and so forth. It's readonly to all
+ * host side drivers though.
+ */
+ hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+
+ /*
+ * Philips, Intel, and maybe others need CMD_RUN before the
+ * root hub will detect new devices (why?); NEC doesn't
+ */
+ fotg210->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+ fotg210->command |= CMD_RUN;
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+ dbg_cmd(fotg210, "init", fotg210->command);
+
+ /*
+ * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
+ * are explicitly handed to companion controller(s), so no TT is
+ * involved with the root hub. (Except where one is integrated,
+ * and there's no companion controller unless maybe for USB OTG.)
+ *
+ * Turning on the CF flag will transfer ownership of all ports
+ * from the companions to the EHCI controller. If any of the
+ * companions are in the middle of a port reset at the time, it
+ * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
+ * guarantees that no resets are in progress. After we set CF,
+ * a short delay lets the hardware catch up; new resets shouldn't
+ * be started before the port switching actions could complete.
+ */
+ down_write(&ehci_cf_port_reset_rwsem);
+ fotg210->rh_state = FOTG210_RH_RUNNING;
+ /* unblock posted writes */
+ fotg210_readl(fotg210, &fotg210->regs->command);
+ msleep(5);
+ up_write(&ehci_cf_port_reset_rwsem);
+ fotg210->last_periodic_enable = ktime_get_real();
+
+ temp = HC_VERSION(fotg210,
+ fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
+ fotg210_info(fotg210,
+ "USB %x.%x started, EHCI %x.%02x\n",
+ ((fotg210->sbrn & 0xf0)>>4), (fotg210->sbrn & 0x0f),
+ temp >> 8, temp & 0xff);
+
+ fotg210_writel(fotg210, INTR_MASK,
+ &fotg210->regs->intr_enable); /* Turn On Interrupts */
+
+ /* GRR this is run-once init(), being done every time the HC starts.
+ * So long as they're part of class devices, we can't do it in init()
+ * since the class device isn't created that early.
+ */
+ create_debug_files(fotg210);
+ create_sysfs_files(fotg210);
+
+ return 0;
+}
+
+static int fotg210_setup(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ int retval;
+
+ fotg210->regs = (void __iomem *)fotg210->caps +
+ HC_LENGTH(fotg210,
+ fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
+ dbg_hcs_params(fotg210, "reset");
+ dbg_hcc_params(fotg210, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ fotg210->hcs_params = fotg210_readl(fotg210,
+ &fotg210->caps->hcs_params);
+
+ fotg210->sbrn = HCD_USB2;
+
+ /* data structure init */
+ retval = hcd_fotg210_init(hcd);
+ if (retval)
+ return retval;
+
+ retval = fotg210_halt(fotg210);
+ if (retval)
+ return retval;
+
+ fotg210_reset(fotg210);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 status, masked_status, pcd_status = 0, cmd;
+ int bh;
+
+ spin_lock(&fotg210->lock);
+
+ status = fotg210_readl(fotg210, &fotg210->regs->status);
+
+ /* e.g. cardbus physical eject */
+ if (status == ~(u32) 0) {
+ fotg210_dbg(fotg210, "device removed\n");
+ goto dead;
+ }
+
+ /*
+ * We don't use STS_FLR, but some controllers don't like it to
+ * remain on, so mask it out along with the other status bits.
+ */
+ masked_status = status & (INTR_MASK | STS_FLR);
+
+ /* Shared IRQ? */
+ if (!masked_status ||
+ unlikely(fotg210->rh_state == FOTG210_RH_HALTED)) {
+ spin_unlock(&fotg210->lock);
+ return IRQ_NONE;
+ }
+
+ /* clear (just) interrupts */
+ fotg210_writel(fotg210, masked_status, &fotg210->regs->status);
+ cmd = fotg210_readl(fotg210, &fotg210->regs->command);
+ bh = 0;
+
+ /* unrequested/ignored: Frame List Rollover */
+ dbg_status(fotg210, "irq", status);
+
+ /* INT, ERR, and IAA interrupt rates can be throttled */
+
+ /* normal [4.15.1.2] or error [4.15.1.1] completion */
+ if (likely((status & (STS_INT|STS_ERR)) != 0)) {
+ if (likely((status & STS_ERR) == 0))
+ COUNT(fotg210->stats.normal);
+ else
+ COUNT(fotg210->stats.error);
+ bh = 1;
+ }
+
+ /* complete the unlinking of some qh [4.15.2.3] */
+ if (status & STS_IAA) {
+
+ /* Turn off the IAA watchdog */
+ fotg210->enabled_hrtimer_events &=
+ ~BIT(FOTG210_HRTIMER_IAA_WATCHDOG);
+
+ /*
+ * Mild optimization: Allow another IAAD to reset the
+ * hrtimer, if one occurs before the next expiration.
+ * In theory we could always cancel the hrtimer, but
+ * tests show that about half the time it will be reset
+ * for some other event anyway.
+ */
+ if (fotg210->next_hrtimer_event == FOTG210_HRTIMER_IAA_WATCHDOG)
+ ++fotg210->next_hrtimer_event;
+
+ /* guard against (alleged) silicon errata */
+ if (cmd & CMD_IAAD)
+ fotg210_dbg(fotg210, "IAA with IAAD still set?\n");
+ if (fotg210->async_iaa) {
+ COUNT(fotg210->stats.iaa);
+ end_unlink_async(fotg210);
+ } else
+ fotg210_dbg(fotg210, "IAA with nothing unlinked?\n");
+ }
+
+ /* remote wakeup [4.3.1] */
+ if (status & STS_PCD) {
+ int pstatus;
+ u32 __iomem *status_reg = &fotg210->regs->port_status;
+
+ /* kick root hub later */
+ pcd_status = status;
+
+ /* resume root hub? */
+ if (fotg210->rh_state == FOTG210_RH_SUSPENDED)
+ usb_hcd_resume_root_hub(hcd);
+
+ pstatus = fotg210_readl(fotg210, status_reg);
+
+ if (test_bit(0, &fotg210->suspended_ports) &&
+ ((pstatus & PORT_RESUME) ||
+ !(pstatus & PORT_SUSPEND)) &&
+ (pstatus & PORT_PE) &&
+ fotg210->reset_done[0] == 0) {
+
+ /* start 20 msec resume signaling from this port,
+ * and make khubd collect PORT_STAT_C_SUSPEND to
+ * stop that signaling. Use 5 ms extra for safety,
+ * like usb_port_resume() does.
+ */
+ fotg210->reset_done[0] = jiffies + msecs_to_jiffies(25);
+ set_bit(0, &fotg210->resuming_ports);
+ fotg210_dbg(fotg210, "port 1 remote wakeup\n");
+ mod_timer(&hcd->rh_timer, fotg210->reset_done[0]);
+ }
+ }
+
+ /* PCI errors [4.15.2.4] */
+ if (unlikely((status & STS_FATAL) != 0)) {
+ fotg210_err(fotg210, "fatal error\n");
+ dbg_cmd(fotg210, "fatal", cmd);
+ dbg_status(fotg210, "fatal", status);
+dead:
+ usb_hc_died(hcd);
+
+ /* Don't let the controller do anything more */
+ fotg210->shutdown = true;
+ fotg210->rh_state = FOTG210_RH_STOPPING;
+ fotg210->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
+ fotg210_writel(fotg210, fotg210->command,
+ &fotg210->regs->command);
+ fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
+ fotg210_handle_controller_death(fotg210);
+
+ /* Handle completions when the controller stops */
+ bh = 0;
+ }
+
+ if (bh)
+ fotg210_work(fotg210);
+ spin_unlock(&fotg210->lock);
+ if (pcd_status)
+ usb_hcd_poll_rh_status(hcd);
+ return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * non-error returns are a promise to giveback() the urb later
+ * we drop ownership so next owner (or urb unlink) can get it
+ *
+ * urb + dev is in hcd.self.controller.urb_list
+ * we're queueing TDs onto software and hardware lists
+ *
+ * hcd-specific init for hcpriv hasn't been done yet
+ *
+ * NOTE: control, bulk, and interrupt share the same code to append TDs
+ * to a (possibly active) QH, and the same QH scanning code.
+ */
+static int fotg210_urb_enqueue(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags
+) {
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct list_head qtd_list;
+
+ INIT_LIST_HEAD(&qtd_list);
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ /* qh_completions() code doesn't handle all the fault cases
+ * in multi-TD control transfers. Even 1KB is rare anyway.
+ */
+ if (urb->transfer_buffer_length > (16 * 1024))
+ return -EMSGSIZE;
+ /* FALLTHROUGH */
+ /* case PIPE_BULK: */
+ default:
+ if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+ return submit_async(fotg210, urb, &qtd_list, mem_flags);
+
+ case PIPE_INTERRUPT:
+ if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+ return intr_submit(fotg210, urb, &qtd_list, mem_flags);
+
+ case PIPE_ISOCHRONOUS:
+ return itd_submit(fotg210, urb, mem_flags);
+ }
+}
+
+/* remove from hardware lists
+ * completions normally happen asynchronously
+ */
+
+static int fotg210_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_qh *qh;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto done;
+
+ switch (usb_pipetype(urb->pipe)) {
+ /* case PIPE_CONTROL: */
+ /* case PIPE_BULK:*/
+ default:
+ qh = (struct fotg210_qh *) urb->hcpriv;
+ if (!qh)
+ break;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ start_unlink_async(fotg210, qh);
+ break;
+ case QH_STATE_UNLINK:
+ case QH_STATE_UNLINK_WAIT:
+ /* already started */
+ break;
+ case QH_STATE_IDLE:
+ /* QH might be waiting for a Clear-TT-Buffer */
+ qh_completions(fotg210, qh);
+ break;
+ }
+ break;
+
+ case PIPE_INTERRUPT:
+ qh = (struct fotg210_qh *) urb->hcpriv;
+ if (!qh)
+ break;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ start_unlink_intr(fotg210, qh);
+ break;
+ case QH_STATE_IDLE:
+ qh_completions(fotg210, qh);
+ break;
+ default:
+ fotg210_dbg(fotg210, "bogus qh %p state %d\n",
+ qh, qh->qh_state);
+ goto done;
+ }
+ break;
+
+ case PIPE_ISOCHRONOUS:
+ /* itd... */
+
+ /* wait till next completion, do it then. */
+ /* completion irqs can wait up to 1024 msec, */
+ break;
+ }
+done:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* bulk qh holds the data toggle */
+
+static void
+fotg210_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ unsigned long flags;
+ struct fotg210_qh *qh, *tmp;
+
+ /* ASSERT: any requests/urbs are being unlinked */
+ /* ASSERT: nobody can be submitting urbs for this any more */
+
+rescan:
+ spin_lock_irqsave(&fotg210->lock, flags);
+ qh = ep->hcpriv;
+ if (!qh)
+ goto done;
+
+ /* endpoints can be iso streams. for now, we don't
+ * accelerate iso completions ... so spin a while.
+ */
+ if (qh->hw == NULL) {
+ struct fotg210_iso_stream *stream = ep->hcpriv;
+
+ if (!list_empty(&stream->td_list))
+ goto idle_timeout;
+
+ /* BUG_ON(!list_empty(&stream->free_list)); */
+ kfree(stream);
+ goto done;
+ }
+
+ if (fotg210->rh_state < FOTG210_RH_RUNNING)
+ qh->qh_state = QH_STATE_IDLE;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ for (tmp = fotg210->async->qh_next.qh;
+ tmp && tmp != qh;
+ tmp = tmp->qh_next.qh)
+ continue;
+ /* periodic qh self-unlinks on empty, and a COMPLETING qh
+ * may already be unlinked.
+ */
+ if (tmp)
+ start_unlink_async(fotg210, qh);
+ /* FALL THROUGH */
+ case QH_STATE_UNLINK: /* wait for hw to finish? */
+ case QH_STATE_UNLINK_WAIT:
+idle_timeout:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ schedule_timeout_uninterruptible(1);
+ goto rescan;
+ case QH_STATE_IDLE: /* fully unlinked */
+ if (qh->clearing_tt)
+ goto idle_timeout;
+ if (list_empty(&qh->qtd_list)) {
+ qh_destroy(fotg210, qh);
+ break;
+ }
+ /* else FALL THROUGH */
+ default:
+ /* caller was supposed to have unlinked any requests;
+ * that's not our job. just leak this memory.
+ */
+ fotg210_err(fotg210, "qh %p (#%02x) state %d%s\n",
+ qh, ep->desc.bEndpointAddress, qh->qh_state,
+ list_empty(&qh->qtd_list) ? "" : "(has tds)");
+ break;
+ }
+ done:
+ ep->hcpriv = NULL;
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+}
+
+static void
+fotg210_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_qh *qh;
+ int eptype = usb_endpoint_type(&ep->desc);
+ int epnum = usb_endpoint_num(&ep->desc);
+ int is_out = usb_endpoint_dir_out(&ep->desc);
+ unsigned long flags;
+
+ if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+ return;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ qh = ep->hcpriv;
+
+ /* For Bulk and Interrupt endpoints we maintain the toggle state
+ * in the hardware; the toggle bits in udev aren't used at all.
+ * When an endpoint is reset by usb_clear_halt() we must reset
+ * the toggle bit in the QH.
+ */
+ if (qh) {
+ usb_settoggle(qh->dev, epnum, is_out, 0);
+ if (!list_empty(&qh->qtd_list)) {
+ WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+ } else if (qh->qh_state == QH_STATE_LINKED ||
+ qh->qh_state == QH_STATE_COMPLETING) {
+
+ /* The toggle value in the QH can't be updated
+ * while the QH is active. Unlink it now;
+ * re-linking will call qh_refresh().
+ */
+ if (eptype == USB_ENDPOINT_XFER_BULK)
+ start_unlink_async(fotg210, qh);
+ else
+ start_unlink_intr(fotg210, qh);
+ }
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+}
+
+static int fotg210_get_frame(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ return (fotg210_read_frame_index(fotg210) >> 3) %
+ fotg210->periodic_size;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The EHCI in ChipIdea HDRC cannot be a separate module or device,
+ * because its registers (and irq) are shared between host/gadget/otg
+ * functions and in order to facilitate role switching we cannot
+ * give the fotg210 driver exclusive access to those.
+ */
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
+static const struct hc_driver fotg210_fotg210_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Faraday USB2.0 Host Controller",
+ .hcd_priv_size = sizeof(struct fotg210_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = fotg210_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = hcd_fotg210_init,
+ .start = fotg210_run,
+ .stop = fotg210_stop,
+ .shutdown = fotg210_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = fotg210_urb_enqueue,
+ .urb_dequeue = fotg210_urb_dequeue,
+ .endpoint_disable = fotg210_endpoint_disable,
+ .endpoint_reset = fotg210_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = fotg210_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = fotg210_hub_status_data,
+ .hub_control = fotg210_hub_control,
+ .bus_suspend = fotg210_bus_suspend,
+ .bus_resume = fotg210_bus_resume,
+
+ .relinquish_port = fotg210_relinquish_port,
+ .port_handed_over = fotg210_port_handed_over,
+
+ .clear_tt_buffer_complete = fotg210_clear_tt_buffer_complete,
+};
+
+static void fotg210_init(struct fotg210_hcd *fotg210)
+{
+ u32 value;
+
+ iowrite32(GMIR_MDEV_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
+ &fotg210->regs->gmir);
+
+ value = ioread32(&fotg210->regs->otgcsr);
+ value &= ~OTGCSR_A_BUS_DROP;
+ value |= OTGCSR_A_BUS_REQ;
+ iowrite32(value, &fotg210->regs->otgcsr);
+}
+
+/**
+ * fotg210_hcd_probe - initialize faraday FOTG210 HCDs
+ * @pdev: platform device of the FOTG210 host controller
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int fotg210_hcd_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int irq;
+ int retval = -ENODEV;
+ struct fotg210_hcd *fotg210;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pdev->dev.power.power_state = PMSG_ON;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no IRQ. Check %s setup!\n",
+ dev_name(dev));
+ return -ENODEV;
+ }
+
+ irq = res->start;
+
+ hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
+ dev_name(dev));
+ if (!hcd) {
+ dev_err(dev, "failed to create hcd with err %d\n", retval);
+ retval = -ENOMEM;
+ goto fail_create_hcd;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(dev));
+ retval = -ENODEV;
+ goto fail_request_resource;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->has_tt = 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+ fotg210_fotg210_hc_driver.description)) {
+ dev_dbg(dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto fail_request_resource;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(dev));
+ retval = -ENODEV;
+ goto fail_request_resource;
+ }
+
+ hcd->regs = ioremap_nocache(res->start, resource_size(res));
+ if (hcd->regs == NULL) {
+ dev_dbg(dev, "error mapping memory\n");
+ retval = -EFAULT;
+ goto fail_ioremap;
+ }
+
+ fotg210 = hcd_to_fotg210(hcd);
+
+ fotg210->caps = hcd->regs;
+
+ retval = fotg210_setup(hcd);
+ if (retval)
+ goto fail_add_hcd;
+
+ fotg210_init(fotg210);
+
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval) {
+ dev_err(dev, "failed to add hcd with err %d\n", retval);
+ goto fail_add_hcd;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ return retval;
+
+fail_add_hcd:
+ iounmap(hcd->regs);
+fail_ioremap:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+fail_request_resource:
+ usb_put_hcd(hcd);
+fail_create_hcd:
+ dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
+ return retval;
+}
+
+/**
+ * fotg210_hcd_remove - shutdown processing for FOTG210 HCDs
+ * @pdev: platform device of the Host Controller being removed
+ *
+ */
+static int fotg210_hcd_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ if (!hcd)
+ return 0;
+
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static struct platform_driver fotg210_hcd_driver = {
+ .driver = {
+ .name = "fotg210-hcd",
+ },
+ .probe = fotg210_hcd_probe,
+ .remove = fotg210_hcd_remove,
+};
+
+static int __init fotg210_hcd_init(void)
+{
+ int retval = 0;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+ if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
+ test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
+ pr_warn("Warning! fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n");
+
+ pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd\n",
+ hcd_name,
+ sizeof(struct fotg210_qh), sizeof(struct fotg210_qtd),
+ sizeof(struct fotg210_itd));
+
+ fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root);
+ if (!fotg210_debug_root) {
+ retval = -ENOENT;
+ goto err_debug;
+ }
+
+ retval = platform_driver_register(&fotg210_hcd_driver);
+ if (retval < 0)
+ goto clean;
+ return retval;
+
+clean:
+ debugfs_remove(fotg210_debug_root);
+ fotg210_debug_root = NULL;
+err_debug:
+ clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+ return retval;
+}
+module_init(fotg210_hcd_init);
+
+static void __exit fotg210_hcd_cleanup(void)
+{
+ platform_driver_unregister(&fotg210_hcd_driver);
+ debugfs_remove(fotg210_debug_root);
+ clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+}
+module_exit(fotg210_hcd_cleanup);
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
new file mode 100644
index 00000000000..ac6cd1bfd20
--- /dev/null
+++ b/drivers/usb/host/fotg210.h
@@ -0,0 +1,742 @@
+#ifndef __LINUX_FOTG210_H
+#define __LINUX_FOTG210_H
+
+/* definitions used for the EHCI driver */
+
+/*
+ * __hc32 and __hc16 are "Host Controller" types; they may be equivalent to
+ * __leXX (normally) or __beXX (given FOTG210_BIG_ENDIAN_DESC), depending on
+ * the host controller implementation.
+ *
+ * To facilitate the strongest possible byte-order checking from "sparse"
+ * and so on, we use __leXX unless that's not practical.
+ */
+#define __hc32 __le32
+#define __hc16 __le16
+
+/* statistics can be kept for tuning/monitoring */
+struct fotg210_stats {
+ /* irq usage */
+ unsigned long normal;
+ unsigned long error;
+ unsigned long iaa;
+ unsigned long lost_iaa;
+
+ /* termination of urbs from core */
+ unsigned long complete;
+ unsigned long unlink;
+};
+
+/* fotg210_hcd->lock guards shared data against other CPUs:
+ * fotg210_hcd: async, unlink, periodic (and shadow), ...
+ * usb_host_endpoint: hcpriv
+ * fotg210_qh: qh_next, qtd_list
+ * fotg210_qtd: qtd_list
+ *
+ * Also, hold this lock when talking to HC registers or
+ * when updating hw_* fields in shared qh/qtd/... structures.
+ */
+
+#define FOTG210_MAX_ROOT_PORTS 1 /* see HCS_N_PORTS */
+
+/*
+ * fotg210_rh_state values of FOTG210_RH_RUNNING or above mean that the
+ * controller may be doing DMA. Lower values mean there's no DMA.
+ */
+enum fotg210_rh_state {
+ FOTG210_RH_HALTED,
+ FOTG210_RH_SUSPENDED,
+ FOTG210_RH_RUNNING,
+ FOTG210_RH_STOPPING
+};
+
+/*
+ * Timer events, ordered by increasing delay length.
+ * Always update event_delays_ns[] and event_handlers[] (defined in
+ * fotg210-hcd.c) in parallel with this list.
+ */
+enum fotg210_hrtimer_event {
+ FOTG210_HRTIMER_POLL_ASS, /* Poll for async schedule off */
+ FOTG210_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */
+ FOTG210_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
+ FOTG210_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
+ FOTG210_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
+ FOTG210_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
+ FOTG210_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
+ FOTG210_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
+ FOTG210_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
+ FOTG210_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */
+ FOTG210_HRTIMER_NUM_EVENTS /* Must come last */
+};
+#define FOTG210_HRTIMER_NO_EVENT 99
+
+struct fotg210_hcd { /* one per controller */
+ /* timing support */
+ enum fotg210_hrtimer_event next_hrtimer_event;
+ unsigned enabled_hrtimer_events;
+ ktime_t hr_timeouts[FOTG210_HRTIMER_NUM_EVENTS];
+ struct hrtimer hrtimer;
+
+ int PSS_poll_count;
+ int ASS_poll_count;
+ int died_poll_count;
+
+ /* glue to PCI and HCD framework */
+ struct fotg210_caps __iomem *caps;
+ struct fotg210_regs __iomem *regs;
+ struct fotg210_dbg_port __iomem *debug;
+
+ __u32 hcs_params; /* cached register copy */
+ spinlock_t lock;
+ enum fotg210_rh_state rh_state;
+
+ /* general schedule support */
+ bool scanning:1;
+ bool need_rescan:1;
+ bool intr_unlinking:1;
+ bool async_unlinking:1;
+ bool shutdown:1;
+ struct fotg210_qh *qh_scan_next;
+
+ /* async schedule support */
+ struct fotg210_qh *async;
+ struct fotg210_qh *dummy; /* For AMD quirk use */
+ struct fotg210_qh *async_unlink;
+ struct fotg210_qh *async_unlink_last;
+ struct fotg210_qh *async_iaa;
+ unsigned async_unlink_cycle;
+ unsigned async_count; /* async activity count */
+
+ /* periodic schedule support */
+#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
+ unsigned periodic_size;
+ __hc32 *periodic; /* hw periodic table */
+ dma_addr_t periodic_dma;
+ struct list_head intr_qh_list;
+ unsigned i_thresh; /* uframes HC might cache */
+
+ union fotg210_shadow *pshadow; /* mirror hw periodic table */
+ struct fotg210_qh *intr_unlink;
+ struct fotg210_qh *intr_unlink_last;
+ unsigned intr_unlink_cycle;
+ unsigned now_frame; /* frame from HC hardware */
+ unsigned next_frame; /* scan periodic, start here */
+ unsigned intr_count; /* intr activity count */
+ unsigned isoc_count; /* isoc activity count */
+ unsigned periodic_count; /* periodic activity count */
+ /* max periodic time per uframe */
+ unsigned uframe_periodic_max;
+
+
+ /* list of itds completed while now_frame was still active */
+ struct list_head cached_itd_list;
+ struct fotg210_itd *last_itd_to_free;
+
+ /* per root hub port */
+ unsigned long reset_done[FOTG210_MAX_ROOT_PORTS];
+
+ /* bit vectors (one bit per port) */
+ unsigned long bus_suspended; /* which ports were
+ already suspended at the start of a bus suspend */
+ unsigned long companion_ports; /* which ports are
+ dedicated to the companion controller */
+ unsigned long owned_ports; /* which ports are
+ owned by the companion during a bus suspend */
+ unsigned long port_c_suspend; /* which ports have
+ the change-suspend feature turned on */
+ unsigned long suspended_ports; /* which ports are
+ suspended */
+ unsigned long resuming_ports; /* which ports have
+ started to resume */
+
+ /* per-HC memory pools (could be per-bus, but ...) */
+ struct dma_pool *qh_pool; /* qh per active urb */
+ struct dma_pool *qtd_pool; /* one or more per qh */
+ struct dma_pool *itd_pool; /* itd per iso urb */
+
+ unsigned random_frame;
+ unsigned long next_statechange;
+ ktime_t last_periodic_enable;
+ u32 command;
+
+ /* SILICON QUIRKS */
+ unsigned need_io_watchdog:1;
+ unsigned fs_i_thresh:1; /* Intel iso scheduling */
+
+ u8 sbrn; /* packed release number */
+
+ /* irq statistics */
+#ifdef FOTG210_STATS
+ struct fotg210_stats stats;
+# define COUNT(x) ((x)++)
+#else
+# define COUNT(x)
+#endif
+
+ /* debug files */
+ struct dentry *debug_dir;
+};
+
+/* convert between an HCD pointer and the corresponding FOTG210_HCD */
+static inline struct fotg210_hcd *hcd_to_fotg210(struct usb_hcd *hcd)
+{
+ return (struct fotg210_hcd *)(hcd->hcd_priv);
+}
+static inline struct usb_hcd *fotg210_to_hcd(struct fotg210_hcd *fotg210)
+{
+ return container_of((void *) fotg210, struct usb_hcd, hcd_priv);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
+
+/* Section 2.2 Host Controller Capability Registers */
+struct fotg210_caps {
+ /* these fields are specified as 8 and 16 bit registers,
+ * but some hosts can't perform 8 or 16 bit PCI accesses.
+ * some hosts treat caplength and hciversion as parts of a 32-bit
+ * register, others treat them as two separate registers, this
+ * affects the memory map for big endian controllers.
+ */
+ u32 hc_capbase;
+#define HC_LENGTH(fotg210, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
+ (fotg210_big_endian_capbase(fotg210) ? 24 : 0)))
+#define HC_VERSION(fotg210, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \
+ (fotg210_big_endian_capbase(fotg210) ? 0 : 16)))
+ u32 hcs_params; /* HCSPARAMS - offset 0x4 */
+#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
+
+ u32 hcc_params; /* HCCPARAMS - offset 0x8 */
+#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
+#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
+ u8 portroute[8]; /* nibbles for routing - offset 0xC */
+};
+
+
+/* Section 2.3 Host Controller Operational Registers */
+struct fotg210_regs {
+
+ /* USBCMD: offset 0x00 */
+ u32 command;
+
+/* EHCI 1.1 addendum */
+/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+#define CMD_PARK (1<<11) /* enable "park" on async qh */
+#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
+#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
+#define CMD_ASE (1<<5) /* async schedule enable */
+#define CMD_PSE (1<<4) /* periodic schedule enable */
+/* 3:2 is periodic frame list size */
+#define CMD_RESET (1<<1) /* reset HC not bus */
+#define CMD_RUN (1<<0) /* start/stop HC */
+
+ /* USBSTS: offset 0x04 */
+ u32 status;
+#define STS_ASS (1<<15) /* Async Schedule Status */
+#define STS_PSS (1<<14) /* Periodic Schedule Status */
+#define STS_RECL (1<<13) /* Reclamation */
+#define STS_HALT (1<<12) /* Not running (any reason) */
+/* some bits reserved */
+ /* these STS_* flags are also intr_enable bits (USBINTR) */
+#define STS_IAA (1<<5) /* Interrupted on async advance */
+#define STS_FATAL (1<<4) /* such as some PCI access errors */
+#define STS_FLR (1<<3) /* frame list rolled over */
+#define STS_PCD (1<<2) /* port change detect */
+#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
+#define STS_INT (1<<0) /* "normal" completion (short, ...) */
+
+ /* USBINTR: offset 0x08 */
+ u32 intr_enable;
+
+ /* FRINDEX: offset 0x0C */
+ u32 frame_index; /* current microframe number */
+ /* CTRLDSSEGMENT: offset 0x10 */
+ u32 segment; /* address bits 63:32 if needed */
+ /* PERIODICLISTBASE: offset 0x14 */
+ u32 frame_list; /* points to periodic list */
+ /* ASYNCLISTADDR: offset 0x18 */
+ u32 async_next; /* address of next async queue head */
+
+ u32 reserved1;
+ /* PORTSC: offset 0x20 */
+ u32 port_status;
+/* 31:23 reserved */
+#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
+#define PORT_RESET (1<<8) /* reset port */
+#define PORT_SUSPEND (1<<7) /* suspend port */
+#define PORT_RESUME (1<<6) /* resume it */
+#define PORT_PEC (1<<3) /* port enable change */
+#define PORT_PE (1<<2) /* port enable */
+#define PORT_CSC (1<<1) /* connect status change */
+#define PORT_CONNECT (1<<0) /* device connected */
+#define PORT_RWC_BITS (PORT_CSC | PORT_PEC)
+ u32 reserved2[19];
+
+ /* OTGCSR: offset 0x70 */
+ u32 otgcsr;
+#define OTGCSR_HOST_SPD_TYP (3 << 22)
+#define OTGCSR_A_BUS_DROP (1 << 5)
+#define OTGCSR_A_BUS_REQ (1 << 4)
+
+ /* OTGISR: offset 0x74 */
+ u32 otgisr;
+#define OTGISR_OVC (1 << 10)
+
+ u32 reserved3[15];
+
+ /* GMIR: offset 0xB4 */
+ u32 gmir;
+#define GMIR_INT_POLARITY (1 << 3) /* Active High */
+#define GMIR_MHC_INT (1 << 2)
+#define GMIR_MOTG_INT (1 << 1)
+#define GMIR_MDEV_INT (1 << 0)
+};
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console. (nonstandard enumeration.)
+ */
+struct fotg210_dbg_port {
+ u32 control;
+#define DBGP_OWNER (1<<30)
+#define DBGP_ENABLED (1<<28)
+#define DBGP_DONE (1<<16)
+#define DBGP_INUSE (1<<10)
+#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
+# define DBGP_ERR_BAD 1
+# define DBGP_ERR_SIGNAL 2
+#define DBGP_ERROR (1<<6)
+#define DBGP_GO (1<<5)
+#define DBGP_OUT (1<<4)
+#define DBGP_LEN(x) (((x)>>0)&0x0f)
+ u32 pids;
+#define DBGP_PID_GET(x) (((x)>>16)&0xff)
+#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
+ u32 data03;
+ u32 data47;
+ u32 address;
+#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
+};
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+#include <linux/init.h>
+extern int __init early_dbgp_init(char *s);
+extern struct console early_dbgp_console;
+#endif /* CONFIG_EARLY_PRINTK_DBGP */
+
+struct usb_hcd;
+
+static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return 1; /* Shouldn't this be 0? */
+}
+
+static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return -1;
+}
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+/* Call backs from fotg210 host driver to fotg210 debug driver */
+extern int dbgp_external_startup(struct usb_hcd *);
+extern int dbgp_reset_prep(struct usb_hcd *hcd);
+#else
+static inline int dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return xen_dbgp_reset_prep(hcd);
+}
+static inline int dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return xen_dbgp_external_startup(hcd);
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#define QTD_NEXT(fotg210, dma) cpu_to_hc32(fotg210, (u32)dma)
+
+/*
+ * EHCI Specification 0.95 Section 3.5
+ * QTD: describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
+ *
+ * These are associated only with "QH" (Queue Head) structures,
+ * used with control, bulk, and interrupt transfers.
+ */
+struct fotg210_qtd {
+ /* first part defined by EHCI spec */
+ __hc32 hw_next; /* see EHCI 3.5.1 */
+ __hc32 hw_alt_next; /* see EHCI 3.5.2 */
+ __hc32 hw_token; /* see EHCI 3.5.3 */
+#define QTD_TOGGLE (1 << 31) /* data toggle */
+#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
+#define QTD_IOC (1 << 15) /* interrupt on complete */
+#define QTD_CERR(tok) (((tok)>>10) & 0x3)
+#define QTD_PID(tok) (((tok)>>8) & 0x3)
+#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */
+#define QTD_STS_HALT (1 << 6) /* halted on error */
+#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */
+#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
+#define QTD_STS_XACT (1 << 3) /* device gave illegal response */
+#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
+#define QTD_STS_STS (1 << 1) /* split transaction state */
+#define QTD_STS_PING (1 << 0) /* issue PING? */
+
+#define ACTIVE_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_ACTIVE)
+#define HALT_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_HALT)
+#define STATUS_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_STS)
+
+ __hc32 hw_buf[5]; /* see EHCI 3.5.4 */
+ __hc32 hw_buf_hi[5]; /* Appendix B */
+
+ /* the rest is HCD-private */
+ dma_addr_t qtd_dma; /* qtd address */
+ struct list_head qtd_list; /* sw qtd list */
+ struct urb *urb; /* qtd's urb */
+ size_t length; /* length of buffer */
+} __aligned(32);
+
+/* mask NakCnt+T in qh->hw_alt_next */
+#define QTD_MASK(fotg210) cpu_to_hc32(fotg210, ~0x1f)
+
+#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
+
+/*-------------------------------------------------------------------------*/
+
+/* type tag from {qh,itd,fstn}->hw_next */
+#define Q_NEXT_TYPE(fotg210, dma) ((dma) & cpu_to_hc32(fotg210, 3 << 1))
+
+/*
+ * The following defines are no longer converted with the cpu_to_le32()
+ * macro, since we have to support "dynamic" switching between big- and
+ * little-endian descriptors: the same driver may drive an SoC EHCI
+ * controller using big-endian descriptors as well as a normal
+ * little-endian PCI EHCI controller.
+ */
+/* values for that type tag */
+#define Q_TYPE_ITD (0 << 1)
+#define Q_TYPE_QH (1 << 1)
+#define Q_TYPE_SITD (2 << 1)
+#define Q_TYPE_FSTN (3 << 1)
+
+/* next async queue entry, or pointer to interrupt/periodic QH */
+#define QH_NEXT(fotg210, dma) \
+ (cpu_to_hc32(fotg210, (((u32)dma)&~0x01f)|Q_TYPE_QH))
+
+/* for periodic/async schedules and qtd lists, mark end of list */
+#define FOTG210_LIST_END(fotg210) \
+ cpu_to_hc32(fotg210, 1) /* "null pointer" to hw */
+
+/*
+ * Entries in periodic shadow table are pointers to one of four kinds
+ * of data structure. That's dictated by the hardware; a type tag is
+ * encoded in the low bits of the hardware's periodic schedule. Use
+ * Q_NEXT_TYPE to get the tag.
+ *
+ * For entries in the async schedule, the type tag always says "qh".
+ */
+union fotg210_shadow {
+ struct fotg210_qh *qh; /* Q_TYPE_QH */
+ struct fotg210_itd *itd; /* Q_TYPE_ITD */
+ struct fotg210_fstn *fstn; /* Q_TYPE_FSTN */
+ __hc32 *hw_next; /* (all types) */
+ void *ptr;
+};
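Editorial note: as an illustration of how this union and Q_NEXT_TYPE work together, here is a minimal traversal sketch. It is not part of the patch; it assumes the pshadow[]/periodic[] arrays of struct fotg210_hcd declared earlier in this header, and loosely mirrors the periodic-schedule walks done elsewhere in the driver.

/* Sketch: walk one periodic-frame slot, dispatching on the hw type tag. */
static void walk_periodic_slot_sketch(struct fotg210_hcd *fotg210,
				      unsigned frame)
{
	union fotg210_shadow here = fotg210->pshadow[frame];
	__hc32 type = Q_NEXT_TYPE(fotg210, fotg210->periodic[frame]);

	while (here.ptr) {
		switch (hc32_to_cpu(fotg210, type)) {
		case Q_TYPE_QH:
			type = Q_NEXT_TYPE(fotg210, here.qh->hw->hw_next);
			here = here.qh->qh_next;
			break;
		case Q_TYPE_ITD:
			type = Q_NEXT_TYPE(fotg210, here.itd->hw_next);
			here = here.itd->itd_next;
			break;
		default:	/* Q_TYPE_FSTN */
			type = Q_NEXT_TYPE(fotg210, here.fstn->hw_next);
			here = here.fstn->fstn_next;
			break;
		}
	}
}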
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear in both the async and (for interrupt) periodic schedules.
+ */
+
+/* first part defined by EHCI spec */
+struct fotg210_qh_hw {
+ __hc32 hw_next; /* see EHCI 3.6.1 */
+ __hc32 hw_info1; /* see EHCI 3.6.2 */
+#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */
+#define QH_HEAD (1 << 15) /* Head of async reclamation list */
+#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */
+#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */
+#define QH_LOW_SPEED (1 << 12)
+#define QH_FULL_SPEED (0 << 12)
+#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */
+ __hc32 hw_info2; /* see EHCI 3.6.2 */
+#define QH_SMASK 0x000000ff
+#define QH_CMASK 0x0000ff00
+#define QH_HUBADDR 0x007f0000
+#define QH_HUBPORT 0x3f800000
+#define QH_MULT 0xc0000000
+ __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */
+
+ /* qtd overlay (hardware parts of a struct fotg210_qtd) */
+ __hc32 hw_qtd_next;
+ __hc32 hw_alt_next;
+ __hc32 hw_token;
+ __hc32 hw_buf[5];
+ __hc32 hw_buf_hi[5];
+} __aligned(32);
+
+struct fotg210_qh {
+ struct fotg210_qh_hw *hw; /* Must come first */
+ /* the rest is HCD-private */
+ dma_addr_t qh_dma; /* address of qh */
+ union fotg210_shadow qh_next; /* ptr to qh; or periodic */
+ struct list_head qtd_list; /* sw qtd list */
+ struct list_head intr_node; /* list of intr QHs */
+ struct fotg210_qtd *dummy;
+ struct fotg210_qh *unlink_next; /* next on unlink list */
+
+ unsigned unlink_cycle;
+
+ u8 needs_rescan; /* Dequeue during giveback */
+ u8 qh_state;
+#define QH_STATE_LINKED 1 /* HC sees this */
+#define QH_STATE_UNLINK 2 /* HC may still see this */
+#define QH_STATE_IDLE 3 /* HC doesn't see this */
+#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */
+#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
+
+ u8 xacterrs; /* XactErr retry counter */
+#define QH_XACTERR_MAX 32 /* XactErr retry limit */
+
+ /* periodic schedule info */
+ u8 usecs; /* intr bandwidth */
+ u8 gap_uf; /* uframes split/csplit gap */
+ u8 c_usecs; /* ... split completion bw */
+ u16 tt_usecs; /* tt downstream bandwidth */
+ unsigned short period; /* polling interval */
+ unsigned short start; /* where polling starts */
+#define NO_FRAME ((unsigned short)~0) /* pick new start */
+
+ struct usb_device *dev; /* access to TT */
+ unsigned is_out:1; /* bulk or intr OUT */
+ unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* description of one iso transaction (up to 3 KB data if highspeed) */
+struct fotg210_iso_packet {
+ /* These will be copied to iTD when scheduling */
+ u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */
+ __hc32 transaction; /* itd->hw_transaction[i] |= */
+ u8 cross; /* buf crosses pages */
+ /* for full speed OUT splits */
+ u32 buf1;
+};
+
+/* temporary schedule data for packets from iso urbs (both speeds)
+ * each packet is one logical usb transaction to the device (not TT),
+ * beginning at stream->next_uframe
+ */
+struct fotg210_iso_sched {
+ struct list_head td_list;
+ unsigned span;
+ struct fotg210_iso_packet packet[0];
+};
+
+/*
+ * fotg210_iso_stream - groups all (s)itds for this endpoint.
+ * acts like a qh would, if EHCI had them for ISO.
+ */
+struct fotg210_iso_stream {
+ /* first field matches fotg210_qh, but is NULL */
+ struct fotg210_qh_hw *hw;
+
+ u8 bEndpointAddress;
+ u8 highspeed;
+ struct list_head td_list; /* queued itds */
+ struct list_head free_list; /* list of unused itds */
+ struct usb_device *udev;
+ struct usb_host_endpoint *ep;
+
+ /* output of (re)scheduling */
+ int next_uframe;
+ __hc32 splits;
+
+ /* the rest is derived from the endpoint descriptor,
+ * trusting urb->interval == f(epdesc->bInterval) and
+ * including the extra info for hw_bufp[0..2]
+ */
+ u8 usecs, c_usecs;
+ u16 interval;
+ u16 tt_usecs;
+ u16 maxp;
+ u16 raw_mask;
+ unsigned bandwidth;
+
+ /* This is used to initialize iTD's hw_bufp fields */
+ __hc32 buf0;
+ __hc32 buf1;
+ __hc32 buf2;
+
+ /* this is used to initialize sITD's tt info */
+ __hc32 address;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.3
+ * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
+ *
+ * Schedule records for high speed iso xfers
+ */
+struct fotg210_itd {
+ /* first part defined by EHCI spec */
+ __hc32 hw_next; /* see EHCI 3.3.1 */
+ __hc32 hw_transaction[8]; /* see EHCI 3.3.2 */
+#define FOTG210_ISOC_ACTIVE (1<<31) /* activate transfer this slot */
+#define FOTG210_ISOC_BUF_ERR (1<<30) /* Data buffer error */
+#define FOTG210_ISOC_BABBLE (1<<29) /* babble detected */
+#define FOTG210_ISOC_XACTERR (1<<28) /* XactErr - transaction error */
+#define FOTG210_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff)
+#define FOTG210_ITD_IOC (1 << 15) /* interrupt on complete */
+
+#define ITD_ACTIVE(fotg210) cpu_to_hc32(fotg210, FOTG210_ISOC_ACTIVE)
+
+ __hc32 hw_bufp[7]; /* see EHCI 3.3.3 */
+ __hc32 hw_bufp_hi[7]; /* Appendix B */
+
+ /* the rest is HCD-private */
+ dma_addr_t itd_dma; /* for this itd */
+ union fotg210_shadow itd_next; /* ptr to periodic q entry */
+
+ struct urb *urb;
+ struct fotg210_iso_stream *stream; /* endpoint's queue */
+ struct list_head itd_list; /* list of stream's itds */
+
+ /* any/all hw_transactions here may be used by that urb */
+ unsigned frame; /* where scheduled */
+ unsigned pg;
+ unsigned index[8]; /* in urb->iso_frame_desc */
+} __aligned(32);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.96 Section 3.7
+ * Periodic Frame Span Traversal Node (FSTN)
+ *
+ * Manages split interrupt transactions (using TT) that span frame boundaries
+ * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN
+ * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
+ * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
+ */
+struct fotg210_fstn {
+ __hc32 hw_next; /* any periodic q entry */
+ __hc32 hw_prev; /* qh or FOTG210_LIST_END */
+
+ /* the rest is HCD-private */
+ dma_addr_t fstn_dma;
+ union fotg210_shadow fstn_next; /* ptr to periodic q entry */
+} __aligned(32);
+
+/*-------------------------------------------------------------------------*/
+
+/* Prepare the PORTSC wakeup flags during controller suspend/resume */
+
+#define fotg210_prepare_ports_for_controller_suspend(fotg210, do_wakeup) \
+ fotg210_adjust_port_wakeup_flags(fotg210, true, do_wakeup);
+
+#define fotg210_prepare_ports_for_controller_resume(fotg210) \
+ fotg210_adjust_port_wakeup_flags(fotg210, false, false);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Some EHCI controllers have a Transaction Translator built into the
+ * root hub. This is a non-standard feature. Each controller will need
+ * to add code to the following inline functions, and call them as
+ * needed (mostly in root hub code).
+ */
+
+static inline unsigned int
+fotg210_get_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
+{
+ return (readl(&fotg210->regs->otgcsr)
+ & OTGCSR_HOST_SPD_TYP) >> 22;
+}
+
+/* Returns the speed of a device attached to a port on the root hub. */
+static inline unsigned int
+fotg210_port_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
+{
+ switch (fotg210_get_speed(fotg210, portsc)) {
+ case 0:
+ return 0;
+ case 1:
+ return USB_PORT_STAT_LOW_SPEED;
+ case 2:
+ default:
+ return USB_PORT_STAT_HIGH_SPEED;
+ }
+}
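Editorial note: a hedged sketch of how root-hub status reporting might consume this helper. The hub_control()/GetPortStatus path is not shown in this hunk, and the regs->port_status field is assumed from the bit definitions above; the variable names are illustrative only.

/* Sketch: folding the attached device's speed into a GetPortStatus reply. */
static u32 port_status_sketch(struct fotg210_hcd *fotg210)
{
	u32 portsc = fotg210_readl(fotg210, &fotg210->regs->port_status);
	u32 status = 0;

	if (portsc & PORT_CONNECT) {
		status |= USB_PORT_STAT_CONNECTION;
		/* USB_PORT_STAT_LOW_SPEED, _HIGH_SPEED, or 0 = full speed */
		status |= fotg210_port_speed(fotg210, portsc);
	}
	return status;
}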
+
+/*-------------------------------------------------------------------------*/
+
+#define fotg210_has_fsl_portno_bug(e) (0)
+
+/*
+ * While most USB host controllers implement their registers in
+ * little-endian format, a minority (celleb companion chip) implement
+ * them in big endian format.
+ *
+ * This attempts to support either format at compile time without a
+ * runtime penalty, or both formats with the additional overhead
+ * of checking a flag bit.
+ *
+ */
+
+#define fotg210_big_endian_mmio(e) 0
+#define fotg210_big_endian_capbase(e) 0
+
+static inline unsigned int fotg210_readl(const struct fotg210_hcd *fotg210,
+ __u32 __iomem *regs)
+{
+ return readl(regs);
+}
+
+static inline void fotg210_writel(const struct fotg210_hcd *fotg210,
+ const unsigned int val, __u32 __iomem *regs)
+{
+ writel(val, regs);
+}
+
+/* cpu to fotg210 */
+static inline __hc32 cpu_to_hc32(const struct fotg210_hcd *fotg210, const u32 x)
+{
+ return cpu_to_le32(x);
+}
+
+/* fotg210 to cpu */
+static inline u32 hc32_to_cpu(const struct fotg210_hcd *fotg210, const __hc32 x)
+{
+ return le32_to_cpu(x);
+}
+
+static inline u32 hc32_to_cpup(const struct fotg210_hcd *fotg210,
+ const __hc32 *x)
+{
+ return le32_to_cpup(x);
+}
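Editorial note: a small usage sketch of these helpers. Every descriptor field the controller sees is built and read back through cpu_to_hc32()/hc32_to_cpu(); the helper names below are hypothetical and only loosely follow the driver's qtd_fill() pattern.

/* Sketch: hardware-visible qTD fields are always built via the helpers. */
static void qtd_fill_sketch(struct fotg210_hcd *fotg210,
		struct fotg210_qtd *qtd, dma_addr_t buf, size_t len, u32 token)
{
	qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)buf);
	qtd->hw_token = cpu_to_hc32(fotg210, ((u32)len << 16) | token);
	qtd->length = len;
}

/* Sketch: the same helpers convert back when inspecting completions. */
static bool qtd_was_short_read_sketch(struct fotg210_hcd *fotg210,
		struct fotg210_qtd *qtd)
{
	return IS_SHORT_READ(hc32_to_cpu(fotg210, qtd->hw_token));
}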
+
+/*-------------------------------------------------------------------------*/
+
+static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
+{
+ return fotg210_readl(fotg210, &fotg210->regs->frame_index);
+}
+
+#define fotg210_itdlen(urb, desc, t) ({ \
+ usb_pipein((urb)->pipe) ? \
+ (desc)->length - FOTG210_ITD_LENGTH(t) : \
+ FOTG210_ITD_LENGTH(t); \
+})
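Editorial note: a brief usage sketch of this macro, following the shape of iTD completion handling (the helper name and simplified error handling are assumptions). As the macro encodes, the IN case subtracts the token's length field from the requested length, while the OUT case uses the length field directly.

/* Sketch: record actual_length for one completed iTD slot. */
static void itd_slot_complete_sketch(struct fotg210_hcd *fotg210,
		struct fotg210_itd *itd, struct urb *urb, unsigned i)
{
	struct usb_iso_packet_descriptor *desc = &urb->iso_frame_desc[i];
	u32 t = hc32_to_cpu(fotg210, itd->hw_transaction[i]);

	if (t & (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |
			FOTG210_ISOC_XACTERR)) {
		desc->status = -EPROTO;	/* simplified error handling */
		return;
	}
	desc->status = 0;
	desc->actual_length = fotg210_itdlen(urb, desc, t);
	urb->actual_length += desc->actual_length;
}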
+/*-------------------------------------------------------------------------*/
+
+#endif /* __LINUX_FOTG210_H */
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 574b99ea070..9162d1b6c0a 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
+#include <linux/module.h>
struct fsl_usb2_dev_data {
char *dr_mode; /* controller mode */
@@ -23,7 +24,7 @@ struct fsl_usb2_dev_data {
enum fsl_usb2_operating_modes op_mode; /* operating mode */
};
-struct fsl_usb2_dev_data dr_mode_data[] __devinitdata = {
+static struct fsl_usb2_dev_data dr_mode_data[] = {
{
.dr_mode = "host",
.drivers = { "fsl-ehci", NULL, NULL, },
@@ -41,7 +42,7 @@ struct fsl_usb2_dev_data dr_mode_data[] __devinitdata = {
},
};
-struct fsl_usb2_dev_data * __devinit get_dr_mode_data(struct device_node *np)
+static struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np)
{
const unsigned char *prop;
int i;
@@ -58,7 +59,7 @@ struct fsl_usb2_dev_data * __devinit get_dr_mode_data(struct device_node *np)
return &dr_mode_data[0]; /* mode not specified, use host */
}
-static enum fsl_usb2_phy_modes __devinit determine_usb_phy(const char *phy_type)
+static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
{
if (!phy_type)
return FSL_USB2_PHY_NONE;
@@ -74,7 +75,7 @@ static enum fsl_usb2_phy_modes __devinit determine_usb_phy(const char *phy_type)
return FSL_USB2_PHY_NONE;
}
-struct platform_device * __devinit fsl_usb2_device_register(
+static struct platform_device *fsl_usb2_device_register(
struct platform_device *ofdev,
struct fsl_usb2_platform_data *pdata,
const char *name, int id)
@@ -93,7 +94,6 @@ struct platform_device * __devinit fsl_usb2_device_register(
pdev->dev.parent = &ofdev->dev;
pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
- pdev->dev.dma_mask = &pdev->archdata.dma_mask;
*pdev->dev.dma_mask = *ofdev->dev.dma_mask;
retval = platform_device_add_data(pdev, pdata, sizeof(*pdata));
@@ -119,7 +119,45 @@ error:
static const struct of_device_id fsl_usb2_mph_dr_of_match[];
-static int __devinit fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
+static int usb_get_ver_info(struct device_node *np)
+{
+ int ver = -1;
+
+ /*
+ * Returns FSL_USB_VER_1_6 for controller version 1.6, FSL_USB_VER_2_2
+ * for version 2.2, FSL_USB_VER_2_4 for version 2.4, FSL_USB_VER_OLD
+ * for older controllers, or -1 if the version cannot be determined.
+ */
+ if (of_device_is_compatible(np, "fsl-usb2-dr")) {
+ if (of_device_is_compatible(np, "fsl-usb2-dr-v1.6"))
+ ver = FSL_USB_VER_1_6;
+ else if (of_device_is_compatible(np, "fsl-usb2-dr-v2.2"))
+ ver = FSL_USB_VER_2_2;
+ else if (of_device_is_compatible(np, "fsl-usb2-dr-v2.4"))
+ ver = FSL_USB_VER_2_4;
+ else /* for previous controller versions */
+ ver = FSL_USB_VER_OLD;
+
+ if (ver > -1)
+ return ver;
+ }
+
+ if (of_device_is_compatible(np, "fsl,mpc5121-usb2-dr"))
+ return FSL_USB_VER_OLD;
+
+ if (of_device_is_compatible(np, "fsl-usb2-mph")) {
+ if (of_device_is_compatible(np, "fsl-usb2-mph-v1.6"))
+ ver = FSL_USB_VER_1_6;
+ else if (of_device_is_compatible(np, "fsl-usb2-mph-v2.2"))
+ ver = FSL_USB_VER_2_2;
+ else /* for previous controller versions */
+ ver = FSL_USB_VER_OLD;
+ }
+
+ return ver;
+}
+
+static int fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct platform_device *usb_dev;
@@ -166,6 +204,14 @@ static int __devinit fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
prop = of_get_property(np, "phy_type", NULL);
pdata->phy_mode = determine_usb_phy(prop);
+ pdata->controller_ver = usb_get_ver_info(np);
+
+ if (pdata->have_sysif_regs) {
+ if (pdata->controller_ver < 0) {
+ dev_warn(&ofdev->dev, "Could not get controller version\n");
+ return -ENODEV;
+ }
+ }
for (i = 0; i < ARRAY_SIZE(dev_data->drivers); i++) {
if (!dev_data->drivers[i])
@@ -181,13 +227,13 @@ static int __devinit fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
return 0;
}
-static int __devexit __unregister_subdev(struct device *dev, void *d)
+static int __unregister_subdev(struct device *dev, void *d)
{
platform_device_unregister(to_platform_device(dev));
return 0;
}
-static int __devexit fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
+static int fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
{
device_for_each_child(&ofdev->dev, NULL, __unregister_subdev);
return 0;
@@ -212,27 +258,20 @@ static int __devexit fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
int fsl_usb2_mpc5121_init(struct platform_device *pdev)
{
- struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct clk *clk;
- char clk_name[10];
- int base, clk_num;
-
- base = pdev->resource->start & 0xf000;
- if (base == 0x3000)
- clk_num = 1;
- else if (base == 0x4000)
- clk_num = 2;
- else
- return -ENODEV;
+ int err;
- snprintf(clk_name, sizeof(clk_name), "usb%d_clk", clk_num);
- clk = clk_get(&pdev->dev, clk_name);
+ clk = devm_clk_get(pdev->dev.parent, "ipg");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clk\n");
return PTR_ERR(clk);
}
-
- clk_enable(clk);
+ err = clk_prepare_enable(clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable clk\n");
+ return err;
+ }
pdata->clk = clk;
if (pdata->phy_mode == FSL_USB2_PHY_UTMI_WIDE) {
@@ -252,29 +291,32 @@ int fsl_usb2_mpc5121_init(struct platform_device *pdev)
static void fsl_usb2_mpc5121_exit(struct platform_device *pdev)
{
- struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
pdata->regs = NULL;
- if (pdata->clk) {
- clk_disable(pdata->clk);
- clk_put(pdata->clk);
- }
+ if (pdata->clk)
+ clk_disable_unprepare(pdata->clk);
}
-struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
+static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
.big_endian_desc = 1,
.big_endian_mmio = 1,
.es = 1,
+ .have_sysif_regs = 0,
.le_setup_buf = 1,
.init = fsl_usb2_mpc5121_init,
.exit = fsl_usb2_mpc5121_exit,
};
#endif /* CONFIG_PPC_MPC512x */
+static struct fsl_usb2_platform_data fsl_usb2_mpc8xxx_pd = {
+ .have_sysif_regs = 1,
+};
+
static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
- { .compatible = "fsl-usb2-mph", },
- { .compatible = "fsl-usb2-dr", },
+ { .compatible = "fsl-usb2-mph", .data = &fsl_usb2_mpc8xxx_pd, },
+ { .compatible = "fsl-usb2-dr", .data = &fsl_usb2_mpc8xxx_pd, },
#ifdef CONFIG_PPC_MPC512x
{ .compatible = "fsl,mpc5121-usb2-dr", .data = &fsl_usb2_mpc5121_pd, },
#endif
@@ -288,20 +330,10 @@ static struct platform_driver fsl_usb2_mph_dr_driver = {
.of_match_table = fsl_usb2_mph_dr_of_match,
},
.probe = fsl_usb2_mph_dr_of_probe,
- .remove = __devexit_p(fsl_usb2_mph_dr_of_remove),
+ .remove = fsl_usb2_mph_dr_of_remove,
};
-static int __init fsl_usb2_mph_dr_init(void)
-{
- return platform_driver_register(&fsl_usb2_mph_dr_driver);
-}
-module_init(fsl_usb2_mph_dr_init);
-
-static void __exit fsl_usb2_mph_dr_exit(void)
-{
- platform_driver_unregister(&fsl_usb2_mph_dr_driver);
-}
-module_exit(fsl_usb2_mph_dr_exit);
+module_platform_driver(fsl_usb2_mph_dr_driver);
MODULE_DESCRIPTION("FSL MPH DR OF devices driver");
MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
new file mode 100644
index 00000000000..ba9499060f6
--- /dev/null
+++ b/drivers/usb/host/fusbh200-hcd.c
@@ -0,0 +1,5894 @@
+/*
+ * Faraday FUSBH200 EHCI-like driver
+ *
+ * Copyright (c) 2013 Faraday Technology Corporation
+ *
+ * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
+ * Feng-Hsin Chiang <john453@faraday-tech.com>
+ * Po-Yu Chuang <ratbert.chuang@gmail.com>
+ *
+ * Most of code borrowed from the Linux-3.7 EHCI driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+/*-------------------------------------------------------------------------*/
+#define DRIVER_AUTHOR "Yuan-Hsin Chen"
+#define DRIVER_DESC "FUSBH200 Host Controller (EHCI) Driver"
+
+static const char hcd_name [] = "fusbh200_hcd";
+
+#undef FUSBH200_URB_TRACE
+
+/* magic numbers that can affect system performance */
+#define FUSBH200_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
+#define FUSBH200_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
+#define FUSBH200_TUNE_RL_TT 0
+#define FUSBH200_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
+#define FUSBH200_TUNE_MULT_TT 1
+/*
+ * Some drivers think it's safe to schedule isochronous transfers more than
+ * 256 ms into the future (partly as a result of an old bug in the scheduling
+ * code). In an attempt to avoid trouble, we will use a minimum scheduling
+ * length of 512 frames instead of 256.
+ */
+#define FUSBH200_TUNE_FLS 1 /* (medium) 512-frame schedule */
+
+/* Initial IRQ latency: faster than hw default */
+static int log2_irq_thresh = 0; // 0 to 6
+module_param (log2_irq_thresh, int, S_IRUGO);
+MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
+
+/* initial park setting: slower than hw default */
+static unsigned park = 0;
+module_param (park, uint, S_IRUGO);
+MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
+
+/* for link power management (LPM) feature */
+static unsigned int hird;
+module_param(hird, int, S_IRUGO);
+MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
+
+#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+#include "fusbh200.h"
+
+/*-------------------------------------------------------------------------*/
+
+#define fusbh200_dbg(fusbh200, fmt, args...) \
+ dev_dbg (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
+#define fusbh200_err(fusbh200, fmt, args...) \
+ dev_err (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
+#define fusbh200_info(fusbh200, fmt, args...) \
+ dev_info (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
+#define fusbh200_warn(fusbh200, fmt, args...) \
+ dev_warn (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
+
+/* check the values in the HCSPARAMS register
+ * (host controller _Structural_ parameters)
+ * see EHCI spec, Table 2-4 for each value
+ */
+static void dbg_hcs_params (struct fusbh200_hcd *fusbh200, char *label)
+{
+ u32 params = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
+
+ fusbh200_dbg (fusbh200,
+ "%s hcs_params 0x%x ports=%d\n",
+ label, params,
+ HCS_N_PORTS (params)
+ );
+}
+
+/* check the values in the HCCPARAMS register
+ * (host controller _Capability_ parameters)
+ * see EHCI Spec, Table 2-5 for each value
+ */
+static void dbg_hcc_params (struct fusbh200_hcd *fusbh200, char *label)
+{
+ u32 params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
+
+ fusbh200_dbg (fusbh200,
+ "%s hcc_params %04x uframes %s%s\n",
+ label,
+ params,
+ HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+ HCC_CANPARK(params) ? " park" : "");
+}
+
+static void __maybe_unused
+dbg_qtd (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd)
+{
+ fusbh200_dbg(fusbh200, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
+ hc32_to_cpup(fusbh200, &qtd->hw_next),
+ hc32_to_cpup(fusbh200, &qtd->hw_alt_next),
+ hc32_to_cpup(fusbh200, &qtd->hw_token),
+ hc32_to_cpup(fusbh200, &qtd->hw_buf [0]));
+ if (qtd->hw_buf [1])
+ fusbh200_dbg(fusbh200, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
+ hc32_to_cpup(fusbh200, &qtd->hw_buf[1]),
+ hc32_to_cpup(fusbh200, &qtd->hw_buf[2]),
+ hc32_to_cpup(fusbh200, &qtd->hw_buf[3]),
+ hc32_to_cpup(fusbh200, &qtd->hw_buf[4]));
+}
+
+static void __maybe_unused
+dbg_qh (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ struct fusbh200_qh_hw *hw = qh->hw;
+
+ fusbh200_dbg (fusbh200, "%s qh %p n%08x info %x %x qtd %x\n", label,
+ qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
+ dbg_qtd("overlay", fusbh200, (struct fusbh200_qtd *) &hw->hw_qtd_next);
+}
+
+static void __maybe_unused
+dbg_itd (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_itd *itd)
+{
+ fusbh200_dbg (fusbh200, "%s [%d] itd %p, next %08x, urb %p\n",
+ label, itd->frame, itd, hc32_to_cpu(fusbh200, itd->hw_next),
+ itd->urb);
+ fusbh200_dbg (fusbh200,
+ " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ hc32_to_cpu(fusbh200, itd->hw_transaction[0]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[1]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[2]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[3]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[4]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[5]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[6]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[7]));
+ fusbh200_dbg (fusbh200,
+ " buf: %08x %08x %08x %08x %08x %08x %08x\n",
+ hc32_to_cpu(fusbh200, itd->hw_bufp[0]),
+ hc32_to_cpu(fusbh200, itd->hw_bufp[1]),
+ hc32_to_cpu(fusbh200, itd->hw_bufp[2]),
+ hc32_to_cpu(fusbh200, itd->hw_bufp[3]),
+ hc32_to_cpu(fusbh200, itd->hw_bufp[4]),
+ hc32_to_cpu(fusbh200, itd->hw_bufp[5]),
+ hc32_to_cpu(fusbh200, itd->hw_bufp[6]));
+ fusbh200_dbg (fusbh200, " index: %d %d %d %d %d %d %d %d\n",
+ itd->index[0], itd->index[1], itd->index[2],
+ itd->index[3], itd->index[4], itd->index[5],
+ itd->index[6], itd->index[7]);
+}
+
+static int __maybe_unused
+dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
+{
+ return scnprintf (buf, len,
+ "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+ label, label [0] ? " " : "", status,
+ (status & STS_ASS) ? " Async" : "",
+ (status & STS_PSS) ? " Periodic" : "",
+ (status & STS_RECL) ? " Recl" : "",
+ (status & STS_HALT) ? " Halt" : "",
+ (status & STS_IAA) ? " IAA" : "",
+ (status & STS_FATAL) ? " FATAL" : "",
+ (status & STS_FLR) ? " FLR" : "",
+ (status & STS_PCD) ? " PCD" : "",
+ (status & STS_ERR) ? " ERR" : "",
+ (status & STS_INT) ? " INT" : ""
+ );
+}
+
+static int __maybe_unused
+dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
+{
+ return scnprintf (buf, len,
+ "%s%sintrenable %02x%s%s%s%s%s%s",
+ label, label [0] ? " " : "", enable,
+ (enable & STS_IAA) ? " IAA" : "",
+ (enable & STS_FATAL) ? " FATAL" : "",
+ (enable & STS_FLR) ? " FLR" : "",
+ (enable & STS_PCD) ? " PCD" : "",
+ (enable & STS_ERR) ? " ERR" : "",
+ (enable & STS_INT) ? " INT" : ""
+ );
+}
+
+static const char *const fls_strings [] =
+ { "1024", "512", "256", "??" };
+
+static int
+dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
+{
+ return scnprintf (buf, len,
+ "%s%scommand %07x %s=%d ithresh=%d%s%s%s "
+ "period=%s%s %s",
+ label, label [0] ? " " : "", command,
+ (command & CMD_PARK) ? " park" : "(park)",
+ CMD_PARK_CNT (command),
+ (command >> 16) & 0x3f,
+ (command & CMD_IAAD) ? " IAAD" : "",
+ (command & CMD_ASE) ? " Async" : "",
+ (command & CMD_PSE) ? " Periodic" : "",
+ fls_strings [(command >> 2) & 0x3],
+ (command & CMD_RESET) ? " Reset" : "",
+ (command & CMD_RUN) ? "RUN" : "HALT"
+ );
+}
+
+static int
+dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
+{
+ char *sig;
+
+ /* signaling state */
+ switch (status & (3 << 10)) {
+ case 0 << 10: sig = "se0"; break;
+ case 1 << 10: sig = "k"; break; /* low speed */
+ case 2 << 10: sig = "j"; break;
+ default: sig = "?"; break;
+ }
+
+ return scnprintf (buf, len,
+ "%s%sport:%d status %06x %d "
+ "sig=%s%s%s%s%s%s%s%s",
+ label, label [0] ? " " : "", port, status,
+ status>>25,/*device address */
+ sig,
+ (status & PORT_RESET) ? " RESET" : "",
+ (status & PORT_SUSPEND) ? " SUSPEND" : "",
+ (status & PORT_RESUME) ? " RESUME" : "",
+ (status & PORT_PEC) ? " PEC" : "",
+ (status & PORT_PE) ? " PE" : "",
+ (status & PORT_CSC) ? " CSC" : "",
+ (status & PORT_CONNECT) ? " CONNECT" : "");
+}
+
+/* functions have the "wrong" filename when they're output... */
+#define dbg_status(fusbh200, label, status) { \
+ char _buf [80]; \
+ dbg_status_buf (_buf, sizeof _buf, label, status); \
+ fusbh200_dbg (fusbh200, "%s\n", _buf); \
+}
+
+#define dbg_cmd(fusbh200, label, command) { \
+ char _buf [80]; \
+ dbg_command_buf (_buf, sizeof _buf, label, command); \
+ fusbh200_dbg (fusbh200, "%s\n", _buf); \
+}
+
+#define dbg_port(fusbh200, label, port, status) { \
+ char _buf [80]; \
+ dbg_port_buf (_buf, sizeof _buf, label, port, status); \
+ fusbh200_dbg (fusbh200, "%s\n", _buf); \
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* troubleshooting help: expose state in debugfs */
+
+static int debug_async_open(struct inode *, struct file *);
+static int debug_periodic_open(struct inode *, struct file *);
+static int debug_registers_open(struct inode *, struct file *);
+
+static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
+static int debug_close(struct inode *, struct file *);
+
+static const struct file_operations debug_async_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_async_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+static const struct file_operations debug_periodic_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_periodic_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+static const struct file_operations debug_registers_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_registers_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+
+static struct dentry *fusbh200_debug_root;
+
+struct debug_buffer {
+ ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
+ struct usb_bus *bus;
+ struct mutex mutex; /* protect filling of buffer */
+ size_t count; /* number of characters filled into buffer */
+ char *output_buf;
+ size_t alloc_size;
+};
+
+#define speed_char(info1) ({ char tmp; \
+ switch (info1 & (3 << 12)) { \
+ case QH_FULL_SPEED: tmp = 'f'; break; \
+ case QH_LOW_SPEED: tmp = 'l'; break; \
+ case QH_HIGH_SPEED: tmp = 'h'; break; \
+ default: tmp = '?'; break; \
+ } tmp; })
+
+static inline char token_mark(struct fusbh200_hcd *fusbh200, __hc32 token)
+{
+ __u32 v = hc32_to_cpu(fusbh200, token);
+
+ if (v & QTD_STS_ACTIVE)
+ return '*';
+ if (v & QTD_STS_HALT)
+ return '-';
+ if (!IS_SHORT_READ (v))
+ return ' ';
+ /* tries to advance through hw_alt_next */
+ return '/';
+}
+
+static void qh_lines (
+ struct fusbh200_hcd *fusbh200,
+ struct fusbh200_qh *qh,
+ char **nextp,
+ unsigned *sizep
+)
+{
+ u32 scratch;
+ u32 hw_curr;
+ struct fusbh200_qtd *td;
+ unsigned temp;
+ unsigned size = *sizep;
+ char *next = *nextp;
+ char mark;
+ __le32 list_end = FUSBH200_LIST_END(fusbh200);
+ struct fusbh200_qh_hw *hw = qh->hw;
+
+ if (hw->hw_qtd_next == list_end) /* NEC does this */
+ mark = '@';
+ else
+ mark = token_mark(fusbh200, hw->hw_token);
+ if (mark == '/') { /* qh_alt_next controls qh advance? */
+ if ((hw->hw_alt_next & QTD_MASK(fusbh200))
+ == fusbh200->async->hw->hw_alt_next)
+ mark = '#'; /* blocked */
+ else if (hw->hw_alt_next == list_end)
+ mark = '.'; /* use hw_qtd_next */
+ /* else alt_next points to some other qtd */
+ }
+ scratch = hc32_to_cpup(fusbh200, &hw->hw_info1);
+ hw_curr = (mark == '*') ? hc32_to_cpup(fusbh200, &hw->hw_current) : 0;
+ temp = scnprintf (next, size,
+ "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
+ qh, scratch & 0x007f,
+ speed_char (scratch),
+ (scratch >> 8) & 0x000f,
+ scratch, hc32_to_cpup(fusbh200, &hw->hw_info2),
+ hc32_to_cpup(fusbh200, &hw->hw_token), mark,
+ (cpu_to_hc32(fusbh200, QTD_TOGGLE) & hw->hw_token)
+ ? "data1" : "data0",
+ (hc32_to_cpup(fusbh200, &hw->hw_alt_next) >> 1) & 0x0f);
+ size -= temp;
+ next += temp;
+
+ /* hc may be modifying the list as we read it ... */
+ list_for_each_entry(td, &qh->qtd_list, qtd_list) {
+ scratch = hc32_to_cpup(fusbh200, &td->hw_token);
+ mark = ' ';
+ if (hw_curr == td->qtd_dma)
+ mark = '*';
+ else if (hw->hw_qtd_next == cpu_to_hc32(fusbh200, td->qtd_dma))
+ mark = '+';
+ else if (QTD_LENGTH (scratch)) {
+ if (td->hw_alt_next == fusbh200->async->hw->hw_alt_next)
+ mark = '#';
+ else if (td->hw_alt_next != list_end)
+ mark = '/';
+ }
+ temp = snprintf (next, size,
+ "\n\t%p%c%s len=%d %08x urb %p",
+ td, mark, ({ char *tmp;
+ switch ((scratch>>8)&0x03) {
+ case 0: tmp = "out"; break;
+ case 1: tmp = "in"; break;
+ case 2: tmp = "setup"; break;
+ default: tmp = "?"; break;
+ } tmp;}),
+ (scratch >> 16) & 0x7fff,
+ scratch,
+ td->urb);
+ if (size < temp)
+ temp = size;
+ size -= temp;
+ next += temp;
+ if (temp == size)
+ goto done;
+ }
+
+ temp = snprintf (next, size, "\n");
+ if (size < temp)
+ temp = size;
+ size -= temp;
+ next += temp;
+
+done:
+ *sizep = size;
+ *nextp = next;
+}
+
+static ssize_t fill_async_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fusbh200_hcd *fusbh200;
+ unsigned long flags;
+ unsigned temp, size;
+ char *next;
+ struct fusbh200_qh *qh;
+
+ hcd = bus_to_hcd(buf->bus);
+ fusbh200 = hcd_to_fusbh200 (hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ *next = 0;
+
+ /* dumps a snapshot of the async schedule.
+ * usually empty except for long-term bulk reads, or head.
+ * one QH per line, and TDs we know about
+ */
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ for (qh = fusbh200->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
+ qh_lines (fusbh200, qh, &next, &size);
+ if (fusbh200->async_unlink && size > 0) {
+ temp = scnprintf(next, size, "\nunlink =\n");
+ size -= temp;
+ next += temp;
+
+ for (qh = fusbh200->async_unlink; size > 0 && qh;
+ qh = qh->unlink_next)
+ qh_lines (fusbh200, qh, &next, &size);
+ }
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+
+ return strlen(buf->output_buf);
+}
+
+#define DBG_SCHED_LIMIT 64
+static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fusbh200_hcd *fusbh200;
+ unsigned long flags;
+ union fusbh200_shadow p, *seen;
+ unsigned temp, size, seen_count;
+ char *next;
+ unsigned i;
+ __hc32 tag;
+
+ if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC)))
+ return 0;
+ seen_count = 0;
+
+ hcd = bus_to_hcd(buf->bus);
+ fusbh200 = hcd_to_fusbh200 (hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ temp = scnprintf (next, size, "size = %d\n", fusbh200->periodic_size);
+ size -= temp;
+ next += temp;
+
+ /* dump a snapshot of the periodic schedule.
+ * iso changes, interrupt usually doesn't.
+ */
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ for (i = 0; i < fusbh200->periodic_size; i++) {
+ p = fusbh200->pshadow [i];
+ if (likely (!p.ptr))
+ continue;
+ tag = Q_NEXT_TYPE(fusbh200, fusbh200->periodic [i]);
+
+ temp = scnprintf (next, size, "%4d: ", i);
+ size -= temp;
+ next += temp;
+
+ do {
+ struct fusbh200_qh_hw *hw;
+
+ switch (hc32_to_cpu(fusbh200, tag)) {
+ case Q_TYPE_QH:
+ hw = p.qh->hw;
+ temp = scnprintf (next, size, " qh%d-%04x/%p",
+ p.qh->period,
+ hc32_to_cpup(fusbh200,
+ &hw->hw_info2)
+ /* uframe masks */
+ & (QH_CMASK | QH_SMASK),
+ p.qh);
+ size -= temp;
+ next += temp;
+ /* don't repeat what follows this qh */
+ for (temp = 0; temp < seen_count; temp++) {
+ if (seen [temp].ptr != p.ptr)
+ continue;
+ if (p.qh->qh_next.ptr) {
+ temp = scnprintf (next, size,
+ " ...");
+ size -= temp;
+ next += temp;
+ }
+ break;
+ }
+ /* show more info the first time around */
+ if (temp == seen_count) {
+ u32 scratch = hc32_to_cpup(fusbh200,
+ &hw->hw_info1);
+ struct fusbh200_qtd *qtd;
+ char *type = "";
+
+ /* count tds, get ep direction */
+ temp = 0;
+ list_for_each_entry (qtd,
+ &p.qh->qtd_list,
+ qtd_list) {
+ temp++;
+ switch (0x03 & (hc32_to_cpu(
+ fusbh200,
+ qtd->hw_token) >> 8)) {
+ case 0: type = "out"; continue;
+ case 1: type = "in"; continue;
+ }
+ }
+
+ temp = scnprintf (next, size,
+ " (%c%d ep%d%s "
+ "[%d/%d] q%d p%d)",
+ speed_char (scratch),
+ scratch & 0x007f,
+ (scratch >> 8) & 0x000f, type,
+ p.qh->usecs, p.qh->c_usecs,
+ temp,
+ 0x7ff & (scratch >> 16));
+
+ if (seen_count < DBG_SCHED_LIMIT)
+ seen [seen_count++].qh = p.qh;
+ } else
+ temp = 0;
+ tag = Q_NEXT_TYPE(fusbh200, hw->hw_next);
+ p = p.qh->qh_next;
+ break;
+ case Q_TYPE_FSTN:
+ temp = scnprintf (next, size,
+ " fstn-%8x/%p", p.fstn->hw_prev,
+ p.fstn);
+ tag = Q_NEXT_TYPE(fusbh200, p.fstn->hw_next);
+ p = p.fstn->fstn_next;
+ break;
+ case Q_TYPE_ITD:
+ temp = scnprintf (next, size,
+ " itd/%p", p.itd);
+ tag = Q_NEXT_TYPE(fusbh200, p.itd->hw_next);
+ p = p.itd->itd_next;
+ break;
+ }
+ size -= temp;
+ next += temp;
+ } while (p.ptr);
+
+ temp = scnprintf (next, size, "\n");
+ size -= temp;
+ next += temp;
+ }
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ kfree (seen);
+
+ return buf->alloc_size - size;
+}
+#undef DBG_SCHED_LIMIT
+
+static const char *rh_state_string(struct fusbh200_hcd *fusbh200)
+{
+ switch (fusbh200->rh_state) {
+ case FUSBH200_RH_HALTED:
+ return "halted";
+ case FUSBH200_RH_SUSPENDED:
+ return "suspended";
+ case FUSBH200_RH_RUNNING:
+ return "running";
+ case FUSBH200_RH_STOPPING:
+ return "stopping";
+ }
+ return "?";
+}
+
+static ssize_t fill_registers_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fusbh200_hcd *fusbh200;
+ unsigned long flags;
+ unsigned temp, size, i;
+ char *next, scratch [80];
+ static char fmt [] = "%*s\n";
+ static char label [] = "";
+
+ hcd = bus_to_hcd(buf->bus);
+ fusbh200 = hcd_to_fusbh200 (hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ spin_lock_irqsave (&fusbh200->lock, flags);
+
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
+ size = scnprintf (next, size,
+ "bus %s, device %s\n"
+ "%s\n"
+ "SUSPENDED (no register access)\n",
+ hcd->self.controller->bus->name,
+ dev_name(hcd->self.controller),
+ hcd->product_desc);
+ goto done;
+ }
+
+ /* Capability Registers */
+ i = HC_VERSION(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
+ temp = scnprintf (next, size,
+ "bus %s, device %s\n"
+ "%s\n"
+ "EHCI %x.%02x, rh state %s\n",
+ hcd->self.controller->bus->name,
+ dev_name(hcd->self.controller),
+ hcd->product_desc,
+ i >> 8, i & 0x0ff, rh_state_string(fusbh200));
+ size -= temp;
+ next += temp;
+
+ // FIXME interpret both types of params
+ i = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
+ temp = scnprintf (next, size, "structural params 0x%08x\n", i);
+ size -= temp;
+ next += temp;
+
+ i = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
+ temp = scnprintf (next, size, "capability params 0x%08x\n", i);
+ size -= temp;
+ next += temp;
+
+ /* Operational Registers */
+ temp = dbg_status_buf (scratch, sizeof scratch, label,
+ fusbh200_readl(fusbh200, &fusbh200->regs->status));
+ temp = scnprintf (next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = dbg_command_buf (scratch, sizeof scratch, label,
+ fusbh200_readl(fusbh200, &fusbh200->regs->command));
+ temp = scnprintf (next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = dbg_intr_buf (scratch, sizeof scratch, label,
+ fusbh200_readl(fusbh200, &fusbh200->regs->intr_enable));
+ temp = scnprintf (next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf (next, size, "uframe %04x\n",
+ fusbh200_read_frame_index(fusbh200));
+ size -= temp;
+ next += temp;
+
+ if (fusbh200->async_unlink) {
+ temp = scnprintf(next, size, "async unlink qh %p\n",
+ fusbh200->async_unlink);
+ size -= temp;
+ next += temp;
+ }
+
+ temp = scnprintf (next, size,
+ "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+ fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa,
+ fusbh200->stats.lost_iaa);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf (next, size, "complete %ld unlink %ld\n",
+ fusbh200->stats.complete, fusbh200->stats.unlink);
+ size -= temp;
+ next += temp;
+
+done:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+
+ return buf->alloc_size - size;
+}
+
+static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
+ ssize_t (*fill_func)(struct debug_buffer *))
+{
+ struct debug_buffer *buf;
+
+ buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
+
+ if (buf) {
+ buf->bus = bus;
+ buf->fill_func = fill_func;
+ mutex_init(&buf->mutex);
+ buf->alloc_size = PAGE_SIZE;
+ }
+
+ return buf;
+}
+
+static int fill_buffer(struct debug_buffer *buf)
+{
+ int ret = 0;
+
+ if (!buf->output_buf)
+ buf->output_buf = vmalloc(buf->alloc_size);
+
+ if (!buf->output_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = buf->fill_func(buf);
+
+ if (ret >= 0) {
+ buf->count = ret;
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
+static ssize_t debug_output(struct file *file, char __user *user_buf,
+ size_t len, loff_t *offset)
+{
+ struct debug_buffer *buf = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&buf->mutex);
+ if (buf->count == 0) {
+ ret = fill_buffer(buf);
+ if (ret != 0) {
+ mutex_unlock(&buf->mutex);
+ goto out;
+ }
+ }
+ mutex_unlock(&buf->mutex);
+
+ ret = simple_read_from_buffer(user_buf, len, offset,
+ buf->output_buf, buf->count);
+
+out:
+ return ret;
+
+}
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+ struct debug_buffer *buf = file->private_data;
+
+ if (buf) {
+ vfree(buf->output_buf);
+ kfree(buf);
+ }
+
+ return 0;
+}
+static int debug_async_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
+static int debug_periodic_open(struct inode *inode, struct file *file)
+{
+ struct debug_buffer *buf;
+ buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
+ file->private_data = buf;
+ return 0;
+}
+
+static int debug_registers_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private,
+ fill_registers_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
+static inline void create_debug_files (struct fusbh200_hcd *fusbh200)
+{
+ struct usb_bus *bus = &fusbh200_to_hcd(fusbh200)->self;
+
+ fusbh200->debug_dir = debugfs_create_dir(bus->bus_name, fusbh200_debug_root);
+ if (!fusbh200->debug_dir)
+ return;
+
+ if (!debugfs_create_file("async", S_IRUGO, fusbh200->debug_dir, bus,
+ &debug_async_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("periodic", S_IRUGO, fusbh200->debug_dir, bus,
+ &debug_periodic_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("registers", S_IRUGO, fusbh200->debug_dir, bus,
+ &debug_registers_fops))
+ goto file_error;
+
+ return;
+
+file_error:
+ debugfs_remove_recursive(fusbh200->debug_dir);
+}
+
+static inline void remove_debug_files (struct fusbh200_hcd *fusbh200)
+{
+ debugfs_remove_recursive(fusbh200->debug_dir);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done). There are two failure modes: the "usec" timeout has
+ * passed (major hardware flakeout), or the register reads as all-ones
+ * (hardware removed).
+ *
+ * That last failure should only happen in cases like physical cardbus eject
+ * before driver shutdown. But it also seems to be caused by bugs in cardbus
+ * bridge shutdown: shutting down the bridge before the devices using it.
+ */
+static int handshake (struct fusbh200_hcd *fusbh200, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
+{
+ u32 result;
+
+ do {
+ result = fusbh200_readl(fusbh200, ptr);
+ if (result == ~(u32)0) /* card removed */
+ return -ENODEV;
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay (1);
+ usec--;
+ } while (usec > 0);
+ return -ETIMEDOUT;
+}
+
+/*
+ * Force HC to halt state from unknown (EHCI spec section 2.3).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fusbh200_halt (struct fusbh200_hcd *fusbh200)
+{
+ u32 temp;
+
+ spin_lock_irq(&fusbh200->lock);
+
+ /* disable any irqs left enabled by previous code */
+ fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
+
+ /*
+ * This routine gets called during probe before fusbh200->command
+ * has been initialized, so we can't rely on its value.
+ */
+ fusbh200->command &= ~CMD_RUN;
+ temp = fusbh200_readl(fusbh200, &fusbh200->regs->command);
+ temp &= ~(CMD_RUN | CMD_IAAD);
+ fusbh200_writel(fusbh200, temp, &fusbh200->regs->command);
+
+ spin_unlock_irq(&fusbh200->lock);
+ synchronize_irq(fusbh200_to_hcd(fusbh200)->irq);
+
+ return handshake(fusbh200, &fusbh200->regs->status,
+ STS_HALT, STS_HALT, 16 * 125);
+}
+
+/*
+ * Reset a non-running (STS_HALT == 1) controller.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fusbh200_reset (struct fusbh200_hcd *fusbh200)
+{
+ int retval;
+ u32 command = fusbh200_readl(fusbh200, &fusbh200->regs->command);
+
+ /* If the EHCI debug controller is active, special care must be
+ * taken before and after a host controller reset */
+ if (fusbh200->debug && !dbgp_reset_prep(fusbh200_to_hcd(fusbh200)))
+ fusbh200->debug = NULL;
+
+ command |= CMD_RESET;
+ dbg_cmd (fusbh200, "reset", command);
+ fusbh200_writel(fusbh200, command, &fusbh200->regs->command);
+ fusbh200->rh_state = FUSBH200_RH_HALTED;
+ fusbh200->next_statechange = jiffies;
+ retval = handshake (fusbh200, &fusbh200->regs->command,
+ CMD_RESET, 0, 250 * 1000);
+
+ if (retval)
+ return retval;
+
+ if (fusbh200->debug)
+ dbgp_external_startup(fusbh200_to_hcd(fusbh200));
+
+ fusbh200->port_c_suspend = fusbh200->suspended_ports =
+ fusbh200->resuming_ports = 0;
+ return retval;
+}
+
+/*
+ * Idle the controller (turn off the schedules).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fusbh200_quiesce (struct fusbh200_hcd *fusbh200)
+{
+ u32 temp;
+
+ if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
+ return;
+
+ /* wait for any schedule enables/disables to take effect */
+ temp = (fusbh200->command << 10) & (STS_ASS | STS_PSS);
+ handshake(fusbh200, &fusbh200->regs->status, STS_ASS | STS_PSS, temp, 16 * 125);
+
+ /* then disable anything that's still active */
+ spin_lock_irq(&fusbh200->lock);
+ fusbh200->command &= ~(CMD_ASE | CMD_PSE);
+ fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+ spin_unlock_irq(&fusbh200->lock);
+
+ /* hardware can take 16 microframes to turn off ... */
+ handshake(fusbh200, &fusbh200->regs->status, STS_ASS | STS_PSS, 0, 16 * 125);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void end_unlink_async(struct fusbh200_hcd *fusbh200);
+static void unlink_empty_async(struct fusbh200_hcd *fusbh200);
+static void fusbh200_work(struct fusbh200_hcd *fusbh200);
+static void start_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
+static void end_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
+
+/*-------------------------------------------------------------------------*/
+
+/* Set a bit in the USBCMD register */
+static void fusbh200_set_command_bit(struct fusbh200_hcd *fusbh200, u32 bit)
+{
+ fusbh200->command |= bit;
+ fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+
+ /* unblock posted write */
+ fusbh200_readl(fusbh200, &fusbh200->regs->command);
+}
+
+/* Clear a bit in the USBCMD register */
+static void fusbh200_clear_command_bit(struct fusbh200_hcd *fusbh200, u32 bit)
+{
+ fusbh200->command &= ~bit;
+ fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+
+ /* unblock posted write */
+ fusbh200_readl(fusbh200, &fusbh200->regs->command);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI timer support... Now using hrtimers.
+ *
+ * Lots of different events are triggered from fusbh200->hrtimer. Whenever
+ * the timer routine runs, it checks each possible event; events that are
+ * currently enabled and whose expiration time has passed get handled.
+ * The set of enabled events is stored as a collection of bitflags in
+ * fusbh200->enabled_hrtimer_events, and they are numbered in order of
+ * increasing delay values (ranging between 1 ms and 100 ms).
+ *
+ * Rather than implementing a sorted list or tree of all pending events,
+ * we keep track only of the lowest-numbered pending event, in
+ * fusbh200->next_hrtimer_event. Whenever fusbh200->hrtimer gets restarted, its
+ * expiration time is set to the timeout value for this event.
+ *
+ * As a result, events might not get handled right away; the actual delay
+ * could be anywhere up to twice the requested delay. This doesn't
+ * matter, because none of the events are especially time-critical. The
+ * ones that matter most all have a delay of 1 ms, so they will be
+ * handled after 2 ms at most, which is okay. In addition to this, we
+ * allow for an expiration range of 1 ms.
+ */
+
+/*
+ * Delay lengths for the hrtimer event types.
+ * Keep this list sorted by delay length, in the same order as
+ * the event types indexed by enum fusbh200_hrtimer_event in fusbh200.h.
+ */
+static unsigned event_delays_ns[] = {
+ 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_ASS */
+ 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_PSS */
+ 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_DEAD */
+ 1125 * NSEC_PER_USEC, /* FUSBH200_HRTIMER_UNLINK_INTR */
+ 2 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_FREE_ITDS */
+ 6 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_ASYNC_UNLINKS */
+ 10 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_IAA_WATCHDOG */
+ 10 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_DISABLE_PERIODIC */
+ 15 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_DISABLE_ASYNC */
+ 100 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_IO_WATCHDOG */
+};
+
+/* Enable a pending hrtimer event */
+static void fusbh200_enable_event(struct fusbh200_hcd *fusbh200, unsigned event,
+ bool resched)
+{
+ ktime_t *timeout = &fusbh200->hr_timeouts[event];
+
+ if (resched)
+ *timeout = ktime_add(ktime_get(),
+ ktime_set(0, event_delays_ns[event]));
+ fusbh200->enabled_hrtimer_events |= (1 << event);
+
+ /* Track only the lowest-numbered pending event */
+ if (event < fusbh200->next_hrtimer_event) {
+ fusbh200->next_hrtimer_event = event;
+ hrtimer_start_range_ns(&fusbh200->hrtimer, *timeout,
+ NSEC_PER_MSEC, HRTIMER_MODE_ABS);
+ }
+}
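Editorial note: a usage sketch of the pattern the rest of this driver follows, not additional patch code; the helper name is hypothetical. A caller holding fusbh200->lock arms an event, and the matching entry in the event_handlers[] table below runs if the hrtimer expires first, for example the IAA watchdog around an async-advance doorbell request.

/* Sketch: ring the async-advance doorbell and arm the IAA watchdog.
 * Assumes the caller holds fusbh200->lock, as the real unlink paths do.
 */
static void start_iaa_cycle_sketch(struct fusbh200_hcd *fusbh200)
{
	/* ask the HC to interrupt us once it has advanced past unlinked QHs */
	fusbh200_writel(fusbh200, fusbh200->command | CMD_IAAD,
			&fusbh200->regs->command);

	/* if the IAA irq is lost, fusbh200_iaa_watchdog() cleans up instead */
	fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_IAA_WATCHDOG, true);
}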
+
+
+/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
+static void fusbh200_poll_ASS(struct fusbh200_hcd *fusbh200)
+{
+ unsigned actual, want;
+
+ /* Don't enable anything if the controller isn't running (e.g., died) */
+ if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
+ return;
+
+ want = (fusbh200->command & CMD_ASE) ? STS_ASS : 0;
+ actual = fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_ASS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 20 ms */
+ if (fusbh200->ASS_poll_count++ < 20) {
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_ASS, true);
+ return;
+ }
+ fusbh200_dbg(fusbh200, "Waited too long for the async schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ fusbh200->ASS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (fusbh200->async_count > 0)
+ fusbh200_set_command_bit(fusbh200, CMD_ASE);
+
+ } else { /* Running */
+ if (fusbh200->async_count == 0) {
+
+ /* Turn off the schedule after a while */
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_DISABLE_ASYNC,
+ true);
+ }
+ }
+}
+
+/* Turn off the async schedule after a brief delay */
+static void fusbh200_disable_ASE(struct fusbh200_hcd *fusbh200)
+{
+ fusbh200_clear_command_bit(fusbh200, CMD_ASE);
+}
+
+
+/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
+static void fusbh200_poll_PSS(struct fusbh200_hcd *fusbh200)
+{
+ unsigned actual, want;
+
+ /* Don't do anything if the controller isn't running (e.g., died) */
+ if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
+ return;
+
+ want = (fusbh200->command & CMD_PSE) ? STS_PSS : 0;
+ actual = fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_PSS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 20 ms */
+ if (fusbh200->PSS_poll_count++ < 20) {
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_PSS, true);
+ return;
+ }
+ fusbh200_dbg(fusbh200, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ fusbh200->PSS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (fusbh200->periodic_count > 0)
+ fusbh200_set_command_bit(fusbh200, CMD_PSE);
+
+ } else { /* Running */
+ if (fusbh200->periodic_count == 0) {
+
+ /* Turn off the schedule after a while */
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_DISABLE_PERIODIC,
+ true);
+ }
+ }
+}
+
+/* Turn off the periodic schedule after a brief delay */
+static void fusbh200_disable_PSE(struct fusbh200_hcd *fusbh200)
+{
+ fusbh200_clear_command_bit(fusbh200, CMD_PSE);
+}
+
+
+/* Poll the STS_HALT status bit; see when a dead controller stops */
+static void fusbh200_handle_controller_death(struct fusbh200_hcd *fusbh200)
+{
+ if (!(fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_HALT)) {
+
+ /* Give up after a few milliseconds */
+ if (fusbh200->died_poll_count++ < 5) {
+ /* Try again later */
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_DEAD, true);
+ return;
+ }
+ fusbh200_warn(fusbh200, "Waited too long for the controller to stop, giving up\n");
+ }
+
+ /* Clean up the mess */
+ fusbh200->rh_state = FUSBH200_RH_HALTED;
+ fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
+ fusbh200_work(fusbh200);
+ end_unlink_async(fusbh200);
+
+ /* Not in process context, so don't try to reset the controller */
+}
+
+
+/* Handle unlinked interrupt QHs once they are gone from the hardware */
+static void fusbh200_handle_intr_unlinks(struct fusbh200_hcd *fusbh200)
+{
+ bool stopped = (fusbh200->rh_state < FUSBH200_RH_RUNNING);
+
+ /*
+ * Process all the QHs on the intr_unlink list that were added
+ * before the current unlink cycle began. The list is in
+ * temporal order, so stop when we reach the first entry in the
+ * current cycle. But if the root hub isn't running then
+ * process all the QHs on the list.
+ */
+ fusbh200->intr_unlinking = true;
+ while (fusbh200->intr_unlink) {
+ struct fusbh200_qh *qh = fusbh200->intr_unlink;
+
+ if (!stopped && qh->unlink_cycle == fusbh200->intr_unlink_cycle)
+ break;
+ fusbh200->intr_unlink = qh->unlink_next;
+ qh->unlink_next = NULL;
+ end_unlink_intr(fusbh200, qh);
+ }
+
+ /* Handle remaining entries later */
+ if (fusbh200->intr_unlink) {
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_UNLINK_INTR, true);
+ ++fusbh200->intr_unlink_cycle;
+ }
+ fusbh200->intr_unlinking = false;
+}
+
+
+/* Start another free-iTDs/siTDs cycle */
+static void start_free_itds(struct fusbh200_hcd *fusbh200)
+{
+ if (!(fusbh200->enabled_hrtimer_events & BIT(FUSBH200_HRTIMER_FREE_ITDS))) {
+ fusbh200->last_itd_to_free = list_entry(
+ fusbh200->cached_itd_list.prev,
+ struct fusbh200_itd, itd_list);
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_FREE_ITDS, true);
+ }
+}
+
+/* Wait for controller to stop using old iTDs and siTDs */
+static void end_free_itds(struct fusbh200_hcd *fusbh200)
+{
+ struct fusbh200_itd *itd, *n;
+
+ if (fusbh200->rh_state < FUSBH200_RH_RUNNING) {
+ fusbh200->last_itd_to_free = NULL;
+ }
+
+ list_for_each_entry_safe(itd, n, &fusbh200->cached_itd_list, itd_list) {
+ list_del(&itd->itd_list);
+ dma_pool_free(fusbh200->itd_pool, itd, itd->itd_dma);
+ if (itd == fusbh200->last_itd_to_free)
+ break;
+ }
+
+ if (!list_empty(&fusbh200->cached_itd_list))
+ start_free_itds(fusbh200);
+}
+
+
+/* Handle lost (or very late) IAA interrupts */
+static void fusbh200_iaa_watchdog(struct fusbh200_hcd *fusbh200)
+{
+ if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
+ return;
+
+ /*
+ * Lost IAA irqs wedge things badly; seen first with a vt8235.
+ * So we need this watchdog, but must protect it against both
+ * (a) SMP races against real IAA firing and retriggering, and
+ * (b) clean HC shutdown, when IAA watchdog was pending.
+ */
+ if (fusbh200->async_iaa) {
+ u32 cmd, status;
+
+ /* If we get here, IAA is *REALLY* late. It's barely
+ * conceivable that the system is so busy that CMD_IAAD
+ * is still legitimately set, so let's be sure it's
+ * clear before we read STS_IAA. (The HC should clear
+ * CMD_IAAD when it sets STS_IAA.)
+ */
+ cmd = fusbh200_readl(fusbh200, &fusbh200->regs->command);
+
+ /*
+ * If IAA is set here it either legitimately triggered
+ * after the watchdog timer expired (_way_ late, so we'll
+ * still count it as lost) ... or a silicon erratum:
+ * - VIA seems to set IAA without triggering the IRQ;
+ * - IAAD potentially cleared without setting IAA.
+ */
+ status = fusbh200_readl(fusbh200, &fusbh200->regs->status);
+ if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
+ COUNT(fusbh200->stats.lost_iaa);
+ fusbh200_writel(fusbh200, STS_IAA, &fusbh200->regs->status);
+ }
+
+ fusbh200_dbg(fusbh200, "IAA watchdog: status %x cmd %x\n",
+ status, cmd);
+ end_unlink_async(fusbh200);
+ }
+}
+
+
+/* Enable the I/O watchdog, if appropriate */
+static void turn_on_io_watchdog(struct fusbh200_hcd *fusbh200)
+{
+ /* Not needed if the controller isn't running or it's already enabled */
+ if (fusbh200->rh_state != FUSBH200_RH_RUNNING ||
+ (fusbh200->enabled_hrtimer_events &
+ BIT(FUSBH200_HRTIMER_IO_WATCHDOG)))
+ return;
+
+ /*
+ * Isochronous transfers always need the watchdog.
+ * For other sorts we use it only if the flag is set.
+ */
+ if (fusbh200->isoc_count > 0 || (fusbh200->need_io_watchdog &&
+ fusbh200->async_count + fusbh200->intr_count > 0))
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_IO_WATCHDOG, true);
+}
+
+
+/*
+ * Handler functions for the hrtimer event types.
+ * Keep this array in the same order as the event types indexed by
+ * enum fusbh200_hrtimer_event in fusbh200.h.
+ */
+static void (*event_handlers[])(struct fusbh200_hcd *) = {
+ fusbh200_poll_ASS, /* FUSBH200_HRTIMER_POLL_ASS */
+ fusbh200_poll_PSS, /* FUSBH200_HRTIMER_POLL_PSS */
+ fusbh200_handle_controller_death, /* FUSBH200_HRTIMER_POLL_DEAD */
+ fusbh200_handle_intr_unlinks, /* FUSBH200_HRTIMER_UNLINK_INTR */
+ end_free_itds, /* FUSBH200_HRTIMER_FREE_ITDS */
+ unlink_empty_async, /* FUSBH200_HRTIMER_ASYNC_UNLINKS */
+ fusbh200_iaa_watchdog, /* FUSBH200_HRTIMER_IAA_WATCHDOG */
+ fusbh200_disable_PSE, /* FUSBH200_HRTIMER_DISABLE_PERIODIC */
+ fusbh200_disable_ASE, /* FUSBH200_HRTIMER_DISABLE_ASYNC */
+ fusbh200_work, /* FUSBH200_HRTIMER_IO_WATCHDOG */
+};
+
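+/* hrtimer callback: with fusbh200->lock held, run the handler for every
+ * enabled event whose deadline has passed and re-arm the ones that haven't.
+ */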
+static enum hrtimer_restart fusbh200_hrtimer_func(struct hrtimer *t)
+{
+ struct fusbh200_hcd *fusbh200 = container_of(t, struct fusbh200_hcd, hrtimer);
+ ktime_t now;
+ unsigned long events;
+ unsigned long flags;
+ unsigned e;
+
+ spin_lock_irqsave(&fusbh200->lock, flags);
+
+ events = fusbh200->enabled_hrtimer_events;
+ fusbh200->enabled_hrtimer_events = 0;
+ fusbh200->next_hrtimer_event = FUSBH200_HRTIMER_NO_EVENT;
+
+ /*
+ * Check each pending event. If its time has expired, handle
+ * the event; otherwise re-enable it.
+ */
+ now = ktime_get();
+ for_each_set_bit(e, &events, FUSBH200_HRTIMER_NUM_EVENTS) {
+ if (now.tv64 >= fusbh200->hr_timeouts[e].tv64)
+ event_handlers[e](fusbh200);
+ else
+ fusbh200_enable_event(fusbh200, e, false);
+ }
+
+ spin_unlock_irqrestore(&fusbh200->lock, flags);
+ return HRTIMER_NORESTART;
+}
+
+/*-------------------------------------------------------------------------*/
+
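+/* Root-hub bus suspend/resume is not implemented; leave the hub ops unset. */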
+#define fusbh200_bus_suspend NULL
+#define fusbh200_bus_resume NULL
+
+/*-------------------------------------------------------------------------*/
+
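+/* After a port reset, check whether the port came up enabled; there is
+ * no companion controller to hand full- or low-speed devices off to.
+ */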
+static int check_reset_complete (
+ struct fusbh200_hcd *fusbh200,
+ int index,
+ u32 __iomem *status_reg,
+ int port_status
+) {
+ if (!(port_status & PORT_CONNECT))
+ return port_status;
+
+ /* if reset finished and it's still not enabled -- handoff */
+ if (!(port_status & PORT_PE)) {
+ /* with integrated TT, there's nobody to hand it to! */
+ fusbh200_dbg (fusbh200,
+ "Failed to enable port %d on root hub TT\n",
+ index+1);
+ return port_status;
+ } else {
+ fusbh200_dbg(fusbh200, "port %d reset complete, port enabled\n",
+ index + 1);
+ }
+
+ return port_status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+
+/* build "status change" packet (one or two bytes) from HC registers */
+
+static int
+fusbh200_hub_status_data (struct usb_hcd *hcd, char *buf)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ u32 temp, status;
+ u32 mask;
+ int retval = 1;
+ unsigned long flags;
+
+ /* init status to no-changes */
+ buf [0] = 0;
+
+ /* Inform the core about resumes-in-progress by returning
+ * a non-zero value even if there are no status changes.
+ */
+ status = fusbh200->resuming_ports;
+
+ mask = PORT_CSC | PORT_PEC;
+ // PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND
+
+ /* no hub change reports (bit 0) for now (power, ...) */
+
+ /* port N changes (bit N)? */
+ spin_lock_irqsave (&fusbh200->lock, flags);
+
+ temp = fusbh200_readl(fusbh200, &fusbh200->regs->port_status);
+
+ /*
+ * Return status information even for ports with OWNER set.
+ * Otherwise khubd wouldn't see the disconnect event when a
+ * high-speed device is switched over to the companion
+ * controller by the user.
+ */
+
+ if ((temp & mask) != 0 || test_bit(0, &fusbh200->port_c_suspend)
+ || (fusbh200->reset_done[0] && time_after_eq(
+ jiffies, fusbh200->reset_done[0]))) {
+ buf [0] |= 1 << 1;
+ status = STS_PCD;
+ }
+ /* FIXME autosuspend idle root hubs */
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ return status ? retval : 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
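+/* Build the root hub's hub descriptor from the HCS parameters. */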
+static void
+fusbh200_hub_descriptor (
+ struct fusbh200_hcd *fusbh200,
+ struct usb_hub_descriptor *desc
+) {
+ int ports = HCS_N_PORTS (fusbh200->hcs_params);
+ u16 temp;
+
+ desc->bDescriptorType = 0x29;
+ desc->bPwrOn2PwrGood = 10; /* EHCI 1.0, 2.3.9 says 20ms max */
+ desc->bHubContrCurrent = 0;
+
+ desc->bNbrPorts = ports;
+ temp = 1 + (ports / 8);
+ desc->bDescLength = 7 + 2 * temp;
+
+ /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
+
+ temp = 0x0008; /* per-port overcurrent reporting */
+ temp |= 0x0002; /* no power switching */
+ desc->wHubCharacteristics = cpu_to_le16(temp);
+}
+
+/*-------------------------------------------------------------------------*/
+
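+/* Handle hub-class control requests for the root hub. */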
+static int fusbh200_hub_control (
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+) {
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ int ports = HCS_N_PORTS (fusbh200->hcs_params);
+ u32 __iomem *status_reg = &fusbh200->regs->port_status;
+ u32 temp, temp1, status;
+ unsigned long flags;
+ int retval = 0;
+ unsigned selector;
+
+ /*
+ * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
+ * HCS_INDICATOR may say we can change LEDs to off/amber/green.
+ * (track current state ourselves) ... blink for diagnostics,
+ * power, "this is the one", etc. EHCI spec supports this.
+ */
+
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ switch (typeReq) {
+ case ClearHubFeature:
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* no hub-wide feature/status flags */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case ClearPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ temp = fusbh200_readl(fusbh200, status_reg);
+ temp &= ~PORT_RWC_BITS;
+
+ /*
+ * Even if OWNER is set, so the port is owned by the
+ * companion controller, khubd needs to be able to clear
+ * the port-change status bits (especially
+ * USB_PORT_STAT_C_CONNECTION).
+ */
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ fusbh200_writel(fusbh200, temp & ~PORT_PE, status_reg);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ fusbh200_writel(fusbh200, temp | PORT_PEC, status_reg);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ if (temp & PORT_RESET)
+ goto error;
+ if (!(temp & PORT_SUSPEND))
+ break;
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
+ /* resume signaling for 20 msec */
+ fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
+ fusbh200->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(wIndex, &fusbh200->port_c_suspend);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ fusbh200_writel(fusbh200, temp | PORT_CSC, status_reg);
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ fusbh200_writel(fusbh200, temp | BMISR_OVC, &fusbh200->regs->bmisr);
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ /* GetPortStatus clears reset */
+ break;
+ default:
+ goto error;
+ }
+ fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted write */
+ break;
+ case GetHubDescriptor:
+ fusbh200_hub_descriptor (fusbh200, (struct usb_hub_descriptor *)
+ buf);
+ break;
+ case GetHubStatus:
+ /* no hub-wide feature/status flags */
+ memset (buf, 0, 4);
+ //cpu_to_le32s ((u32 *) buf);
+ break;
+ case GetPortStatus:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ status = 0;
+ temp = fusbh200_readl(fusbh200, status_reg);
+
+ // wPortChange bits
+ if (temp & PORT_CSC)
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
+ if (temp & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+
+ temp1 = fusbh200_readl(fusbh200, &fusbh200->regs->bmisr);
+ if (temp1 & BMISR_OVC)
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+ /* whoever resumes must GetPortStatus to complete it!! */
+ if (temp & PORT_RESUME) {
+
+ /* Remote Wakeup received? */
+ if (!fusbh200->reset_done[wIndex]) {
+ /* resume signaling for 20 msec */
+ fusbh200->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
+ /* check the port again */
+ mod_timer(&fusbh200_to_hcd(fusbh200)->rh_timer,
+ fusbh200->reset_done[wIndex]);
+ }
+
+ /* resume completed? */
+ else if (time_after_eq(jiffies,
+ fusbh200->reset_done[wIndex])) {
+ clear_bit(wIndex, &fusbh200->suspended_ports);
+ set_bit(wIndex, &fusbh200->port_c_suspend);
+ fusbh200->reset_done[wIndex] = 0;
+
+ /* stop resume signaling */
+ temp = fusbh200_readl(fusbh200, status_reg);
+ fusbh200_writel(fusbh200,
+ temp & ~(PORT_RWC_BITS | PORT_RESUME),
+ status_reg);
+ clear_bit(wIndex, &fusbh200->resuming_ports);
+ retval = handshake(fusbh200, status_reg,
+ PORT_RESUME, 0, 2000 /* 2msec */);
+ if (retval != 0) {
+ fusbh200_err(fusbh200,
+ "port %d resume error %d\n",
+ wIndex + 1, retval);
+ goto error;
+ }
+ temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
+ }
+ }
+
+ /* whoever resets must GetPortStatus to complete it!! */
+ if ((temp & PORT_RESET)
+ && time_after_eq(jiffies,
+ fusbh200->reset_done[wIndex])) {
+ status |= USB_PORT_STAT_C_RESET << 16;
+ fusbh200->reset_done [wIndex] = 0;
+ clear_bit(wIndex, &fusbh200->resuming_ports);
+
+ /* force reset to complete */
+ fusbh200_writel(fusbh200, temp & ~(PORT_RWC_BITS | PORT_RESET),
+ status_reg);
+ /* REVISIT: some hardware needs 550+ usec to clear
+ * this bit; seems too long to spin routinely...
+ */
+ retval = handshake(fusbh200, status_reg,
+ PORT_RESET, 0, 1000);
+ if (retval != 0) {
+ fusbh200_err (fusbh200, "port %d reset error %d\n",
+ wIndex + 1, retval);
+ goto error;
+ }
+
+ /* see what we found out */
+ temp = check_reset_complete (fusbh200, wIndex, status_reg,
+ fusbh200_readl(fusbh200, status_reg));
+ }
+
+ if (!(temp & (PORT_RESUME|PORT_RESET))) {
+ fusbh200->reset_done[wIndex] = 0;
+ clear_bit(wIndex, &fusbh200->resuming_ports);
+ }
+
+ /* transfer dedicated ports to the companion hc */
+ if ((temp & PORT_CONNECT) &&
+ test_bit(wIndex, &fusbh200->companion_ports)) {
+ temp &= ~PORT_RWC_BITS;
+ fusbh200_writel(fusbh200, temp, status_reg);
+ fusbh200_dbg(fusbh200, "port %d --> companion\n", wIndex + 1);
+ temp = fusbh200_readl(fusbh200, status_reg);
+ }
+
+ /*
+ * Even if OWNER is set, there's no harm letting khubd
+ * see the wPortStatus values (they should all be 0 except
+ * for PORT_POWER anyway).
+ */
+
+ if (temp & PORT_CONNECT) {
+ status |= USB_PORT_STAT_CONNECTION;
+ status |= fusbh200_port_speed(fusbh200, temp);
+ }
+ if (temp & PORT_PE)
+ status |= USB_PORT_STAT_ENABLE;
+
+ /* maybe the port was unsuspended without our knowledge */
+ if (temp & (PORT_SUSPEND|PORT_RESUME)) {
+ status |= USB_PORT_STAT_SUSPEND;
+ } else if (test_bit(wIndex, &fusbh200->suspended_ports)) {
+ clear_bit(wIndex, &fusbh200->suspended_ports);
+ clear_bit(wIndex, &fusbh200->resuming_ports);
+ fusbh200->reset_done[wIndex] = 0;
+ if (temp & PORT_PE)
+ set_bit(wIndex, &fusbh200->port_c_suspend);
+ }
+
+ temp1 = fusbh200_readl(fusbh200, &fusbh200->regs->bmisr);
+ if (temp1 & BMISR_OVC)
+ status |= USB_PORT_STAT_OVERCURRENT;
+ if (temp & PORT_RESET)
+ status |= USB_PORT_STAT_RESET;
+ if (test_bit(wIndex, &fusbh200->port_c_suspend))
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
+
+ if (status & ~0xffff) /* only if wPortChange is interesting */
+ dbg_port(fusbh200, "GetStatus", wIndex + 1, temp);
+ put_unaligned_le32(status, buf);
+ break;
+ case SetHubFeature:
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* no hub-wide feature/status flags */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case SetPortFeature:
+ selector = wIndex >> 8;
+ wIndex &= 0xff;
+
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ temp = fusbh200_readl(fusbh200, status_reg);
+ temp &= ~PORT_RWC_BITS;
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ if ((temp & PORT_PE) == 0
+ || (temp & PORT_RESET) != 0)
+ goto error;
+
+ /* After the above check the port must be connected.
+ * Setting the suspend bit could also put the PHY into
+ * low-power mode if the controller has the hostpc feature.
+ */
+ fusbh200_writel(fusbh200, temp | PORT_SUSPEND, status_reg);
+ set_bit(wIndex, &fusbh200->suspended_ports);
+ break;
+ case USB_PORT_FEAT_RESET:
+ if (temp & PORT_RESUME)
+ goto error;
+ /* line status bits may report this as low speed,
+ * which can be fine if this root hub has a
+ * transaction translator built in.
+ */
+ fusbh200_dbg(fusbh200, "port %d reset\n", wIndex + 1);
+ temp |= PORT_RESET;
+ temp &= ~PORT_PE;
+
+ /*
+ * caller must wait, then call GetPortStatus
+ * usb 2.0 spec says 50 ms resets on root
+ */
+ fusbh200->reset_done [wIndex] = jiffies
+ + msecs_to_jiffies (50);
+ fusbh200_writel(fusbh200, temp, status_reg);
+ break;
+
+ /* For downstream facing ports (these): one hub port is put
+ * into test mode according to USB2 11.24.2.13, then the hub
+ * must be reset (which for root hub now means rmmod+modprobe,
+ * or else system reboot). See EHCI 2.3.9 and 4.14 for info
+ * about the EHCI-specific stuff.
+ */
+ case USB_PORT_FEAT_TEST:
+ if (!selector || selector > 5)
+ goto error;
+ spin_unlock_irqrestore(&fusbh200->lock, flags);
+ fusbh200_quiesce(fusbh200);
+ spin_lock_irqsave(&fusbh200->lock, flags);
+
+ /* Put all enabled ports into suspend */
+ temp = fusbh200_readl(fusbh200, status_reg) & ~PORT_RWC_BITS;
+ if (temp & PORT_PE)
+ fusbh200_writel(fusbh200, temp | PORT_SUSPEND,
+ status_reg);
+
+ spin_unlock_irqrestore(&fusbh200->lock, flags);
+ fusbh200_halt(fusbh200);
+ spin_lock_irqsave(&fusbh200->lock, flags);
+
+ temp = fusbh200_readl(fusbh200, status_reg);
+ temp |= selector << 16;
+ fusbh200_writel(fusbh200, temp, status_reg);
+ break;
+
+ default:
+ goto error;
+ }
+ fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted writes */
+ break;
+
+ default:
+error:
+ /* "stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ return retval;
+}
+
+static void __maybe_unused fusbh200_relinquish_port(struct usb_hcd *hcd,
+ int portnum)
+{
+ return;
+}
+
+static int __maybe_unused fusbh200_port_handed_over(struct usb_hcd *hcd,
+ int portnum)
+{
+ return 0;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * There are basically three types of memory:
+ * - data used only by the HCD ... kmalloc is fine
+ * - async and periodic schedules, shared by HC and HCD ... these
+ * need to use dma_pool or dma_alloc_coherent
+ * - driver buffers, read/written by HC ... single shot DMA mapped
+ *
+ * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
+ * No memory seen by this driver is pageable.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/* Allocate the key transfer structures from the previously allocated pool */
+
+static inline void fusbh200_qtd_init(struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd,
+ dma_addr_t dma)
+{
+ memset (qtd, 0, sizeof *qtd);
+ qtd->qtd_dma = dma;
+ qtd->hw_token = cpu_to_hc32(fusbh200, QTD_STS_HALT);
+ qtd->hw_next = FUSBH200_LIST_END(fusbh200);
+ qtd->hw_alt_next = FUSBH200_LIST_END(fusbh200);
+ INIT_LIST_HEAD (&qtd->qtd_list);
+}
+
+static struct fusbh200_qtd *fusbh200_qtd_alloc (struct fusbh200_hcd *fusbh200, gfp_t flags)
+{
+ struct fusbh200_qtd *qtd;
+ dma_addr_t dma;
+
+ qtd = dma_pool_alloc (fusbh200->qtd_pool, flags, &dma);
+ if (qtd != NULL) {
+ fusbh200_qtd_init(fusbh200, qtd, dma);
+ }
+ return qtd;
+}
+
+static inline void fusbh200_qtd_free (struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd)
+{
+ dma_pool_free (fusbh200->qtd_pool, qtd, qtd->qtd_dma);
+}
+
+
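+/* Free a qh and its dummy qtd; the qh must be idle and its qtd list empty. */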
+static void qh_destroy(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ /* clean qtds first, and know this is not linked */
+ if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
+ fusbh200_dbg (fusbh200, "unused qh not empty!\n");
+ BUG ();
+ }
+ if (qh->dummy)
+ fusbh200_qtd_free (fusbh200, qh->dummy);
+ dma_pool_free(fusbh200->qh_pool, qh->hw, qh->qh_dma);
+ kfree(qh);
+}
+
+static struct fusbh200_qh *fusbh200_qh_alloc (struct fusbh200_hcd *fusbh200, gfp_t flags)
+{
+ struct fusbh200_qh *qh;
+ dma_addr_t dma;
+
+ qh = kzalloc(sizeof *qh, GFP_ATOMIC);
+ if (!qh)
+ goto done;
+ qh->hw = (struct fusbh200_qh_hw *)
+ dma_pool_alloc(fusbh200->qh_pool, flags, &dma);
+ if (!qh->hw)
+ goto fail;
+ memset(qh->hw, 0, sizeof *qh->hw);
+ qh->qh_dma = dma;
+ // INIT_LIST_HEAD (&qh->qh_list);
+ INIT_LIST_HEAD (&qh->qtd_list);
+
+ /* dummy td enables safe urb queuing */
+ qh->dummy = fusbh200_qtd_alloc (fusbh200, flags);
+ if (qh->dummy == NULL) {
+ fusbh200_dbg (fusbh200, "no dummy td\n");
+ goto fail1;
+ }
+done:
+ return qh;
+fail1:
+ dma_pool_free(fusbh200->qh_pool, qh->hw, qh->qh_dma);
+fail:
+ kfree(qh);
+ return NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* The queue heads and transfer descriptors are managed from pools tied
+ * to each of the "per device" structures.
+ * This is the initialisation and cleanup code.
+ */
+
+static void fusbh200_mem_cleanup (struct fusbh200_hcd *fusbh200)
+{
+ if (fusbh200->async)
+ qh_destroy(fusbh200, fusbh200->async);
+ fusbh200->async = NULL;
+
+ if (fusbh200->dummy)
+ qh_destroy(fusbh200, fusbh200->dummy);
+ fusbh200->dummy = NULL;
+
+ /* DMA consistent memory and pools */
+ if (fusbh200->qtd_pool)
+ dma_pool_destroy (fusbh200->qtd_pool);
+ fusbh200->qtd_pool = NULL;
+
+ if (fusbh200->qh_pool) {
+ dma_pool_destroy (fusbh200->qh_pool);
+ fusbh200->qh_pool = NULL;
+ }
+
+ if (fusbh200->itd_pool)
+ dma_pool_destroy (fusbh200->itd_pool);
+ fusbh200->itd_pool = NULL;
+
+ if (fusbh200->periodic)
+ dma_free_coherent (fusbh200_to_hcd(fusbh200)->self.controller,
+ fusbh200->periodic_size * sizeof (u32),
+ fusbh200->periodic, fusbh200->periodic_dma);
+ fusbh200->periodic = NULL;
+
+ /* shadow periodic table */
+ kfree(fusbh200->pshadow);
+ fusbh200->pshadow = NULL;
+}
+
+/* remember to add cleanup code (above) if you add anything here */
+static int fusbh200_mem_init (struct fusbh200_hcd *fusbh200, gfp_t flags)
+{
+ int i;
+
+ /* QTDs for control/bulk/intr transfers */
+ fusbh200->qtd_pool = dma_pool_create ("fusbh200_qtd",
+ fusbh200_to_hcd(fusbh200)->self.controller,
+ sizeof (struct fusbh200_qtd),
+ 32 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fusbh200->qtd_pool) {
+ goto fail;
+ }
+
+ /* QHs for control/bulk/intr transfers */
+ fusbh200->qh_pool = dma_pool_create ("fusbh200_qh",
+ fusbh200_to_hcd(fusbh200)->self.controller,
+ sizeof(struct fusbh200_qh_hw),
+ 32 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fusbh200->qh_pool) {
+ goto fail;
+ }
+ fusbh200->async = fusbh200_qh_alloc (fusbh200, flags);
+ if (!fusbh200->async) {
+ goto fail;
+ }
+
+ /* ITD for high speed ISO transfers */
+ fusbh200->itd_pool = dma_pool_create ("fusbh200_itd",
+ fusbh200_to_hcd(fusbh200)->self.controller,
+ sizeof (struct fusbh200_itd),
+ 64 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fusbh200->itd_pool) {
+ goto fail;
+ }
+
+ /* Hardware periodic table */
+ fusbh200->periodic = (__le32 *)
+ dma_alloc_coherent (fusbh200_to_hcd(fusbh200)->self.controller,
+ fusbh200->periodic_size * sizeof(__le32),
+ &fusbh200->periodic_dma, 0);
+ if (fusbh200->periodic == NULL) {
+ goto fail;
+ }
+
+ for (i = 0; i < fusbh200->periodic_size; i++)
+ fusbh200->periodic[i] = FUSBH200_LIST_END(fusbh200);
+
+ /* software shadow of hardware table */
+ fusbh200->pshadow = kcalloc(fusbh200->periodic_size, sizeof(void *), flags);
+ if (fusbh200->pshadow != NULL)
+ return 0;
+
+fail:
+ fusbh200_dbg (fusbh200, "couldn't init memory\n");
+ fusbh200_mem_cleanup (fusbh200);
+ return -ENOMEM;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
+ *
+ * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
+ * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
+ * buffers needed for the larger number). We use one QH per endpoint, queue
+ * multiple urbs (all three types) per endpoint. URBs may need several qtds.
+ *
+ * ISO traffic uses "ISO TD" (itd) records, and (along with
+ * interrupts) needs careful scheduling. Performance improvements can be
+ * an ongoing challenge. That's in the scheduling code later in this file.
+ *
+ * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
+ * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
+ * (b) special fields in qh entries or (c) split iso entries. TTs will
+ * buffer low/full speed data so the host collects it at high speed.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/* fill a qtd, returning how much of the buffer we were able to queue up */
+
+static int
+qtd_fill(struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd, dma_addr_t buf,
+ size_t len, int token, int maxpacket)
+{
+ int i, count;
+ u64 addr = buf;
+
+ /* one buffer entry per 4K ... first might be short or unaligned */
+ qtd->hw_buf[0] = cpu_to_hc32(fusbh200, (u32)addr);
+ qtd->hw_buf_hi[0] = cpu_to_hc32(fusbh200, (u32)(addr >> 32));
+ count = 0x1000 - (buf & 0x0fff); /* rest of that page */
+ if (likely (len < count)) /* ... iff needed */
+ count = len;
+ else {
+ buf += 0x1000;
+ buf &= ~0x0fff;
+
+ /* per-qtd limit: from 16K to 20K (best alignment) */
+ for (i = 1; count < len && i < 5; i++) {
+ addr = buf;
+ qtd->hw_buf[i] = cpu_to_hc32(fusbh200, (u32)addr);
+ qtd->hw_buf_hi[i] = cpu_to_hc32(fusbh200,
+ (u32)(addr >> 32));
+ buf += 0x1000;
+ if ((count + 0x1000) < len)
+ count += 0x1000;
+ else
+ count = len;
+ }
+
+ /* short packets may only terminate transfers */
+ if (count != len)
+ count -= (count % maxpacket);
+ }
+ qtd->hw_token = cpu_to_hc32(fusbh200, (count << 16) | token);
+ qtd->length = count;
+
+ return count;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+qh_update (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh, struct fusbh200_qtd *qtd)
+{
+ struct fusbh200_qh_hw *hw = qh->hw;
+
+ /* writes to an active overlay are unsafe */
+ BUG_ON(qh->qh_state != QH_STATE_IDLE);
+
+ hw->hw_qtd_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
+ hw->hw_alt_next = FUSBH200_LIST_END(fusbh200);
+
+ /* Except for control endpoints, we make hardware maintain data
+ * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
+ * and set the pseudo-toggle in udev. Only usb_clear_halt() will
+ * ever clear it.
+ */
+ if (!(hw->hw_info1 & cpu_to_hc32(fusbh200, QH_TOGGLE_CTL))) {
+ unsigned is_out, epnum;
+
+ is_out = qh->is_out;
+ epnum = (hc32_to_cpup(fusbh200, &hw->hw_info1) >> 8) & 0x0f;
+ if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
+ hw->hw_token &= ~cpu_to_hc32(fusbh200, QTD_TOGGLE);
+ usb_settoggle (qh->dev, epnum, is_out, 1);
+ }
+ }
+
+ hw->hw_token &= cpu_to_hc32(fusbh200, QTD_TOGGLE | QTD_STS_PING);
+}
+
+/* if it weren't for a common silicon quirk (writing the dummy into the qh
+ * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
+ * recovery (including urb dequeue) would need software changes to a QH...
+ */
+static void
+qh_refresh (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ struct fusbh200_qtd *qtd;
+
+ if (list_empty (&qh->qtd_list))
+ qtd = qh->dummy;
+ else {
+ qtd = list_entry (qh->qtd_list.next,
+ struct fusbh200_qtd, qtd_list);
+ /*
+ * first qtd may already be partially processed.
+ * If we come here during unlink, the QH overlay region
+ * might have reference to the just unlinked qtd. The
+ * qtd is updated in qh_completions(). Update the QH
+ * overlay here.
+ */
+ if (cpu_to_hc32(fusbh200, qtd->qtd_dma) == qh->hw->hw_current) {
+ qh->hw->hw_qtd_next = qtd->hw_next;
+ qtd = NULL;
+ }
+ }
+
+ if (qtd)
+ qh_update (fusbh200, qh, qtd);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void qh_link_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
+
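+/* Completion callback for usb_hub_clear_tt_buffer(): once the Clear-TT-Buffer
+ * request is done, relink the QH if it has work queued and the HC is running.
+ */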
+static void fusbh200_clear_tt_buffer_complete(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
+ struct fusbh200_qh *qh = ep->hcpriv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fusbh200->lock, flags);
+ qh->clearing_tt = 0;
+ if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
+ && fusbh200->rh_state == FUSBH200_RH_RUNNING)
+ qh_link_async(fusbh200, qh);
+ spin_unlock_irqrestore(&fusbh200->lock, flags);
+}
+
+static void fusbh200_clear_tt_buffer(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh,
+ struct urb *urb, u32 token)
+{
+
+ /* If an async split transaction gets an error or is unlinked,
+ * the TT buffer may be left in an indeterminate state. We
+ * have to clear the TT buffer.
+ *
+ * Note: this routine is never called for Isochronous transfers.
+ */
+ if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
+ struct usb_device *tt = urb->dev->tt->hub;
+
+ dev_dbg(&tt->dev,
+ "clear tt buffer port %d, a%d ep%d t%08x\n",
+ urb->dev->ttport, urb->dev->devnum,
+ usb_pipeendpoint(urb->pipe), token);
+
+ if (urb->dev->tt->hub !=
+ fusbh200_to_hcd(fusbh200)->self.root_hub) {
+ if (usb_hub_clear_tt_buffer(urb) == 0)
+ qh->clearing_tt = 1;
+ }
+ }
+}
+
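+/* Translate a completed qtd's token bits into a URB status code and
+ * account for the bytes actually transferred.
+ */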
+static int qtd_copy_status (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ size_t length,
+ u32 token
+)
+{
+ int status = -EINPROGRESS;
+
+ /* count IN/OUT bytes, not SETUP (even short packets) */
+ if (likely (QTD_PID (token) != 2))
+ urb->actual_length += length - QTD_LENGTH (token);
+
+ /* don't modify error codes */
+ if (unlikely(urb->unlinked))
+ return status;
+
+ /* force cleanup after short read; not always an error */
+ if (unlikely (IS_SHORT_READ (token)))
+ status = -EREMOTEIO;
+
+ /* serious "can't proceed" faults reported by the hardware */
+ if (token & QTD_STS_HALT) {
+ if (token & QTD_STS_BABBLE) {
+ /* FIXME "must" disable babbling device's port too */
+ status = -EOVERFLOW;
+ /* CERR nonzero + halt --> stall */
+ } else if (QTD_CERR(token)) {
+ status = -EPIPE;
+
+ /* In theory, more than one of the following bits can be set
+ * since they are sticky and the transaction is retried.
+ * Which to test first is rather arbitrary.
+ */
+ } else if (token & QTD_STS_MMF) {
+ /* fs/ls interrupt xfer missed the complete-split */
+ status = -EPROTO;
+ } else if (token & QTD_STS_DBE) {
+ status = (QTD_PID (token) == 1) /* IN ? */
+ ? -ENOSR /* hc couldn't read data */
+ : -ECOMM; /* hc couldn't write data */
+ } else if (token & QTD_STS_XACT) {
+ /* timeout, bad CRC, wrong PID, etc */
+ fusbh200_dbg(fusbh200, "devpath %s ep%d%s 3strikes\n",
+ urb->dev->devpath,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out");
+ status = -EPROTO;
+ } else { /* unknown */
+ status = -EPROTO;
+ }
+
+ fusbh200_dbg(fusbh200,
+ "dev%d ep%d%s qtd token %08x --> status %d\n",
+ usb_pipedevice (urb->pipe),
+ usb_pipeendpoint (urb->pipe),
+ usb_pipein (urb->pipe) ? "in" : "out",
+ token, status);
+ }
+
+ return status;
+}
+
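+/* Hand a completed URB back to its driver; fusbh200->lock is dropped
+ * around usb_hcd_giveback_urb() because complete() may reenter this HCD.
+ */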
+static void
+fusbh200_urb_done(struct fusbh200_hcd *fusbh200, struct urb *urb, int status)
+__releases(fusbh200->lock)
+__acquires(fusbh200->lock)
+{
+ if (likely (urb->hcpriv != NULL)) {
+ struct fusbh200_qh *qh = (struct fusbh200_qh *) urb->hcpriv;
+
+ /* S-mask in a QH means it's an interrupt urb */
+ if ((qh->hw->hw_info2 & cpu_to_hc32(fusbh200, QH_SMASK)) != 0) {
+
+ /* ... update hc-wide periodic stats (for usbfs) */
+ fusbh200_to_hcd(fusbh200)->self.bandwidth_int_reqs--;
+ }
+ }
+
+ if (unlikely(urb->unlinked)) {
+ COUNT(fusbh200->stats.unlink);
+ } else {
+ /* report non-error and short read status as zero */
+ if (status == -EINPROGRESS || status == -EREMOTEIO)
+ status = 0;
+ COUNT(fusbh200->stats.complete);
+ }
+
+#ifdef FUSBH200_URB_TRACE
+ fusbh200_dbg (fusbh200,
+ "%s %s urb %p ep%d%s status %d len %d/%d\n",
+ __func__, urb->dev->devpath, urb,
+ usb_pipeendpoint (urb->pipe),
+ usb_pipein (urb->pipe) ? "in" : "out",
+ status,
+ urb->actual_length, urb->transfer_buffer_length);
+#endif
+
+ /* complete() can reenter this HCD */
+ usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
+ spin_unlock (&fusbh200->lock);
+ usb_hcd_giveback_urb(fusbh200_to_hcd(fusbh200), urb, status);
+ spin_lock (&fusbh200->lock);
+}
+
+static int qh_schedule (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
+
+/*
+ * Process and free completed qtds for a qh, returning URBs to drivers.
+ * Chases up to qh->hw_current. Returns number of completions called,
+ * indicating how much "real" work we did.
+ */
+static unsigned
+qh_completions (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ struct fusbh200_qtd *last, *end = qh->dummy;
+ struct list_head *entry, *tmp;
+ int last_status;
+ int stopped;
+ unsigned count = 0;
+ u8 state;
+ struct fusbh200_qh_hw *hw = qh->hw;
+
+ if (unlikely (list_empty (&qh->qtd_list)))
+ return count;
+
+ /* completions (or tasks on other cpus) must never clobber HALT
+ * till we've gone through and cleaned everything up, even when
+ * they add urbs to this qh's queue or mark them for unlinking.
+ *
+ * NOTE: unlinking expects to be done in queue order.
+ *
+ * It's a bug for qh->qh_state to be anything other than
+ * QH_STATE_IDLE, unless our caller is scan_async() or
+ * scan_intr().
+ */
+ state = qh->qh_state;
+ qh->qh_state = QH_STATE_COMPLETING;
+ stopped = (state == QH_STATE_IDLE);
+
+ rescan:
+ last = NULL;
+ last_status = -EINPROGRESS;
+ qh->needs_rescan = 0;
+
+ /* remove de-activated QTDs from front of queue.
+ * after faults (including short reads), cleanup this urb
+ * then let the queue advance.
+ * if queue is stopped, handles unlinks.
+ */
+ list_for_each_safe (entry, tmp, &qh->qtd_list) {
+ struct fusbh200_qtd *qtd;
+ struct urb *urb;
+ u32 token = 0;
+
+ qtd = list_entry (entry, struct fusbh200_qtd, qtd_list);
+ urb = qtd->urb;
+
+ /* clean up any state from previous QTD ...*/
+ if (last) {
+ if (likely (last->urb != urb)) {
+ fusbh200_urb_done(fusbh200, last->urb, last_status);
+ count++;
+ last_status = -EINPROGRESS;
+ }
+ fusbh200_qtd_free (fusbh200, last);
+ last = NULL;
+ }
+
+ /* ignore urbs submitted during completions we reported */
+ if (qtd == end)
+ break;
+
+ /* hardware copies qtd out of qh overlay */
+ rmb ();
+ token = hc32_to_cpu(fusbh200, qtd->hw_token);
+
+ /* always clean up qtds the hc de-activated */
+ retry_xacterr:
+ if ((token & QTD_STS_ACTIVE) == 0) {
+
+ /* Report Data Buffer Error: non-fatal but useful */
+ if (token & QTD_STS_DBE)
+ fusbh200_dbg(fusbh200,
+ "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ urb,
+ usb_endpoint_num(&urb->ep->desc),
+ usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd,
+ qh);
+
+ /* on STALL, error, and short reads this urb must
+ * complete and all its qtds must be recycled.
+ */
+ if ((token & QTD_STS_HALT) != 0) {
+
+ /* retry transaction errors until we
+ * reach the software xacterr limit
+ */
+ if ((token & QTD_STS_XACT) &&
+ QTD_CERR(token) == 0 &&
+ ++qh->xacterrs < QH_XACTERR_MAX &&
+ !urb->unlinked) {
+ fusbh200_dbg(fusbh200,
+ "detected XactErr len %zu/%zu retry %d\n",
+ qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
+
+ /* reset the token in the qtd and the
+ * qh overlay (which still contains
+ * the qtd) so that we pick up from
+ * where we left off
+ */
+ token &= ~QTD_STS_HALT;
+ token |= QTD_STS_ACTIVE |
+ (FUSBH200_TUNE_CERR << 10);
+ qtd->hw_token = cpu_to_hc32(fusbh200,
+ token);
+ wmb();
+ hw->hw_token = cpu_to_hc32(fusbh200,
+ token);
+ goto retry_xacterr;
+ }
+ stopped = 1;
+
+ /* magic dummy for some short reads; qh won't advance.
+ * that silicon quirk can kick in with this dummy too.
+ *
+ * other short reads won't stop the queue, including
+ * control transfers (status stage handles that) or
+ * most other single-qtd reads ... the queue stops if
+ * URB_SHORT_NOT_OK was set so the driver submitting
+ * the urbs could clean it up.
+ */
+ } else if (IS_SHORT_READ (token)
+ && !(qtd->hw_alt_next
+ & FUSBH200_LIST_END(fusbh200))) {
+ stopped = 1;
+ }
+
+ /* stop scanning when we reach qtds the hc is using */
+ } else if (likely (!stopped
+ && fusbh200->rh_state >= FUSBH200_RH_RUNNING)) {
+ break;
+
+ /* scan the whole queue for unlinks whenever it stops */
+ } else {
+ stopped = 1;
+
+ /* cancel everything if we halt, suspend, etc */
+ if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
+ last_status = -ESHUTDOWN;
+
+ /* this qtd is active; skip it unless a previous qtd
+ * for its urb faulted, or its urb was canceled.
+ */
+ else if (last_status == -EINPROGRESS && !urb->unlinked)
+ continue;
+
+ /* qh unlinked; token in overlay may be most current */
+ if (state == QH_STATE_IDLE
+ && cpu_to_hc32(fusbh200, qtd->qtd_dma)
+ == hw->hw_current) {
+ token = hc32_to_cpu(fusbh200, hw->hw_token);
+
+ /* An unlink may leave an incomplete
+ * async transaction in the TT buffer.
+ * We have to clear it.
+ */
+ fusbh200_clear_tt_buffer(fusbh200, qh, urb, token);
+ }
+ }
+
+ /* unless we already know the urb's status, collect qtd status
+ * and update count of bytes transferred. in common short read
+ * cases with only one data qtd (including control transfers),
+ * queue processing won't halt. but with two or more qtds (for
+ * example, with a 32 KB transfer), when the first qtd gets a
+ * short read the second must be removed by hand.
+ */
+ if (last_status == -EINPROGRESS) {
+ last_status = qtd_copy_status(fusbh200, urb,
+ qtd->length, token);
+ if (last_status == -EREMOTEIO
+ && (qtd->hw_alt_next
+ & FUSBH200_LIST_END(fusbh200)))
+ last_status = -EINPROGRESS;
+
+ /* As part of low/full-speed endpoint-halt processing
+ * we must clear the TT buffer (11.17.5).
+ */
+ if (unlikely(last_status != -EINPROGRESS &&
+ last_status != -EREMOTEIO)) {
+ /* The TT's in some hubs malfunction when they
+ * receive this request following a STALL (they
+ * stop sending isochronous packets). Since a
+ * STALL can't leave the TT buffer in a busy
+ * state (if you believe Figures 11-48 - 11-51
+ * in the USB 2.0 spec), we won't clear the TT
+ * buffer in this case. Strictly speaking this
+ * is a violation of the spec.
+ */
+ if (last_status != -EPIPE)
+ fusbh200_clear_tt_buffer(fusbh200, qh, urb,
+ token);
+ }
+ }
+
+ /* if we're removing something not at the queue head,
+ * patch the hardware queue pointer.
+ */
+ if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
+ last = list_entry (qtd->qtd_list.prev,
+ struct fusbh200_qtd, qtd_list);
+ last->hw_next = qtd->hw_next;
+ }
+
+ /* remove qtd; it's recycled after possible urb completion */
+ list_del (&qtd->qtd_list);
+ last = qtd;
+
+ /* reinit the xacterr counter for the next qtd */
+ qh->xacterrs = 0;
+ }
+
+ /* last urb's completion might still need calling */
+ if (likely (last != NULL)) {
+ fusbh200_urb_done(fusbh200, last->urb, last_status);
+ count++;
+ fusbh200_qtd_free (fusbh200, last);
+ }
+
+ /* Do we need to rescan for URBs dequeued during a giveback? */
+ if (unlikely(qh->needs_rescan)) {
+ /* If the QH is already unlinked, do the rescan now. */
+ if (state == QH_STATE_IDLE)
+ goto rescan;
+
+ /* Otherwise we have to wait until the QH is fully unlinked.
+ * Our caller will start an unlink if qh->needs_rescan is
+ * set. But if an unlink has already started, nothing needs
+ * to be done.
+ */
+ if (state != QH_STATE_LINKED)
+ qh->needs_rescan = 0;
+ }
+
+ /* restore original state; caller must unlink or relink */
+ qh->qh_state = state;
+
+ /* be sure the hardware's done with the qh before refreshing
+ * it after fault cleanup, or recovering from silicon wrongly
+ * overlaying the dummy qtd (which reduces DMA chatter).
+ */
+ if (stopped != 0 || hw->hw_qtd_next == FUSBH200_LIST_END(fusbh200)) {
+ switch (state) {
+ case QH_STATE_IDLE:
+ qh_refresh(fusbh200, qh);
+ break;
+ case QH_STATE_LINKED:
+ /* We won't refresh a QH that's linked (after the HC
+ * stopped the queue). That avoids a race:
+ * - HC reads first part of QH;
+ * - CPU updates that first part and the token;
+ * - HC reads rest of that QH, including token
+ * Result: HC gets an inconsistent image, and then
+ * DMAs to/from the wrong memory (corrupting it).
+ *
+ * That should be rare for interrupt transfers,
+ * except maybe high bandwidth ...
+ */
+
+ /* Tell the caller to start an unlink */
+ qh->needs_rescan = 1;
+ break;
+ /* otherwise, unlink already started */
+ }
+ }
+
+ return count;
+}
+
+/*-------------------------------------------------------------------------*/
+
+// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
+#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+// ... and packet size, for any kind of endpoint descriptor
+#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
+
+/*
+ * reverse of qh_urb_transaction: free a list of TDs.
+ * used for cleanup after errors, before HC sees an URB's TDs.
+ */
+static void qtd_list_free (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ struct list_head *qtd_list
+) {
+ struct list_head *entry, *temp;
+
+ list_for_each_safe (entry, temp, qtd_list) {
+ struct fusbh200_qtd *qtd;
+
+ qtd = list_entry (entry, struct fusbh200_qtd, qtd_list);
+ list_del (&qtd->qtd_list);
+ fusbh200_qtd_free (fusbh200, qtd);
+ }
+}
+
+/*
+ * create a list of filled qtds for this URB; won't link into qh.
+ */
+static struct list_head *
+qh_urb_transaction (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ struct list_head *head,
+ gfp_t flags
+) {
+ struct fusbh200_qtd *qtd, *qtd_prev;
+ dma_addr_t buf;
+ int len, this_sg_len, maxpacket;
+ int is_input;
+ u32 token;
+ int i;
+ struct scatterlist *sg;
+
+ /*
+ * URBs map to sequences of QTDs: one logical transaction
+ */
+ qtd = fusbh200_qtd_alloc (fusbh200, flags);
+ if (unlikely (!qtd))
+ return NULL;
+ list_add_tail (&qtd->qtd_list, head);
+ qtd->urb = urb;
+
+ token = QTD_STS_ACTIVE;
+ token |= (FUSBH200_TUNE_CERR << 10);
+ /* for split transactions, SplitXState initialized to zero */
+
+ len = urb->transfer_buffer_length;
+ is_input = usb_pipein (urb->pipe);
+ if (usb_pipecontrol (urb->pipe)) {
+ /* SETUP pid */
+ qtd_fill(fusbh200, qtd, urb->setup_dma,
+ sizeof (struct usb_ctrlrequest),
+ token | (2 /* "setup" */ << 8), 8);
+
+ /* ... and always at least one more pid */
+ token ^= QTD_TOGGLE;
+ qtd_prev = qtd;
+ qtd = fusbh200_qtd_alloc (fusbh200, flags);
+ if (unlikely (!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
+ list_add_tail (&qtd->qtd_list, head);
+
+ /* for zero length DATA stages, STATUS is always IN */
+ if (len == 0)
+ token |= (1 /* "in" */ << 8);
+ }
+
+ /*
+ * data transfer stage: buffer setup
+ */
+ i = urb->num_mapped_sgs;
+ if (len > 0 && i > 0) {
+ sg = urb->sg;
+ buf = sg_dma_address(sg);
+
+ /* urb->transfer_buffer_length may be smaller than the
+ * size of the scatterlist (or vice versa)
+ */
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ } else {
+ sg = NULL;
+ buf = urb->transfer_dma;
+ this_sg_len = len;
+ }
+
+ if (is_input)
+ token |= (1 /* "in" */ << 8);
+ /* else it's already initted to "out" pid (0 << 8) */
+
+ maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+
+ /*
+ * buffer gets wrapped in one or more qtds;
+ * last one may be "short" (including zero len)
+ * and may serve as a control status ack
+ */
+ for (;;) {
+ int this_qtd_len;
+
+ this_qtd_len = qtd_fill(fusbh200, qtd, buf, this_sg_len, token,
+ maxpacket);
+ this_sg_len -= this_qtd_len;
+ len -= this_qtd_len;
+ buf += this_qtd_len;
+
+ /*
+ * short reads advance to a "magic" dummy instead of the next
+ * qtd ... that forces the queue to stop, for manual cleanup.
+ * (this will usually be overridden later.)
+ */
+ if (is_input)
+ qtd->hw_alt_next = fusbh200->async->hw->hw_alt_next;
+
+ /* qh makes control packets use qtd toggle; maybe switch it */
+ if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
+ token ^= QTD_TOGGLE;
+
+ if (likely(this_sg_len <= 0)) {
+ if (--i <= 0 || len <= 0)
+ break;
+ sg = sg_next(sg);
+ buf = sg_dma_address(sg);
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ }
+
+ qtd_prev = qtd;
+ qtd = fusbh200_qtd_alloc (fusbh200, flags);
+ if (unlikely (!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
+ list_add_tail (&qtd->qtd_list, head);
+ }
+
+ /*
+ * unless the caller requires manual cleanup after short reads,
+ * have the alt_next mechanism keep the queue running after the
+ * last data qtd (the only one, for control and most other cases).
+ */
+ if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
+ || usb_pipecontrol (urb->pipe)))
+ qtd->hw_alt_next = FUSBH200_LIST_END(fusbh200);
+
+ /*
+ * control requests may need a terminating data "status" ack;
+ * other OUT ones may need a terminating short packet
+ * (zero length).
+ */
+ if (likely (urb->transfer_buffer_length != 0)) {
+ int one_more = 0;
+
+ if (usb_pipecontrol (urb->pipe)) {
+ one_more = 1;
+ token ^= 0x0100; /* "in" <--> "out" */
+ token |= QTD_TOGGLE; /* force DATA1 */
+ } else if (usb_pipeout(urb->pipe)
+ && (urb->transfer_flags & URB_ZERO_PACKET)
+ && !(urb->transfer_buffer_length % maxpacket)) {
+ one_more = 1;
+ }
+ if (one_more) {
+ qtd_prev = qtd;
+ qtd = fusbh200_qtd_alloc (fusbh200, flags);
+ if (unlikely (!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
+ list_add_tail (&qtd->qtd_list, head);
+
+ /* never any data in such packets */
+ qtd_fill(fusbh200, qtd, 0, 0, token, 0);
+ }
+ }
+
+ /* by default, enable interrupt on urb completion */
+ if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
+ qtd->hw_token |= cpu_to_hc32(fusbh200, QTD_IOC);
+ return head;
+
+cleanup:
+ qtd_list_free (fusbh200, urb, head);
+ return NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+// Would be best to create all qh's from config descriptors,
+// when each interface/altsetting is established. Unlink
+// any previous qh and cancel its urbs first; endpoints are
+// implicitly reset then (data toggle too).
+// That'd mean updating how usbcore talks to HCDs. (2.7?)
+
+
+/*
+ * Each QH holds a qtd list; a QH is used for everything except iso.
+ *
+ * For interrupt urbs, the scheduler must set the microframe scheduling
+ * mask(s) each time the QH gets scheduled. For highspeed, that's
+ * just one microframe in the s-mask. For split interrupt transactions
+ * there are additional complications: c-mask, maybe FSTNs.
+ */
+static struct fusbh200_qh *
+qh_make (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ gfp_t flags
+) {
+ struct fusbh200_qh *qh = fusbh200_qh_alloc (fusbh200, flags);
+ u32 info1 = 0, info2 = 0;
+ int is_input, type;
+ int maxp = 0;
+ struct usb_tt *tt = urb->dev->tt;
+ struct fusbh200_qh_hw *hw;
+
+ if (!qh)
+ return qh;
+
+ /*
+ * init endpoint/device data for this QH
+ */
+ info1 |= usb_pipeendpoint (urb->pipe) << 8;
+ info1 |= usb_pipedevice (urb->pipe) << 0;
+
+ is_input = usb_pipein (urb->pipe);
+ type = usb_pipetype (urb->pipe);
+ maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
+
+ /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
+ * acts like up to 3KB, but is built from smaller packets.
+ */
+ if (max_packet(maxp) > 1024) {
+ fusbh200_dbg(fusbh200, "bogus qh maxpacket %d\n", max_packet(maxp));
+ goto done;
+ }
+
+ /* Compute interrupt scheduling parameters just once, and save.
+ * - allowing for high bandwidth, how many nsec/uframe are used?
+ * - split transactions need a second CSPLIT uframe; same question
+ * - splits also need a schedule gap (for full/low speed I/O)
+ * - qh has a polling interval
+ *
+ * For control/bulk requests, the HC or TT handles these.
+ */
+ if (type == PIPE_INTERRUPT) {
+ qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+ is_input, 0,
+ hb_mult(maxp) * max_packet(maxp)));
+ qh->start = NO_FRAME;
+
+ if (urb->dev->speed == USB_SPEED_HIGH) {
+ qh->c_usecs = 0;
+ qh->gap_uf = 0;
+
+ qh->period = urb->interval >> 3;
+ if (qh->period == 0 && urb->interval != 1) {
+ /* NOTE interval 2 or 4 uframes could work.
+ * But interval 1 scheduling is simpler, and
+ * includes high bandwidth.
+ */
+ urb->interval = 1;
+ } else if (qh->period > fusbh200->periodic_size) {
+ qh->period = fusbh200->periodic_size;
+ urb->interval = qh->period << 3;
+ }
+ } else {
+ int think_time;
+
+ /* gap is f(FS/LS transfer times) */
+ qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
+ is_input, 0, maxp) / (125 * 1000);
+
+ /* FIXME this just approximates SPLIT/CSPLIT times */
+ if (is_input) { // SPLIT, gap, CSPLIT+DATA
+ qh->c_usecs = qh->usecs + HS_USECS (0);
+ qh->usecs = HS_USECS (1);
+ } else { // SPLIT+DATA, gap, CSPLIT
+ qh->usecs += HS_USECS (1);
+ qh->c_usecs = HS_USECS (0);
+ }
+
+ think_time = tt ? tt->think_time : 0;
+ qh->tt_usecs = NS_TO_US (think_time +
+ usb_calc_bus_time (urb->dev->speed,
+ is_input, 0, max_packet (maxp)));
+ qh->period = urb->interval;
+ if (qh->period > fusbh200->periodic_size) {
+ qh->period = fusbh200->periodic_size;
+ urb->interval = qh->period;
+ }
+ }
+ }
+
+ /* support for tt scheduling, and access to toggles */
+ qh->dev = urb->dev;
+
+ /* using TT? */
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ info1 |= QH_LOW_SPEED;
+ /* FALL THROUGH */
+
+ case USB_SPEED_FULL:
+ /* EPS 0 means "full" */
+ if (type != PIPE_INTERRUPT)
+ info1 |= (FUSBH200_TUNE_RL_TT << 28);
+ if (type == PIPE_CONTROL) {
+ info1 |= QH_CONTROL_EP; /* for TT */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
+ }
+ info1 |= maxp << 16;
+
+ info2 |= (FUSBH200_TUNE_MULT_TT << 30);
+
+ /* Some Freescale processors have an erratum in which the
+ * port number in the queue head was 0..N-1 instead of 1..N.
+ */
+ if (fusbh200_has_fsl_portno_bug(fusbh200))
+ info2 |= (urb->dev->ttport-1) << 23;
+ else
+ info2 |= urb->dev->ttport << 23;
+
+ /* set the address of the TT; for TDI's integrated
+ * root hub tt, leave it zeroed.
+ */
+ if (tt && tt->hub != fusbh200_to_hcd(fusbh200)->self.root_hub)
+ info2 |= tt->hub->devnum << 16;
+
+ /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
+
+ break;
+
+ case USB_SPEED_HIGH: /* no TT involved */
+ info1 |= QH_HIGH_SPEED;
+ if (type == PIPE_CONTROL) {
+ info1 |= (FUSBH200_TUNE_RL_HS << 28);
+ info1 |= 64 << 16; /* usb2 fixed maxpacket */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
+ info2 |= (FUSBH200_TUNE_MULT_HS << 30);
+ } else if (type == PIPE_BULK) {
+ info1 |= (FUSBH200_TUNE_RL_HS << 28);
+ /* The USB spec says that high speed bulk endpoints
+ * always use 512 byte maxpacket. But some device
+ * vendors decided to ignore that, and MSFT is happy
+ * to help them do so. So now people expect to use
+ * such nonconformant devices with Linux too; sigh.
+ */
+ info1 |= max_packet(maxp) << 16;
+ info2 |= (FUSBH200_TUNE_MULT_HS << 30);
+ } else { /* PIPE_INTERRUPT */
+ info1 |= max_packet (maxp) << 16;
+ info2 |= hb_mult (maxp) << 30;
+ }
+ break;
+ default:
+ fusbh200_dbg(fusbh200, "bogus dev %p speed %d\n", urb->dev,
+ urb->dev->speed);
+done:
+ qh_destroy(fusbh200, qh);
+ return NULL;
+ }
+
+ /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
+
+ /* init as live, toggle clear, advance to dummy */
+ qh->qh_state = QH_STATE_IDLE;
+ hw = qh->hw;
+ hw->hw_info1 = cpu_to_hc32(fusbh200, info1);
+ hw->hw_info2 = cpu_to_hc32(fusbh200, info2);
+ qh->is_out = !is_input;
+ usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
+ qh_refresh (fusbh200, qh);
+ return qh;
+}
+
+/*-------------------------------------------------------------------------*/
+
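+/* Reference-count users of the async schedule: the first user turns the
+ * schedule on, and the last one allows it to be turned off again.
+ */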
+static void enable_async(struct fusbh200_hcd *fusbh200)
+{
+ if (fusbh200->async_count++)
+ return;
+
+ /* Stop waiting to turn off the async schedule */
+ fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_DISABLE_ASYNC);
+
+ /* Don't start the schedule until ASS is 0 */
+ fusbh200_poll_ASS(fusbh200);
+ turn_on_io_watchdog(fusbh200);
+}
+
+static void disable_async(struct fusbh200_hcd *fusbh200)
+{
+ if (--fusbh200->async_count)
+ return;
+
+ /* The async schedule and async_unlink list are supposed to be empty */
+ WARN_ON(fusbh200->async->qh_next.qh || fusbh200->async_unlink);
+
+ /* Don't turn off the schedule until ASS is 1 */
+ fusbh200_poll_ASS(fusbh200);
+}
+
+/* move qh (and its qtds) onto async queue; maybe enable queue. */
+
+static void qh_link_async (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ __hc32 dma = QH_NEXT(fusbh200, qh->qh_dma);
+ struct fusbh200_qh *head;
+
+ /* Don't link a QH if there's a Clear-TT-Buffer pending */
+ if (unlikely(qh->clearing_tt))
+ return;
+
+ WARN_ON(qh->qh_state != QH_STATE_IDLE);
+
+ /* clear halt and/or toggle; and maybe recover from silicon quirk */
+ qh_refresh(fusbh200, qh);
+
+ /* splice right after start */
+ head = fusbh200->async;
+ qh->qh_next = head->qh_next;
+ qh->hw->hw_next = head->hw->hw_next;
+ wmb ();
+
+ head->qh_next.qh = qh;
+ head->hw->hw_next = dma;
+
+ qh->xacterrs = 0;
+ qh->qh_state = QH_STATE_LINKED;
+ /* qtd completions reported later by interrupt */
+
+ enable_async(fusbh200);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * For control/bulk/interrupt, return QH with these TDs appended.
+ * Allocates and initializes the QH if necessary.
+ * Returns NULL if it can't allocate a QH when it needs one.
+ * If the QH has TDs (urbs) already, that's great.
+ */
+static struct fusbh200_qh *qh_append_tds (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ int epnum,
+ void **ptr
+)
+{
+ struct fusbh200_qh *qh = NULL;
+ __hc32 qh_addr_mask = cpu_to_hc32(fusbh200, 0x7f);
+
+ qh = (struct fusbh200_qh *) *ptr;
+ if (unlikely (qh == NULL)) {
+ /* can't sleep here, we have fusbh200->lock... */
+ qh = qh_make (fusbh200, urb, GFP_ATOMIC);
+ *ptr = qh;
+ }
+ if (likely (qh != NULL)) {
+ struct fusbh200_qtd *qtd;
+
+ if (unlikely (list_empty (qtd_list)))
+ qtd = NULL;
+ else
+ qtd = list_entry (qtd_list->next, struct fusbh200_qtd,
+ qtd_list);
+
+ /* control qh may need patching ... */
+ if (unlikely (epnum == 0)) {
+
+ /* usb_reset_device() briefly reverts to address 0 */
+ if (usb_pipedevice (urb->pipe) == 0)
+ qh->hw->hw_info1 &= ~qh_addr_mask;
+ }
+
+ /* just one way to queue requests: swap with the dummy qtd.
+ * only hc or qh_refresh() ever modify the overlay.
+ */
+ if (likely (qtd != NULL)) {
+ struct fusbh200_qtd *dummy;
+ dma_addr_t dma;
+ __hc32 token;
+
+ /* to avoid racing the HC, use the dummy td instead of
+ * the first td of our list (becomes new dummy). both
+ * tds stay deactivated until we're done, when the
+ * HC is allowed to fetch the old dummy (4.10.2).
+ */
+ token = qtd->hw_token;
+ qtd->hw_token = HALT_BIT(fusbh200);
+
+ dummy = qh->dummy;
+
+ dma = dummy->qtd_dma;
+ *dummy = *qtd;
+ dummy->qtd_dma = dma;
+
+ list_del (&qtd->qtd_list);
+ list_add (&dummy->qtd_list, qtd_list);
+ list_splice_tail(qtd_list, &qh->qtd_list);
+
+ fusbh200_qtd_init(fusbh200, qtd, qtd->qtd_dma);
+ qh->dummy = qtd;
+
+ /* hc must see the new dummy at list end */
+ dma = qtd->qtd_dma;
+ qtd = list_entry (qh->qtd_list.prev,
+ struct fusbh200_qtd, qtd_list);
+ qtd->hw_next = QTD_NEXT(fusbh200, dma);
+
+ /* let the hc process these next qtds */
+ wmb ();
+ dummy->hw_token = token;
+
+ urb->hcpriv = qh;
+ }
+ }
+ return qh;
+}
+
+/*-------------------------------------------------------------------------*/
+
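+/* Submit the qtd list for an async (control/bulk) URB, linking its QH
+ * into the async schedule if it is currently idle.
+ */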
+static int
+submit_async (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ gfp_t mem_flags
+) {
+ int epnum;
+ unsigned long flags;
+ struct fusbh200_qh *qh = NULL;
+ int rc;
+
+ epnum = urb->ep->desc.bEndpointAddress;
+
+#ifdef FUSBH200_URB_TRACE
+ {
+ struct fusbh200_qtd *qtd;
+ qtd = list_entry(qtd_list->next, struct fusbh200_qtd, qtd_list);
+ fusbh200_dbg(fusbh200,
+ "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ __func__, urb->dev->devpath, urb,
+ epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd, urb->ep->hcpriv);
+ }
+#endif
+
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
+ rc = -ESHUTDOWN;
+ goto done;
+ }
+ rc = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
+ if (unlikely(rc))
+ goto done;
+
+ qh = qh_append_tds(fusbh200, urb, qtd_list, epnum, &urb->ep->hcpriv);
+ if (unlikely(qh == NULL)) {
+ usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ /* Control/bulk operations through TTs don't need scheduling,
+ * the HC and TT handle it when the TT has a buffer ready.
+ */
+ if (likely (qh->qh_state == QH_STATE_IDLE))
+ qh_link_async(fusbh200, qh);
+ done:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ if (unlikely (qh == NULL))
+ qtd_list_free (fusbh200, urb, qtd_list);
+ return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
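+/* Remove one QH from the async schedule and queue it for the next IAA cycle. */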
+static void single_unlink_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ struct fusbh200_qh *prev;
+
+ /* Add to the end of the list of QHs waiting for the next IAAD */
+ qh->qh_state = QH_STATE_UNLINK;
+ if (fusbh200->async_unlink)
+ fusbh200->async_unlink_last->unlink_next = qh;
+ else
+ fusbh200->async_unlink = qh;
+ fusbh200->async_unlink_last = qh;
+
+ /* Unlink it from the schedule */
+ prev = fusbh200->async;
+ while (prev->qh_next.qh != qh)
+ prev = prev->qh_next.qh;
+
+ prev->hw->hw_next = qh->hw->hw_next;
+ prev->qh_next = qh->qh_next;
+ if (fusbh200->qh_scan_next == qh)
+ fusbh200->qh_scan_next = qh->qh_next.qh;
+}
+
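+/* Ask the controller for an Interrupt-on-Async-Advance, or finish the
+ * pending unlinks directly if the controller isn't running.
+ */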
+static void start_iaa_cycle(struct fusbh200_hcd *fusbh200, bool nested)
+{
+ /*
+ * Do nothing if an IAA cycle is already running or
+ * if one will be started shortly.
+ */
+ if (fusbh200->async_iaa || fusbh200->async_unlinking)
+ return;
+
+ /* Do all the waiting QHs at once */
+ fusbh200->async_iaa = fusbh200->async_unlink;
+ fusbh200->async_unlink = NULL;
+
+ /* If the controller isn't running, we don't have to wait for it */
+ if (unlikely(fusbh200->rh_state < FUSBH200_RH_RUNNING)) {
+ if (!nested) /* Avoid recursion */
+ end_unlink_async(fusbh200);
+
+ /* Otherwise start a new IAA cycle */
+ } else if (likely(fusbh200->rh_state == FUSBH200_RH_RUNNING)) {
+ /* Make sure the unlinks are all visible to the hardware */
+ wmb();
+
+ fusbh200_writel(fusbh200, fusbh200->command | CMD_IAAD,
+ &fusbh200->regs->command);
+ fusbh200_readl(fusbh200, &fusbh200->regs->command);
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_IAA_WATCHDOG, true);
+ }
+}
+
+/* The async QHs whose qtds are being unlinked are now gone from the HC */
+
+static void end_unlink_async(struct fusbh200_hcd *fusbh200)
+{
+ struct fusbh200_qh *qh;
+
+ /* Process the idle QHs */
+ restart:
+ fusbh200->async_unlinking = true;
+ while (fusbh200->async_iaa) {
+ qh = fusbh200->async_iaa;
+ fusbh200->async_iaa = qh->unlink_next;
+ qh->unlink_next = NULL;
+
+ qh->qh_state = QH_STATE_IDLE;
+ qh->qh_next.qh = NULL;
+
+ qh_completions(fusbh200, qh);
+ if (!list_empty(&qh->qtd_list) &&
+ fusbh200->rh_state == FUSBH200_RH_RUNNING)
+ qh_link_async(fusbh200, qh);
+ disable_async(fusbh200);
+ }
+ fusbh200->async_unlinking = false;
+
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (fusbh200->async_unlink) {
+ start_iaa_cycle(fusbh200, true);
+ if (unlikely(fusbh200->rh_state < FUSBH200_RH_RUNNING))
+ goto restart;
+ }
+}
+
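+/*
+ * Scan the async ring for QHs that have stayed empty for a full unlink
+ * timer cycle and queue them for unlinking; QHs that only just became
+ * empty are deferred to the next FUSBH200_HRTIMER_ASYNC_UNLINKS pass.
+ */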
+static void unlink_empty_async(struct fusbh200_hcd *fusbh200)
+{
+ struct fusbh200_qh *qh, *next;
+ bool stopped = (fusbh200->rh_state < FUSBH200_RH_RUNNING);
+ bool check_unlinks_later = false;
+
+ /* Unlink all the async QHs that have been empty for a timer cycle */
+ next = fusbh200->async->qh_next.qh;
+ while (next) {
+ qh = next;
+ next = qh->qh_next.qh;
+
+ if (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED) {
+ if (!stopped && qh->unlink_cycle ==
+ fusbh200->async_unlink_cycle)
+ check_unlinks_later = true;
+ else
+ single_unlink_async(fusbh200, qh);
+ }
+ }
+
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (fusbh200->async_unlink)
+ start_iaa_cycle(fusbh200, false);
+
+ /* QHs that haven't been empty for long enough will be handled later */
+ if (check_unlinks_later) {
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_ASYNC_UNLINKS, true);
+ ++fusbh200->async_unlink_cycle;
+ }
+}
+
+/* makes sure the async qh will become idle */
+/* caller must own fusbh200->lock */
+
+static void start_unlink_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ /*
+ * If the QH isn't linked then there's nothing we can do
+ * unless we were called during a giveback, in which case
+ * qh_completions() has to deal with it.
+ */
+ if (qh->qh_state != QH_STATE_LINKED) {
+ if (qh->qh_state == QH_STATE_COMPLETING)
+ qh->needs_rescan = 1;
+ return;
+ }
+
+ single_unlink_async(fusbh200, qh);
+ start_iaa_cycle(fusbh200, false);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_async (struct fusbh200_hcd *fusbh200)
+{
+ struct fusbh200_qh *qh;
+ bool check_unlinks_later = false;
+
+ fusbh200->qh_scan_next = fusbh200->async->qh_next.qh;
+ while (fusbh200->qh_scan_next) {
+ qh = fusbh200->qh_scan_next;
+ fusbh200->qh_scan_next = qh->qh_next.qh;
+ rescan:
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why fusbh200->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then fusbh200->qh_scan_next is adjusted
+ * in single_unlink_async().
+ */
+ temp = qh_completions(fusbh200, qh);
+ if (qh->needs_rescan) {
+ start_unlink_async(fusbh200, qh);
+ } else if (list_empty(&qh->qtd_list)
+ && qh->qh_state == QH_STATE_LINKED) {
+ qh->unlink_cycle = fusbh200->async_unlink_cycle;
+ check_unlinks_later = true;
+ } else if (temp != 0)
+ goto rescan;
+ }
+ }
+
+ /*
+ * Unlink empty entries, reducing DMA usage as well
+ * as HCD schedule-scanning costs. Delay for any qh
+ * we just scanned, since it's not unusual for it to
+ * pick up new work again before long.
+ */
+ if (check_unlinks_later && fusbh200->rh_state == FUSBH200_RH_RUNNING &&
+ !(fusbh200->enabled_hrtimer_events &
+ BIT(FUSBH200_HRTIMER_ASYNC_UNLINKS))) {
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_ASYNC_UNLINKS, true);
+ ++fusbh200->async_unlink_cycle;
+ }
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * EHCI scheduled transaction support: interrupt, iso, split iso
+ * These are called "periodic" transactions in the EHCI spec.
+ *
+ * Note that for interrupt transfers, the QH/QTD manipulation is shared
+ * with the "asynchronous" transaction support (control/bulk transfers).
+ * The only real difference is in how interrupt transfers are scheduled.
+ *
+ * For ISO, we make an "iso_stream" head to serve the same role as a QH.
+ * It keeps track of every ITD (or SITD) that's linked, and holds enough
+ * pre-calculated schedule data to make appending to the queue be quick.
+ */
+
+static int fusbh200_get_frame (struct usb_hcd *hcd);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * periodic_next_shadow - return "next" pointer on shadow list
+ * @periodic: host pointer to qh/itd
+ * @tag: hardware tag for type of this record
+ */
+static union fusbh200_shadow *
+periodic_next_shadow(struct fusbh200_hcd *fusbh200, union fusbh200_shadow *periodic,
+ __hc32 tag)
+{
+ switch (hc32_to_cpu(fusbh200, tag)) {
+ case Q_TYPE_QH:
+ return &periodic->qh->qh_next;
+ case Q_TYPE_FSTN:
+ return &periodic->fstn->fstn_next;
+ default:
+ return &periodic->itd->itd_next;
+ }
+}
+
+static __hc32 *
+shadow_next_periodic(struct fusbh200_hcd *fusbh200, union fusbh200_shadow *periodic,
+ __hc32 tag)
+{
+ switch (hc32_to_cpu(fusbh200, tag)) {
+ /* our fusbh200_shadow.qh is actually software part */
+ case Q_TYPE_QH:
+ return &periodic->qh->hw->hw_next;
+ /* others are hw parts */
+ default:
+ return periodic->hw_next;
+ }
+}
+
+/* caller must hold fusbh200->lock */
+static void periodic_unlink (struct fusbh200_hcd *fusbh200, unsigned frame, void *ptr)
+{
+ union fusbh200_shadow *prev_p = &fusbh200->pshadow[frame];
+ __hc32 *hw_p = &fusbh200->periodic[frame];
+ union fusbh200_shadow here = *prev_p;
+
+ /* find predecessor of "ptr"; hw and shadow lists are in sync */
+ while (here.ptr && here.ptr != ptr) {
+ prev_p = periodic_next_shadow(fusbh200, prev_p,
+ Q_NEXT_TYPE(fusbh200, *hw_p));
+ hw_p = shadow_next_periodic(fusbh200, &here,
+ Q_NEXT_TYPE(fusbh200, *hw_p));
+ here = *prev_p;
+ }
+ /* an interrupt entry (at list end) could have been shared */
+ if (!here.ptr)
+ return;
+
+ /* update shadow and hardware lists ... the old "next" pointers
+ * from ptr may still be in use, the caller updates them.
+ */
+ *prev_p = *periodic_next_shadow(fusbh200, &here,
+ Q_NEXT_TYPE(fusbh200, *hw_p));
+
+ *hw_p = *shadow_next_periodic(fusbh200, &here,
+ Q_NEXT_TYPE(fusbh200, *hw_p));
+}
+
+/* how many of the uframe's 125 usecs are allocated? */
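+/* (each QH's hw_info2 holds an S-mask in bits 0-7 naming the uframes that
+ * start a transaction and a C-mask in bits 8-15 for complete-splits; an
+ * endpoint only contributes its usecs/c_usecs to uframes set in those masks)
+ */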
+static unsigned short
+periodic_usecs (struct fusbh200_hcd *fusbh200, unsigned frame, unsigned uframe)
+{
+ __hc32 *hw_p = &fusbh200->periodic [frame];
+ union fusbh200_shadow *q = &fusbh200->pshadow [frame];
+ unsigned usecs = 0;
+ struct fusbh200_qh_hw *hw;
+
+ while (q->ptr) {
+ switch (hc32_to_cpu(fusbh200, Q_NEXT_TYPE(fusbh200, *hw_p))) {
+ case Q_TYPE_QH:
+ hw = q->qh->hw;
+ /* is it in the S-mask? */
+ if (hw->hw_info2 & cpu_to_hc32(fusbh200, 1 << uframe))
+ usecs += q->qh->usecs;
+ /* ... or C-mask? */
+ if (hw->hw_info2 & cpu_to_hc32(fusbh200,
+ 1 << (8 + uframe)))
+ usecs += q->qh->c_usecs;
+ hw_p = &hw->hw_next;
+ q = &q->qh->qh_next;
+ break;
+ // case Q_TYPE_FSTN:
+ default:
+ /* for "save place" FSTNs, count the relevant INTR
+ * bandwidth from the previous frame
+ */
+ if (q->fstn->hw_prev != FUSBH200_LIST_END(fusbh200)) {
+ fusbh200_dbg (fusbh200, "ignoring FSTN cost ...\n");
+ }
+ hw_p = &q->fstn->hw_next;
+ q = &q->fstn->fstn_next;
+ break;
+ case Q_TYPE_ITD:
+ if (q->itd->hw_transaction[uframe])
+ usecs += q->itd->stream->usecs;
+ hw_p = &q->itd->hw_next;
+ q = &q->itd->itd_next;
+ break;
+ }
+ }
+ if (usecs > fusbh200->uframe_periodic_max)
+ fusbh200_err (fusbh200, "uframe %d sched overrun: %d usecs\n",
+ frame * 8 + uframe, usecs);
+ return usecs;
+}
+
+/*-------------------------------------------------------------------------*/
+
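+/* true iff both devices hang off the same transaction translator: the same
+ * hub TT, and also the same port when the hub has one TT per port (multi).
+ */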
+static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
+{
+ if (!dev1->tt || !dev2->tt)
+ return 0;
+ if (dev1->tt != dev2->tt)
+ return 0;
+ if (dev1->tt->multi)
+ return dev1->ttport == dev2->ttport;
+ else
+ return 1;
+}
+
+/* return true iff the device's transaction translator is available
+ * for a periodic transfer starting at the specified frame, using
+ * all the uframes in the mask.
+ */
+static int tt_no_collision (
+ struct fusbh200_hcd *fusbh200,
+ unsigned period,
+ struct usb_device *dev,
+ unsigned frame,
+ u32 uf_mask
+)
+{
+ if (period == 0) /* error */
+ return 0;
+
+ /* note bandwidth wastage: split never follows csplit
+ * (different dev or endpoint) until the next uframe.
+ * calling convention doesn't make that distinction.
+ */
+ for (; frame < fusbh200->periodic_size; frame += period) {
+ union fusbh200_shadow here;
+ __hc32 type;
+ struct fusbh200_qh_hw *hw;
+
+ here = fusbh200->pshadow [frame];
+ type = Q_NEXT_TYPE(fusbh200, fusbh200->periodic [frame]);
+ while (here.ptr) {
+ switch (hc32_to_cpu(fusbh200, type)) {
+ case Q_TYPE_ITD:
+ type = Q_NEXT_TYPE(fusbh200, here.itd->hw_next);
+ here = here.itd->itd_next;
+ continue;
+ case Q_TYPE_QH:
+ hw = here.qh->hw;
+ if (same_tt (dev, here.qh->dev)) {
+ u32 mask;
+
+ mask = hc32_to_cpu(fusbh200,
+ hw->hw_info2);
+ /* "knows" no gap is needed */
+ mask |= mask >> 8;
+ if (mask & uf_mask)
+ break;
+ }
+ type = Q_NEXT_TYPE(fusbh200, hw->hw_next);
+ here = here.qh->qh_next;
+ continue;
+ // case Q_TYPE_FSTN:
+ default:
+ fusbh200_dbg (fusbh200,
+ "periodic frame %d bogus type %d\n",
+ frame, type);
+ }
+
+ /* collision or error */
+ return 0;
+ }
+ }
+
+ /* no collision */
+ return 1;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void enable_periodic(struct fusbh200_hcd *fusbh200)
+{
+ if (fusbh200->periodic_count++)
+ return;
+
+ /* Stop waiting to turn off the periodic schedule */
+ fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_DISABLE_PERIODIC);
+
+ /* Don't start the schedule until PSS is 0 */
+ fusbh200_poll_PSS(fusbh200);
+ turn_on_io_watchdog(fusbh200);
+}
+
+static void disable_periodic(struct fusbh200_hcd *fusbh200)
+{
+ if (--fusbh200->periodic_count)
+ return;
+
+ /* Don't turn off the schedule until PSS is 1 */
+ fusbh200_poll_PSS(fusbh200);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* periodic schedule slots have iso tds (normal or split) first, then a
+ * sparse tree for active interrupt transfers.
+ *
+ * this just links in a qh; caller guarantees uframe masks are set right.
+ * no FSTN support (yet; fusbh200 0.96+)
+ */
+static void qh_link_periodic(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ unsigned i;
+ unsigned period = qh->period;
+
+ dev_dbg (&qh->dev->dev,
+ "link qh%d-%04x/%p start %d [%d/%d us]\n",
+ period, hc32_to_cpup(fusbh200, &qh->hw->hw_info2)
+ & (QH_CMASK | QH_SMASK),
+ qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* high bandwidth, or otherwise every microframe */
+ if (period == 0)
+ period = 1;
+
+ for (i = qh->start; i < fusbh200->periodic_size; i += period) {
+ union fusbh200_shadow *prev = &fusbh200->pshadow[i];
+ __hc32 *hw_p = &fusbh200->periodic[i];
+ union fusbh200_shadow here = *prev;
+ __hc32 type = 0;
+
+ /* skip the iso nodes at list head */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE(fusbh200, *hw_p);
+ if (type == cpu_to_hc32(fusbh200, Q_TYPE_QH))
+ break;
+ prev = periodic_next_shadow(fusbh200, prev, type);
+ hw_p = shadow_next_periodic(fusbh200, &here, type);
+ here = *prev;
+ }
+
+ /* sorting each branch by period (slow-->fast)
+ * enables sharing interior tree nodes
+ */
+ while (here.ptr && qh != here.qh) {
+ if (qh->period > here.qh->period)
+ break;
+ prev = &here.qh->qh_next;
+ hw_p = &here.qh->hw->hw_next;
+ here = *prev;
+ }
+ /* link in this qh, unless some earlier pass did that */
+ if (qh != here.qh) {
+ qh->qh_next = here;
+ if (here.qh)
+ qh->hw->hw_next = *hw_p;
+ wmb ();
+ prev->qh = qh;
+ *hw_p = QH_NEXT (fusbh200, qh->qh_dma);
+ }
+ }
+ qh->qh_state = QH_STATE_LINKED;
+ qh->xacterrs = 0;
+
+ /* update per-qh bandwidth for usbfs */
+ fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated += qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ list_add(&qh->intr_node, &fusbh200->intr_qh_list);
+
+ /* maybe enable periodic schedule processing */
+ ++fusbh200->intr_count;
+ enable_periodic(fusbh200);
+}
+
+static void qh_unlink_periodic(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ unsigned i;
+ unsigned period;
+
+ /*
+ * If qh is for a low/full-speed device, simply unlinking it
+ * could interfere with an ongoing split transaction. To unlink
+ * it safely would require setting the QH_INACTIVATE bit and
+ * waiting at least one frame, as described in EHCI 4.12.2.5.
+ *
+ * We won't bother with any of this. Instead, we assume that the
+ * only reason for unlinking an interrupt QH while the current URB
+ * is still active is to dequeue all the URBs (flush the whole
+ * endpoint queue).
+ *
+ * If rebalancing the periodic schedule is ever implemented, this
+ * approach will no longer be valid.
+ */
+
+ /* high bandwidth, or otherwise part of every microframe */
+ if ((period = qh->period) == 0)
+ period = 1;
+
+ for (i = qh->start; i < fusbh200->periodic_size; i += period)
+ periodic_unlink (fusbh200, i, qh);
+
+ /* update per-qh bandwidth for usbfs */
+ fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated -= qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ dev_dbg (&qh->dev->dev,
+ "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+ qh->period,
+ hc32_to_cpup(fusbh200, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
+ qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* qh->qh_next still "live" to HC */
+ qh->qh_state = QH_STATE_UNLINK;
+ qh->qh_next.ptr = NULL;
+
+ if (fusbh200->qh_scan_next == qh)
+ fusbh200->qh_scan_next = list_entry(qh->intr_node.next,
+ struct fusbh200_qh, intr_node);
+ list_del(&qh->intr_node);
+}
+
+static void start_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ /* If the QH isn't linked then there's nothing we can do
+ * unless we were called during a giveback, in which case
+ * qh_completions() has to deal with it.
+ */
+ if (qh->qh_state != QH_STATE_LINKED) {
+ if (qh->qh_state == QH_STATE_COMPLETING)
+ qh->needs_rescan = 1;
+ return;
+ }
+
+ qh_unlink_periodic (fusbh200, qh);
+
+ /* Make sure the unlinks are visible before starting the timer */
+ wmb();
+
+ /*
+ * The EHCI spec doesn't say how long it takes the controller to
+ * stop accessing an unlinked interrupt QH. The timer delay is
+ * 9 uframes; presumably that will be long enough.
+ */
+ qh->unlink_cycle = fusbh200->intr_unlink_cycle;
+
+ /* New entries go at the end of the intr_unlink list */
+ if (fusbh200->intr_unlink)
+ fusbh200->intr_unlink_last->unlink_next = qh;
+ else
+ fusbh200->intr_unlink = qh;
+ fusbh200->intr_unlink_last = qh;
+
+ if (fusbh200->intr_unlinking)
+ ; /* Avoid recursive calls */
+ else if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
+ fusbh200_handle_intr_unlinks(fusbh200);
+ else if (fusbh200->intr_unlink == qh) {
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_UNLINK_INTR, true);
+ ++fusbh200->intr_unlink_cycle;
+ }
+}
+
+static void end_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ struct fusbh200_qh_hw *hw = qh->hw;
+ int rc;
+
+ qh->qh_state = QH_STATE_IDLE;
+ hw->hw_next = FUSBH200_LIST_END(fusbh200);
+
+ qh_completions(fusbh200, qh);
+
+ /* reschedule QH iff another request is queued */
+ if (!list_empty(&qh->qtd_list) && fusbh200->rh_state == FUSBH200_RH_RUNNING) {
+ rc = qh_schedule(fusbh200, qh);
+
+ /* An error here likely indicates handshake failure
+ * or no space left in the schedule. Neither fault
+ * should happen often ...
+ *
+ * FIXME kill the now-dysfunctional queued urbs
+ */
+ if (rc != 0)
+ fusbh200_err(fusbh200, "can't reschedule qh %p, err %d\n",
+ qh, rc);
+ }
+
+ /* maybe turn off periodic schedule */
+ --fusbh200->intr_count;
+ disable_periodic(fusbh200);
+}
+
+/*-------------------------------------------------------------------------*/
+
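+/*
+ * "usecs" below is inverted into a per-uframe budget, so a slot fails as
+ * soon as any examined uframe already carries more than
+ * (uframe_periodic_max - usecs).  For example, with the default max of
+ * 100 usec and a 30 usec request, any uframe holding more than 70
+ * already-allocated usecs rejects the slot.
+ */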
+static int check_period (
+ struct fusbh200_hcd *fusbh200,
+ unsigned frame,
+ unsigned uframe,
+ unsigned period,
+ unsigned usecs
+) {
+ int claimed;
+
+ /* complete split running into next frame?
+ * given FSTN support, we could sometimes check...
+ */
+ if (uframe >= 8)
+ return 0;
+
+ /* convert "usecs we need" to "max already claimed" */
+ usecs = fusbh200->uframe_periodic_max - usecs;
+
+ /* we "know" 2 and 4 uframe intervals were rejected; so
+ * for period 0, check _every_ microframe in the schedule.
+ */
+ if (unlikely (period == 0)) {
+ do {
+ for (uframe = 0; uframe < 7; uframe++) {
+ claimed = periodic_usecs (fusbh200, frame, uframe);
+ if (claimed > usecs)
+ return 0;
+ }
+ } while ((frame += 1) < fusbh200->periodic_size);
+
+ /* just check the specified uframe, at that period */
+ } else {
+ do {
+ claimed = periodic_usecs (fusbh200, frame, uframe);
+ if (claimed > usecs)
+ return 0;
+ } while ((frame += period) < fusbh200->periodic_size);
+ }
+
+ // success!
+ return 1;
+}
+
+static int check_intr_schedule (
+ struct fusbh200_hcd *fusbh200,
+ unsigned frame,
+ unsigned uframe,
+ const struct fusbh200_qh *qh,
+ __hc32 *c_maskp
+)
+{
+ int retval = -ENOSPC;
+ u8 mask = 0;
+
+ if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
+ goto done;
+
+ if (!check_period (fusbh200, frame, uframe, qh->period, qh->usecs))
+ goto done;
+ if (!qh->c_usecs) {
+ retval = 0;
+ *c_maskp = 0;
+ goto done;
+ }
+
+ /* Make sure this tt's buffer is also available for CSPLITs.
+ * We pessimize a bit; probably the typical full speed case
+ * doesn't need the second CSPLIT.
+ *
+ * NOTE: both SPLIT and CSPLIT could be checked in just
+ * one smart pass...
+ */
+ mask = 0x03 << (uframe + qh->gap_uf);
+ *c_maskp = cpu_to_hc32(fusbh200, mask << 8);
+
+ mask |= 1 << uframe;
+ if (tt_no_collision (fusbh200, qh->period, qh->dev, frame, mask)) {
+ if (!check_period (fusbh200, frame, uframe + qh->gap_uf + 1,
+ qh->period, qh->c_usecs))
+ goto done;
+ if (!check_period (fusbh200, frame, uframe + qh->gap_uf,
+ qh->period, qh->c_usecs))
+ goto done;
+ retval = 0;
+ }
+done:
+ return retval;
+}
+
+/* "first fit" scheduling policy used the first time through,
+ * or when the previous schedule slot can't be re-used.
+ */
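+/*
+ * Strategy: reuse the QH's previous frame/uframe slot when it still fits;
+ * otherwise probe candidate frames within the period (stepping the
+ * random_frame counter), trying each of the 8 uframes, and finally program
+ * the S-mask (plus the C-mask for split transactions) into hw_info2.
+ */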
+static int qh_schedule(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ int status;
+ unsigned uframe;
+ __hc32 c_mask;
+ unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
+ struct fusbh200_qh_hw *hw = qh->hw;
+
+ qh_refresh(fusbh200, qh);
+ hw->hw_next = FUSBH200_LIST_END(fusbh200);
+ frame = qh->start;
+
+ /* reuse the previous schedule slots, if we can */
+ if (frame < qh->period) {
+ uframe = ffs(hc32_to_cpup(fusbh200, &hw->hw_info2) & QH_SMASK);
+ status = check_intr_schedule (fusbh200, frame, --uframe,
+ qh, &c_mask);
+ } else {
+ uframe = 0;
+ c_mask = 0;
+ status = -ENOSPC;
+ }
+
+ /* else scan the schedule to find a group of slots such that all
+ * uframes have enough periodic bandwidth available.
+ */
+ if (status) {
+ /* "normal" case, uframing flexible except with splits */
+ if (qh->period) {
+ int i;
+
+ for (i = qh->period; status && i > 0; --i) {
+ frame = ++fusbh200->random_frame % qh->period;
+ for (uframe = 0; uframe < 8; uframe++) {
+ status = check_intr_schedule (fusbh200,
+ frame, uframe, qh,
+ &c_mask);
+ if (status == 0)
+ break;
+ }
+ }
+
+ /* qh->period == 0 means every uframe */
+ } else {
+ frame = 0;
+ status = check_intr_schedule (fusbh200, 0, 0, qh, &c_mask);
+ }
+ if (status)
+ goto done;
+ qh->start = frame;
+
+ /* reset S-frame and (maybe) C-frame masks */
+ hw->hw_info2 &= cpu_to_hc32(fusbh200, ~(QH_CMASK | QH_SMASK));
+ hw->hw_info2 |= qh->period
+ ? cpu_to_hc32(fusbh200, 1 << uframe)
+ : cpu_to_hc32(fusbh200, QH_SMASK);
+ hw->hw_info2 |= c_mask;
+ } else
+ fusbh200_dbg (fusbh200, "reused qh %p schedule\n", qh);
+
+ /* stuff into the periodic schedule */
+ qh_link_periodic(fusbh200, qh);
+done:
+ return status;
+}
+
+static int intr_submit (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ gfp_t mem_flags
+) {
+ unsigned epnum;
+ unsigned long flags;
+ struct fusbh200_qh *qh;
+ int status;
+ struct list_head empty;
+
+ /* get endpoint and transfer/schedule data */
+ epnum = urb->ep->desc.bEndpointAddress;
+
+ spin_lock_irqsave (&fusbh200->lock, flags);
+
+ if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
+ status = -ESHUTDOWN;
+ goto done_not_linked;
+ }
+ status = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
+ if (unlikely(status))
+ goto done_not_linked;
+
+ /* get qh and force any scheduling errors */
+ INIT_LIST_HEAD (&empty);
+ qh = qh_append_tds(fusbh200, urb, &empty, epnum, &urb->ep->hcpriv);
+ if (qh == NULL) {
+ status = -ENOMEM;
+ goto done;
+ }
+ if (qh->qh_state == QH_STATE_IDLE) {
+ if ((status = qh_schedule (fusbh200, qh)) != 0)
+ goto done;
+ }
+
+ /* then queue the urb's tds to the qh */
+ qh = qh_append_tds(fusbh200, urb, qtd_list, epnum, &urb->ep->hcpriv);
+ BUG_ON (qh == NULL);
+
+ /* ... update usbfs periodic stats */
+ fusbh200_to_hcd(fusbh200)->self.bandwidth_int_reqs++;
+
+done:
+ if (unlikely(status))
+ usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
+done_not_linked:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ if (status)
+ qtd_list_free (fusbh200, urb, qtd_list);
+
+ return status;
+}
+
+static void scan_intr(struct fusbh200_hcd *fusbh200)
+{
+ struct fusbh200_qh *qh;
+
+ list_for_each_entry_safe(qh, fusbh200->qh_scan_next, &fusbh200->intr_qh_list,
+ intr_node) {
+ rescan:
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why fusbh200->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then fusbh200->qh_scan_next is adjusted
+ * in qh_unlink_periodic().
+ */
+ temp = qh_completions(fusbh200, qh);
+ if (unlikely(qh->needs_rescan ||
+ (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED)))
+ start_unlink_intr(fusbh200, qh);
+ else if (temp != 0)
+ goto rescan;
+ }
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* fusbh200_iso_stream ops work with both ITD and SITD */
+
+static struct fusbh200_iso_stream *
+iso_stream_alloc (gfp_t mem_flags)
+{
+ struct fusbh200_iso_stream *stream;
+
+ stream = kzalloc(sizeof *stream, mem_flags);
+ if (likely (stream != NULL)) {
+ INIT_LIST_HEAD(&stream->td_list);
+ INIT_LIST_HEAD(&stream->free_list);
+ stream->next_uframe = -1;
+ }
+ return stream;
+}
+
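+/*
+ * Precompute the per-stream iTD constants: buf0 carries the device address
+ * and endpoint number, buf1 the direction bit plus max packet size, and
+ * buf2 the high-bandwidth multiplier; usecs/bandwidth feed usbfs accounting.
+ */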
+static void
+iso_stream_init (
+ struct fusbh200_hcd *fusbh200,
+ struct fusbh200_iso_stream *stream,
+ struct usb_device *dev,
+ int pipe,
+ unsigned interval
+)
+{
+ u32 buf1;
+ unsigned epnum, maxp;
+ int is_input;
+ long bandwidth;
+ unsigned multi;
+
+ /*
+ * this might be a "high bandwidth" highspeed endpoint,
+ * as encoded in the ep descriptor's wMaxPacketSize field
+ */
+ epnum = usb_pipeendpoint (pipe);
+ is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
+ maxp = usb_maxpacket(dev, pipe, !is_input);
+ if (is_input) {
+ buf1 = (1 << 11);
+ } else {
+ buf1 = 0;
+ }
+
+ maxp = max_packet(maxp);
+ multi = hb_mult(maxp);
+ buf1 |= maxp;
+ maxp *= multi;
+
+ stream->buf0 = cpu_to_hc32(fusbh200, (epnum << 8) | dev->devnum);
+ stream->buf1 = cpu_to_hc32(fusbh200, buf1);
+ stream->buf2 = cpu_to_hc32(fusbh200, multi);
+
+ /* usbfs wants to report the average usecs per frame tied up
+ * when transfers on this endpoint are scheduled ...
+ */
+ if (dev->speed == USB_SPEED_FULL) {
+ interval <<= 3;
+ stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed,
+ is_input, 1, maxp));
+ stream->usecs /= 8;
+ } else {
+ stream->highspeed = 1;
+ stream->usecs = HS_USECS_ISO (maxp);
+ }
+ bandwidth = stream->usecs * 8;
+ bandwidth /= interval;
+
+ stream->bandwidth = bandwidth;
+ stream->udev = dev;
+ stream->bEndpointAddress = is_input | epnum;
+ stream->interval = interval;
+ stream->maxp = maxp;
+}
+
+static struct fusbh200_iso_stream *
+iso_stream_find (struct fusbh200_hcd *fusbh200, struct urb *urb)
+{
+ unsigned epnum;
+ struct fusbh200_iso_stream *stream;
+ struct usb_host_endpoint *ep;
+ unsigned long flags;
+
+ epnum = usb_pipeendpoint (urb->pipe);
+ if (usb_pipein(urb->pipe))
+ ep = urb->dev->ep_in[epnum];
+ else
+ ep = urb->dev->ep_out[epnum];
+
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ stream = ep->hcpriv;
+
+ if (unlikely (stream == NULL)) {
+ stream = iso_stream_alloc(GFP_ATOMIC);
+ if (likely (stream != NULL)) {
+ ep->hcpriv = stream;
+ stream->ep = ep;
+ iso_stream_init(fusbh200, stream, urb->dev, urb->pipe,
+ urb->interval);
+ }
+
+ /* if dev->ep [epnum] is a QH, hw is set */
+ } else if (unlikely (stream->hw != NULL)) {
+ fusbh200_dbg (fusbh200, "dev %s ep%d%s, not iso??\n",
+ urb->dev->devpath, epnum,
+ usb_pipein(urb->pipe) ? "in" : "out");
+ stream = NULL;
+ }
+
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ return stream;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* fusbh200_iso_sched ops can be ITD-only or SITD-only */
+
+static struct fusbh200_iso_sched *
+iso_sched_alloc (unsigned packets, gfp_t mem_flags)
+{
+ struct fusbh200_iso_sched *iso_sched;
+ int size = sizeof *iso_sched;
+
+ size += packets * sizeof (struct fusbh200_iso_packet);
+ iso_sched = kzalloc(size, mem_flags);
+ if (likely (iso_sched != NULL)) {
+ INIT_LIST_HEAD (&iso_sched->td_list);
+ }
+ return iso_sched;
+}
+
+static inline void
+itd_sched_init(
+ struct fusbh200_hcd *fusbh200,
+ struct fusbh200_iso_sched *iso_sched,
+ struct fusbh200_iso_stream *stream,
+ struct urb *urb
+)
+{
+ unsigned i;
+ dma_addr_t dma = urb->transfer_dma;
+
+ /* how many uframes are needed for these transfers */
+ iso_sched->span = urb->number_of_packets * stream->interval;
+
+ /* figure out per-uframe itd fields that we'll need later
+ * when we fit new itds into the schedule.
+ */
+ for (i = 0; i < urb->number_of_packets; i++) {
+ struct fusbh200_iso_packet *uframe = &iso_sched->packet [i];
+ unsigned length;
+ dma_addr_t buf;
+ u32 trans;
+
+ length = urb->iso_frame_desc [i].length;
+ buf = dma + urb->iso_frame_desc [i].offset;
+
+ trans = FUSBH200_ISOC_ACTIVE;
+ trans |= buf & 0x0fff;
+ if (unlikely (((i + 1) == urb->number_of_packets))
+ && !(urb->transfer_flags & URB_NO_INTERRUPT))
+ trans |= FUSBH200_ITD_IOC;
+ trans |= length << 16;
+ uframe->transaction = cpu_to_hc32(fusbh200, trans);
+
+ /* might need to cross a buffer page within a uframe */
+ uframe->bufp = (buf & ~(u64)0x0fff);
+ buf += length;
+ if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
+ uframe->cross = 1;
+ }
+}
+
+static void
+iso_sched_free (
+ struct fusbh200_iso_stream *stream,
+ struct fusbh200_iso_sched *iso_sched
+)
+{
+ if (!iso_sched)
+ return;
+ // caller must hold fusbh200->lock!
+ list_splice (&iso_sched->td_list, &stream->free_list);
+ kfree (iso_sched);
+}
+
+static int
+itd_urb_transaction (
+ struct fusbh200_iso_stream *stream,
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ gfp_t mem_flags
+)
+{
+ struct fusbh200_itd *itd;
+ dma_addr_t itd_dma;
+ int i;
+ unsigned num_itds;
+ struct fusbh200_iso_sched *sched;
+ unsigned long flags;
+
+ sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
+ if (unlikely (sched == NULL))
+ return -ENOMEM;
+
+ itd_sched_init(fusbh200, sched, stream, urb);
+
+ if (urb->interval < 8)
+ num_itds = 1 + (sched->span + 7) / 8;
+ else
+ num_itds = urb->number_of_packets;
+
+ /* allocate/init ITDs */
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ for (i = 0; i < num_itds; i++) {
+
+ /*
+ * Use iTDs from the free list, but not iTDs that may
+ * still be in use by the hardware.
+ */
+ if (likely(!list_empty(&stream->free_list))) {
+ itd = list_first_entry(&stream->free_list,
+ struct fusbh200_itd, itd_list);
+ if (itd->frame == fusbh200->now_frame)
+ goto alloc_itd;
+ list_del (&itd->itd_list);
+ itd_dma = itd->itd_dma;
+ } else {
+ alloc_itd:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ itd = dma_pool_alloc (fusbh200->itd_pool, mem_flags,
+ &itd_dma);
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ if (!itd) {
+ iso_sched_free(stream, sched);
+ spin_unlock_irqrestore(&fusbh200->lock, flags);
+ return -ENOMEM;
+ }
+ }
+
+ memset (itd, 0, sizeof *itd);
+ itd->itd_dma = itd_dma;
+ list_add (&itd->itd_list, &sched->td_list);
+ }
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+
+ /* temporarily store schedule info in hcpriv */
+ urb->hcpriv = sched;
+ urb->error_count = 0;
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline int
+itd_slot_ok (
+ struct fusbh200_hcd *fusbh200,
+ u32 mod,
+ u32 uframe,
+ u8 usecs,
+ u32 period
+)
+{
+ uframe %= period;
+ do {
+ /* can't commit more than uframe_periodic_max usec */
+ if (periodic_usecs (fusbh200, uframe >> 3, uframe & 0x7)
+ > (fusbh200->uframe_periodic_max - usecs))
+ return 0;
+
+ /* we know urb->interval is 2^N uframes */
+ uframe += period;
+ } while (uframe < mod);
+ return 1;
+}
+
+/*
+ * This scheduler plans almost as far into the future as it has actual
+ * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
+ * "as small as possible" to be cache-friendlier.) That limits the size
+ * transfers you can stream reliably; avoid more than 64 msec per urb.
+ * Also avoid queue depths of less than fusbh200's worst irq latency (affected
+ * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
+ * and other factors); or more than about 230 msec total (for portability,
+ * given FUSBH200_TUNE_FLS and the slop). Or, write a smarter scheduler!
+ */
+
+#define SCHEDULE_SLOP 80 /* microframes */
+
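+/*
+ * All schedule arithmetic below is modulo "mod" (periodic_size << 3
+ * uframes), so distances are wrapped differences.  SCHEDULE_SLOP
+ * (80 uframes == 10 msec) pads the start point against IRQ latency and
+ * the cost of scheduling itself.
+ */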
+static int
+iso_stream_schedule (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ struct fusbh200_iso_stream *stream
+)
+{
+ u32 now, next, start, period, span;
+ int status;
+ unsigned mod = fusbh200->periodic_size << 3;
+ struct fusbh200_iso_sched *sched = urb->hcpriv;
+
+ period = urb->interval;
+ span = sched->span;
+
+ if (span > mod - SCHEDULE_SLOP) {
+ fusbh200_dbg (fusbh200, "iso request %p too long\n", urb);
+ status = -EFBIG;
+ goto fail;
+ }
+
+ now = fusbh200_read_frame_index(fusbh200) & (mod - 1);
+
+ /* Typical case: reuse current schedule, stream is still active.
+ * Hopefully there are no gaps from the host falling behind
+ * (irq delays etc), but if there are we'll take the next
+ * slot in the schedule, implicitly assuming URB_ISO_ASAP.
+ */
+ if (likely (!list_empty (&stream->td_list))) {
+ u32 excess;
+
+ /* For high speed devices, allow scheduling within the
+ * isochronous scheduling threshold. For full speed devices
+ * and Intel PCI-based controllers, don't (workaround for
+ * the Intel ICH9 bug).
+ */
+ if (!stream->highspeed && fusbh200->fs_i_thresh)
+ next = now + fusbh200->i_thresh;
+ else
+ next = now;
+
+ /* Fell behind (by up to twice the slop amount)?
+ * We decide based on the time of the last currently-scheduled
+ * slot, not the time of the next available slot.
+ */
+ excess = (stream->next_uframe - period - next) & (mod - 1);
+ if (excess >= mod - 2 * SCHEDULE_SLOP)
+ start = next + excess - mod + period *
+ DIV_ROUND_UP(mod - excess, period);
+ else
+ start = next + excess + period;
+ if (start - now >= mod) {
+ fusbh200_dbg(fusbh200, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now - period, period,
+ mod);
+ status = -EFBIG;
+ goto fail;
+ }
+ }
+
+ /* need to schedule; when's the next (u)frame we could start?
+ * this is bigger than fusbh200->i_thresh allows; scheduling itself
+ * isn't free, the slop should handle reasonably slow cpus. it
+ * can also help high bandwidth if the dma and irq loads don't
+ * jump until after the queue is primed.
+ */
+ else {
+ int done = 0;
+ start = SCHEDULE_SLOP + (now & ~0x07);
+
+ /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
+
+ /* find a uframe slot with enough bandwidth.
+ * Early uframes are more precious because full-speed
+ * iso IN transfers can't use late uframes,
+ * and therefore they should be allocated last.
+ */
+ next = start;
+ start += period;
+ do {
+ start--;
+ /* check schedule: enough space? */
+ if (itd_slot_ok(fusbh200, mod, start,
+ stream->usecs, period))
+ done = 1;
+ } while (start > next && !done);
+
+ /* no room in the schedule */
+ if (!done) {
+ fusbh200_dbg(fusbh200, "iso resched full %p (now %d max %d)\n",
+ urb, now, now + mod);
+ status = -ENOSPC;
+ goto fail;
+ }
+ }
+
+ /* Tried to schedule too far into the future? */
+ if (unlikely(start - now + span - period
+ >= mod - 2 * SCHEDULE_SLOP)) {
+ fusbh200_dbg(fusbh200, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now, span - period,
+ mod - 2 * SCHEDULE_SLOP);
+ status = -EFBIG;
+ goto fail;
+ }
+
+ stream->next_uframe = start & (mod - 1);
+
+ /* report high speed start in uframes; full speed, in frames */
+ urb->start_frame = stream->next_uframe;
+ if (!stream->highspeed)
+ urb->start_frame >>= 3;
+
+ /* Make sure scan_isoc() sees these */
+ if (fusbh200->isoc_count == 0)
+ fusbh200->next_frame = now >> 3;
+ return 0;
+
+ fail:
+ iso_sched_free(stream, sched);
+ urb->hcpriv = NULL;
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+itd_init(struct fusbh200_hcd *fusbh200, struct fusbh200_iso_stream *stream,
+ struct fusbh200_itd *itd)
+{
+ int i;
+
+ /* it's been recently zeroed */
+ itd->hw_next = FUSBH200_LIST_END(fusbh200);
+ itd->hw_bufp [0] = stream->buf0;
+ itd->hw_bufp [1] = stream->buf1;
+ itd->hw_bufp [2] = stream->buf2;
+
+ for (i = 0; i < 8; i++)
+ itd->index[i] = -1;
+
+ /* All other fields are filled when scheduling */
+}
+
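+/* fill one uframe slot of an iTD from the precomputed iso packet: the
+ * transaction word plus page-select bits, moving on to the next buffer
+ * page when the packet crosses a 4KB boundary.
+ */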
+static inline void
+itd_patch(
+ struct fusbh200_hcd *fusbh200,
+ struct fusbh200_itd *itd,
+ struct fusbh200_iso_sched *iso_sched,
+ unsigned index,
+ u16 uframe
+)
+{
+ struct fusbh200_iso_packet *uf = &iso_sched->packet [index];
+ unsigned pg = itd->pg;
+
+ // BUG_ON (pg == 6 && uf->cross);
+
+ uframe &= 0x07;
+ itd->index [uframe] = index;
+
+ itd->hw_transaction[uframe] = uf->transaction;
+ itd->hw_transaction[uframe] |= cpu_to_hc32(fusbh200, pg << 12);
+ itd->hw_bufp[pg] |= cpu_to_hc32(fusbh200, uf->bufp & ~(u32)0);
+ itd->hw_bufp_hi[pg] |= cpu_to_hc32(fusbh200, (u32)(uf->bufp >> 32));
+
+ /* iso_frame_desc[].offset must be strictly increasing */
+ if (unlikely (uf->cross)) {
+ u64 bufp = uf->bufp + 4096;
+
+ itd->pg = ++pg;
+ itd->hw_bufp[pg] |= cpu_to_hc32(fusbh200, bufp & ~(u32)0);
+ itd->hw_bufp_hi[pg] |= cpu_to_hc32(fusbh200, (u32)(bufp >> 32));
+ }
+}
+
+static inline void
+itd_link (struct fusbh200_hcd *fusbh200, unsigned frame, struct fusbh200_itd *itd)
+{
+ union fusbh200_shadow *prev = &fusbh200->pshadow[frame];
+ __hc32 *hw_p = &fusbh200->periodic[frame];
+ union fusbh200_shadow here = *prev;
+ __hc32 type = 0;
+
+ /* skip any iso nodes which might belong to previous microframes */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE(fusbh200, *hw_p);
+ if (type == cpu_to_hc32(fusbh200, Q_TYPE_QH))
+ break;
+ prev = periodic_next_shadow(fusbh200, prev, type);
+ hw_p = shadow_next_periodic(fusbh200, &here, type);
+ here = *prev;
+ }
+
+ itd->itd_next = here;
+ itd->hw_next = *hw_p;
+ prev->itd = itd;
+ itd->frame = frame;
+ wmb ();
+ *hw_p = cpu_to_hc32(fusbh200, itd->itd_dma | Q_TYPE_ITD);
+}
+
+/* fit urb's itds into the selected schedule slot; activate as needed */
+static void itd_link_urb(
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ unsigned mod,
+ struct fusbh200_iso_stream *stream
+)
+{
+ int packet;
+ unsigned next_uframe, uframe, frame;
+ struct fusbh200_iso_sched *iso_sched = urb->hcpriv;
+ struct fusbh200_itd *itd;
+
+ next_uframe = stream->next_uframe & (mod - 1);
+
+ if (unlikely (list_empty(&stream->td_list))) {
+ fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated
+ += stream->bandwidth;
+ fusbh200_dbg(fusbh200,
+ "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
+ urb->dev->devpath, stream->bEndpointAddress & 0x0f,
+ (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
+ urb->interval,
+ next_uframe >> 3, next_uframe & 0x7);
+ }
+
+ /* fill iTDs uframe by uframe */
+ for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
+ if (itd == NULL) {
+ /* ASSERT: we have all necessary itds */
+ // BUG_ON (list_empty (&iso_sched->td_list));
+
+ /* ASSERT: no itds for this endpoint in this uframe */
+
+ itd = list_entry (iso_sched->td_list.next,
+ struct fusbh200_itd, itd_list);
+ list_move_tail (&itd->itd_list, &stream->td_list);
+ itd->stream = stream;
+ itd->urb = urb;
+ itd_init (fusbh200, stream, itd);
+ }
+
+ uframe = next_uframe & 0x07;
+ frame = next_uframe >> 3;
+
+ itd_patch(fusbh200, itd, iso_sched, packet, uframe);
+
+ next_uframe += stream->interval;
+ next_uframe &= mod - 1;
+ packet++;
+
+ /* link completed itds into the schedule */
+ if (((next_uframe >> 3) != frame)
+ || packet == urb->number_of_packets) {
+ itd_link(fusbh200, frame & (fusbh200->periodic_size - 1), itd);
+ itd = NULL;
+ }
+ }
+ stream->next_uframe = next_uframe;
+
+ /* don't need that schedule data any more */
+ iso_sched_free (stream, iso_sched);
+ urb->hcpriv = NULL;
+
+ ++fusbh200->isoc_count;
+ enable_periodic(fusbh200);
+}
+
+#define ISO_ERRS (FUSBH200_ISOC_BUF_ERR | FUSBH200_ISOC_BABBLE | FUSBH200_ISOC_XACTERR)
+
+/* Process and recycle a completed ITD. Return true iff its urb completed,
+ * and hence its completion callback probably added things to the hardware
+ * schedule.
+ *
+ * Note that we carefully avoid recycling this descriptor until after any
+ * completion callback runs, so that it won't be reused quickly. That is,
+ * assuming (a) no more than two urbs per frame on this endpoint, and also
+ * (b) only this endpoint's completions submit URBs. It seems some silicon
+ * corrupts things if you reuse completed descriptors very quickly...
+ */
+static bool itd_complete(struct fusbh200_hcd *fusbh200, struct fusbh200_itd *itd)
+{
+ struct urb *urb = itd->urb;
+ struct usb_iso_packet_descriptor *desc;
+ u32 t;
+ unsigned uframe;
+ int urb_index = -1;
+ struct fusbh200_iso_stream *stream = itd->stream;
+ struct usb_device *dev;
+ bool retval = false;
+
+ /* for each uframe with a packet */
+ for (uframe = 0; uframe < 8; uframe++) {
+ if (likely (itd->index[uframe] == -1))
+ continue;
+ urb_index = itd->index[uframe];
+ desc = &urb->iso_frame_desc [urb_index];
+
+ t = hc32_to_cpup(fusbh200, &itd->hw_transaction [uframe]);
+ itd->hw_transaction [uframe] = 0;
+
+ /* report transfer status */
+ if (unlikely (t & ISO_ERRS)) {
+ urb->error_count++;
+ if (t & FUSBH200_ISOC_BUF_ERR)
+ desc->status = usb_pipein (urb->pipe)
+ ? -ENOSR /* hc couldn't read */
+ : -ECOMM; /* hc couldn't write */
+ else if (t & FUSBH200_ISOC_BABBLE)
+ desc->status = -EOVERFLOW;
+ else /* (t & FUSBH200_ISOC_XACTERR) */
+ desc->status = -EPROTO;
+
+ /* HC need not update length with this error */
+ if (!(t & FUSBH200_ISOC_BABBLE)) {
+ desc->actual_length = fusbh200_itdlen(urb, desc, t);
+ urb->actual_length += desc->actual_length;
+ }
+ } else if (likely ((t & FUSBH200_ISOC_ACTIVE) == 0)) {
+ desc->status = 0;
+ desc->actual_length = fusbh200_itdlen(urb, desc, t);
+ urb->actual_length += desc->actual_length;
+ } else {
+ /* URB was too late */
+ desc->status = -EXDEV;
+ }
+ }
+
+ /* handle completion now? */
+ if (likely ((urb_index + 1) != urb->number_of_packets))
+ goto done;
+
+ /* ASSERT: it's really the last itd for this urb
+ list_for_each_entry (itd, &stream->td_list, itd_list)
+ BUG_ON (itd->urb == urb);
+ */
+
+ /* give urb back to the driver; completion often (re)submits */
+ dev = urb->dev;
+ fusbh200_urb_done(fusbh200, urb, 0);
+ retval = true;
+ urb = NULL;
+
+ --fusbh200->isoc_count;
+ disable_periodic(fusbh200);
+
+ if (unlikely(list_is_singular(&stream->td_list))) {
+ fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated
+ -= stream->bandwidth;
+ fusbh200_dbg(fusbh200,
+ "deschedule devp %s ep%d%s-iso\n",
+ dev->devpath, stream->bEndpointAddress & 0x0f,
+ (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
+ }
+
+done:
+ itd->urb = NULL;
+
+ /* Add to the end of the free list for later reuse */
+ list_move_tail(&itd->itd_list, &stream->free_list);
+
+ /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
+ if (list_empty(&stream->td_list)) {
+ list_splice_tail_init(&stream->free_list,
+ &fusbh200->cached_itd_list);
+ start_free_itds(fusbh200);
+ }
+
+ return retval;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int itd_submit (struct fusbh200_hcd *fusbh200, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int status = -EINVAL;
+ unsigned long flags;
+ struct fusbh200_iso_stream *stream;
+
+ /* Get iso_stream head */
+ stream = iso_stream_find (fusbh200, urb);
+ if (unlikely (stream == NULL)) {
+ fusbh200_dbg (fusbh200, "can't get iso stream\n");
+ return -ENOMEM;
+ }
+ if (unlikely (urb->interval != stream->interval &&
+ fusbh200_port_speed(fusbh200, 0) == USB_PORT_STAT_HIGH_SPEED)) {
+ fusbh200_dbg (fusbh200, "can't change iso interval %d --> %d\n",
+ stream->interval, urb->interval);
+ goto done;
+ }
+
+#ifdef FUSBH200_URB_TRACE
+ fusbh200_dbg (fusbh200,
+ "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
+ __func__, urb->dev->devpath, urb,
+ usb_pipeendpoint (urb->pipe),
+ usb_pipein (urb->pipe) ? "in" : "out",
+ urb->transfer_buffer_length,
+ urb->number_of_packets, urb->interval,
+ stream);
+#endif
+
+ /* allocate ITDs w/o locking anything */
+ status = itd_urb_transaction (stream, fusbh200, urb, mem_flags);
+ if (unlikely (status < 0)) {
+ fusbh200_dbg (fusbh200, "can't init itds\n");
+ goto done;
+ }
+
+ /* schedule ... need to lock */
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
+ status = -ESHUTDOWN;
+ goto done_not_linked;
+ }
+ status = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
+ if (unlikely(status))
+ goto done_not_linked;
+ status = iso_stream_schedule(fusbh200, urb, stream);
+ if (likely (status == 0))
+ itd_link_urb (fusbh200, urb, fusbh200->periodic_size << 3, stream);
+ else
+ usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
+ done_not_linked:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ done:
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_isoc(struct fusbh200_hcd *fusbh200)
+{
+ unsigned uf, now_frame, frame;
+ unsigned fmask = fusbh200->periodic_size - 1;
+ bool modified, live;
+
+ /*
+ * When running, scan from last scan point up to "now"
+ * else clean up by scanning everything that's left.
+ * Touches as few pages as possible: cache-friendly.
+ */
+ if (fusbh200->rh_state >= FUSBH200_RH_RUNNING) {
+ uf = fusbh200_read_frame_index(fusbh200);
+ now_frame = (uf >> 3) & fmask;
+ live = true;
+ } else {
+ now_frame = (fusbh200->next_frame - 1) & fmask;
+ live = false;
+ }
+ fusbh200->now_frame = now_frame;
+
+ frame = fusbh200->next_frame;
+ for (;;) {
+ union fusbh200_shadow q, *q_p;
+ __hc32 type, *hw_p;
+
+restart:
+ /* scan each element in frame's queue for completions */
+ q_p = &fusbh200->pshadow [frame];
+ hw_p = &fusbh200->periodic [frame];
+ q.ptr = q_p->ptr;
+ type = Q_NEXT_TYPE(fusbh200, *hw_p);
+ modified = false;
+
+ while (q.ptr != NULL) {
+ switch (hc32_to_cpu(fusbh200, type)) {
+ case Q_TYPE_ITD:
+ /* If this ITD is still active, leave it for
+ * later processing ... check the next entry.
+ * No need to check for activity unless the
+ * frame is current.
+ */
+ if (frame == now_frame && live) {
+ rmb();
+ for (uf = 0; uf < 8; uf++) {
+ if (q.itd->hw_transaction[uf] &
+ ITD_ACTIVE(fusbh200))
+ break;
+ }
+ if (uf < 8) {
+ q_p = &q.itd->itd_next;
+ hw_p = &q.itd->hw_next;
+ type = Q_NEXT_TYPE(fusbh200,
+ q.itd->hw_next);
+ q = *q_p;
+ break;
+ }
+ }
+
+ /* Take finished ITDs out of the schedule
+ * and process them: recycle, maybe report
+ * URB completion. HC won't cache the
+ * pointer for much longer, if at all.
+ */
+ *q_p = q.itd->itd_next;
+ *hw_p = q.itd->hw_next;
+ type = Q_NEXT_TYPE(fusbh200, q.itd->hw_next);
+ wmb();
+ modified = itd_complete (fusbh200, q.itd);
+ q = *q_p;
+ break;
+ default:
+ fusbh200_dbg(fusbh200, "corrupt type %d frame %d shadow %p\n",
+ type, frame, q.ptr);
+ // BUG ();
+ /* FALL THROUGH */
+ case Q_TYPE_QH:
+ case Q_TYPE_FSTN:
+ /* End of the iTDs and siTDs */
+ q.ptr = NULL;
+ break;
+ }
+
+ /* assume completion callbacks modify the queue */
+ if (unlikely(modified && fusbh200->isoc_count > 0))
+ goto restart;
+ }
+
+ /* Stop when we have reached the current frame */
+ if (frame == now_frame)
+ break;
+ frame = (frame + 1) & fmask;
+ }
+ fusbh200->next_frame = now_frame;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * Display / Set uframe_periodic_max
+ */
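+/* the value is in usec per 125-usec uframe; 100 is the USB 2.0 80% ceiling,
+ * and only values in [100, 125) are accepted by the store method below.
+ */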
+static ssize_t show_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fusbh200_hcd *fusbh200;
+ int n;
+
+ fusbh200 = hcd_to_fusbh200(bus_to_hcd(dev_get_drvdata(dev)));
+ n = scnprintf(buf, PAGE_SIZE, "%d\n", fusbh200->uframe_periodic_max);
+ return n;
+}
+
+
+static ssize_t store_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fusbh200_hcd *fusbh200;
+ unsigned uframe_periodic_max;
+ unsigned frame, uframe;
+ unsigned short allocated_max;
+ unsigned long flags;
+ ssize_t ret;
+
+ fusbh200 = hcd_to_fusbh200(bus_to_hcd(dev_get_drvdata(dev)));
+ if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+ return -EINVAL;
+
+ if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
+ fusbh200_info(fusbh200, "rejecting invalid request for "
+ "uframe_periodic_max=%u\n", uframe_periodic_max);
+ return -EINVAL;
+ }
+
+ ret = -EINVAL;
+
+ /*
+ * lock, so that our checking does not race with possible periodic
+ * bandwidth allocation through submitting new urbs.
+ */
+ spin_lock_irqsave (&fusbh200->lock, flags);
+
+ /*
+ * for a request to decrease the max periodic bandwidth, we have to check
+ * every microframe in the schedule to see whether the decrease is
+ * possible.
+ */
+ if (uframe_periodic_max < fusbh200->uframe_periodic_max) {
+ allocated_max = 0;
+
+ for (frame = 0; frame < fusbh200->periodic_size; ++frame)
+ for (uframe = 0; uframe < 7; ++uframe)
+ allocated_max = max(allocated_max,
+ periodic_usecs (fusbh200, frame, uframe));
+
+ if (allocated_max > uframe_periodic_max) {
+ fusbh200_info(fusbh200,
+ "cannot decrease uframe_periodic_max becase "
+ "periodic bandwidth is already allocated "
+ "(%u > %u)\n",
+ allocated_max, uframe_periodic_max);
+ goto out_unlock;
+ }
+ }
+
+ /* increasing is always ok */
+
+ fusbh200_info(fusbh200, "setting max periodic bandwidth to %u%% "
+ "(== %u usec/uframe)\n",
+ 100*uframe_periodic_max/125, uframe_periodic_max);
+
+ if (uframe_periodic_max != 100)
+ fusbh200_warn(fusbh200, "max periodic bandwidth set is non-standard\n");
+
+ fusbh200->uframe_periodic_max = uframe_periodic_max;
+ ret = count;
+
+out_unlock:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ return ret;
+}
+static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, store_uframe_periodic_max);
+
+
+static inline int create_sysfs_files(struct fusbh200_hcd *fusbh200)
+{
+ struct device *controller = fusbh200_to_hcd(fusbh200)->self.controller;
+ int i = 0;
+
+ if (i)
+ goto out;
+
+ i = device_create_file(controller, &dev_attr_uframe_periodic_max);
+out:
+ return i;
+}
+
+static inline void remove_sysfs_files(struct fusbh200_hcd *fusbh200)
+{
+ struct device *controller = fusbh200_to_hcd(fusbh200)->self.controller;
+
+ device_remove_file(controller, &dev_attr_uframe_periodic_max);
+}
+/*-------------------------------------------------------------------------*/
+
+/* On some systems, leaving remote wakeup enabled prevents system shutdown.
+ * The firmware seems to think that powering off is a wakeup event!
+ * This routine turns off remote wakeup and everything else, on all ports.
+ */
+static void fusbh200_turn_off_all_ports(struct fusbh200_hcd *fusbh200)
+{
+ u32 __iomem *status_reg = &fusbh200->regs->port_status;
+
+ fusbh200_writel(fusbh200, PORT_RWC_BITS, status_reg);
+}
+
+/*
+ * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fusbh200_silence_controller(struct fusbh200_hcd *fusbh200)
+{
+ fusbh200_halt(fusbh200);
+
+ spin_lock_irq(&fusbh200->lock);
+ fusbh200->rh_state = FUSBH200_RH_HALTED;
+ fusbh200_turn_off_all_ports(fusbh200);
+ spin_unlock_irq(&fusbh200->lock);
+}
+
+/* fusbh200_shutdown kicks in for silicon on any bus (not just pci, etc).
+ * This forcibly disables dma and IRQs, helping kexec and other cases
+ * where the next system software may expect clean state.
+ */
+static void fusbh200_shutdown(struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
+
+ spin_lock_irq(&fusbh200->lock);
+ fusbh200->shutdown = true;
+ fusbh200->rh_state = FUSBH200_RH_STOPPING;
+ fusbh200->enabled_hrtimer_events = 0;
+ spin_unlock_irq(&fusbh200->lock);
+
+ fusbh200_silence_controller(fusbh200);
+
+ hrtimer_cancel(&fusbh200->hrtimer);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * fusbh200_work is called from some interrupts, timers, and so on.
+ * it calls driver completion functions, after dropping fusbh200->lock.
+ */
+static void fusbh200_work (struct fusbh200_hcd *fusbh200)
+{
+ /* another CPU may drop fusbh200->lock during a schedule scan while
+ * it reports urb completions. this flag guards against bogus
+ * attempts at re-entrant schedule scanning.
+ */
+ if (fusbh200->scanning) {
+ fusbh200->need_rescan = true;
+ return;
+ }
+ fusbh200->scanning = true;
+
+ rescan:
+ fusbh200->need_rescan = false;
+ if (fusbh200->async_count)
+ scan_async(fusbh200);
+ if (fusbh200->intr_count > 0)
+ scan_intr(fusbh200);
+ if (fusbh200->isoc_count > 0)
+ scan_isoc(fusbh200);
+ if (fusbh200->need_rescan)
+ goto rescan;
+ fusbh200->scanning = false;
+
+ /* the IO watchdog guards against hardware or driver bugs that
+ * misplace IRQs, and should let us run completely without IRQs.
+ * such lossage has been observed on both VT6202 and VT8235.
+ */
+ turn_on_io_watchdog(fusbh200);
+}
+
+/*
+ * Called when the fusbh200_hcd module is removed.
+ */
+static void fusbh200_stop (struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+
+ fusbh200_dbg (fusbh200, "stop\n");
+
+ /* no more interrupts ... */
+
+ spin_lock_irq(&fusbh200->lock);
+ fusbh200->enabled_hrtimer_events = 0;
+ spin_unlock_irq(&fusbh200->lock);
+
+ fusbh200_quiesce(fusbh200);
+ fusbh200_silence_controller(fusbh200);
+ fusbh200_reset (fusbh200);
+
+ hrtimer_cancel(&fusbh200->hrtimer);
+ remove_sysfs_files(fusbh200);
+ remove_debug_files (fusbh200);
+
+ /* root hub is shut down separately (first, when possible) */
+ spin_lock_irq (&fusbh200->lock);
+ end_free_itds(fusbh200);
+ spin_unlock_irq (&fusbh200->lock);
+ fusbh200_mem_cleanup (fusbh200);
+
+ fusbh200_dbg(fusbh200, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+ fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa,
+ fusbh200->stats.lost_iaa);
+ fusbh200_dbg (fusbh200, "complete %ld unlink %ld\n",
+ fusbh200->stats.complete, fusbh200->stats.unlink);
+
+ dbg_status (fusbh200, "fusbh200_stop completed",
+ fusbh200_readl(fusbh200, &fusbh200->regs->status));
+}
+
+/* one-time init, only for memory state */
+static int hcd_fusbh200_init(struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
+ u32 temp;
+ int retval;
+ u32 hcc_params;
+ struct fusbh200_qh_hw *hw;
+
+ spin_lock_init(&fusbh200->lock);
+
+ /*
+	 * keep the io watchdog enabled by default; good HCDs can turn it off later
+ */
+ fusbh200->need_io_watchdog = 1;
+
+ hrtimer_init(&fusbh200->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ fusbh200->hrtimer.function = fusbh200_hrtimer_func;
+ fusbh200->next_hrtimer_event = FUSBH200_HRTIMER_NO_EVENT;
+
+ hcc_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
+
+ /*
+ * by default set standard 80% (== 100 usec/uframe) max periodic
+ * bandwidth as required by USB 2.0
+ */
+ fusbh200->uframe_periodic_max = 100;
+
+ /*
+ * hw default: 1K periodic list heads, one per frame.
+ * periodic_size can shrink by USBCMD update if hcc_params allows.
+ */
+ fusbh200->periodic_size = DEFAULT_I_TDPS;
+ INIT_LIST_HEAD(&fusbh200->intr_qh_list);
+ INIT_LIST_HEAD(&fusbh200->cached_itd_list);
+
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ switch (FUSBH200_TUNE_FLS) {
+ case 0: fusbh200->periodic_size = 1024; break;
+ case 1: fusbh200->periodic_size = 512; break;
+ case 2: fusbh200->periodic_size = 256; break;
+ default: BUG();
+ }
+ }
+ if ((retval = fusbh200_mem_init(fusbh200, GFP_KERNEL)) < 0)
+ return retval;
+
+ /* controllers may cache some of the periodic schedule ... */
+ fusbh200->i_thresh = 2;
+
+ /*
+ * dedicate a qh for the async ring head, since we couldn't unlink
+ * a 'real' qh without stopping the async schedule [4.8]. use it
+ * as the 'reclamation list head' too.
+ * its dummy is used in hw_alt_next of many tds, to prevent the qh
+ * from automatically advancing to the next td after short reads.
+ */
+ fusbh200->async->qh_next.qh = NULL;
+ hw = fusbh200->async->hw;
+ hw->hw_next = QH_NEXT(fusbh200, fusbh200->async->qh_dma);
+ hw->hw_info1 = cpu_to_hc32(fusbh200, QH_HEAD);
+ hw->hw_token = cpu_to_hc32(fusbh200, QTD_STS_HALT);
+ hw->hw_qtd_next = FUSBH200_LIST_END(fusbh200);
+ fusbh200->async->qh_state = QH_STATE_LINKED;
+ hw->hw_alt_next = QTD_NEXT(fusbh200, fusbh200->async->dummy->qtd_dma);
+
+ /* clear interrupt enables, set irq latency */
+ if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+ log2_irq_thresh = 0;
+ temp = 1 << (16 + log2_irq_thresh);
+ if (HCC_CANPARK(hcc_params)) {
+ /* HW default park == 3, on hardware that supports it (like
+ * NVidia and ALI silicon), maximizes throughput on the async
+ * schedule by avoiding QH fetches between transfers.
+ *
+ * With fast usb storage devices and NForce2, "park" seems to
+ * make problems: throughput reduction (!), data errors...
+ */
+ if (park) {
+ park = min(park, (unsigned) 3);
+ temp |= CMD_PARK;
+ temp |= park << 8;
+ }
+ fusbh200_dbg(fusbh200, "park %d\n", park);
+ }
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ temp &= ~(3 << 2);
+ temp |= (FUSBH200_TUNE_FLS << 2);
+ }
+ fusbh200->command = temp;
+
+ /* Accept arbitrarily long scatter-gather lists */
+ if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+ hcd->self.sg_tablesize = ~0;
+ return 0;
+}
+
+/* start HC running; it's halted, hcd_fusbh200_init() has been run (once) */
+static int fusbh200_run (struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ u32 temp;
+ u32 hcc_params;
+
+ hcd->uses_new_polling = 1;
+
+ /* EHCI spec section 4.1 */
+
+ fusbh200_writel(fusbh200, fusbh200->periodic_dma, &fusbh200->regs->frame_list);
+ fusbh200_writel(fusbh200, (u32)fusbh200->async->qh_dma, &fusbh200->regs->async_next);
+
+ /*
+ * hcc_params controls whether fusbh200->regs->segment must (!!!)
+ * be used; it constrains QH/ITD/SITD and QTD locations.
+ * pci_pool consistent memory always uses segment zero.
+ * streaming mappings for I/O buffers, like pci_map_single(),
+ * can return segments above 4GB, if the device allows.
+ *
+ * NOTE: the dma mask is visible through dma_supported(), so
+ * drivers can pass this info along ... like NETIF_F_HIGHDMA,
+ * Scsi_Host.highmem_io, and so forth. It's readonly to all
+ * host side drivers though.
+ */
+ hcc_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
+
+ // Philips, Intel, and maybe others need CMD_RUN before the
+ // root hub will detect new devices (why?); NEC doesn't
+ fusbh200->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+ fusbh200->command |= CMD_RUN;
+ fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+ dbg_cmd (fusbh200, "init", fusbh200->command);
+
+ /*
+ * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
+ * are explicitly handed to companion controller(s), so no TT is
+ * involved with the root hub. (Except where one is integrated,
+ * and there's no companion controller unless maybe for USB OTG.)
+ *
+ * Turning on the CF flag will transfer ownership of all ports
+ * from the companions to the EHCI controller. If any of the
+ * companions are in the middle of a port reset at the time, it
+ * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
+ * guarantees that no resets are in progress. After we set CF,
+ * a short delay lets the hardware catch up; new resets shouldn't
+ * be started before the port switching actions could complete.
+ */
+ down_write(&ehci_cf_port_reset_rwsem);
+ fusbh200->rh_state = FUSBH200_RH_RUNNING;
+ fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted writes */
+ msleep(5);
+ up_write(&ehci_cf_port_reset_rwsem);
+ fusbh200->last_periodic_enable = ktime_get_real();
+
+ temp = HC_VERSION(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
+ fusbh200_info (fusbh200,
+ "USB %x.%x started, EHCI %x.%02x\n",
+ ((fusbh200->sbrn & 0xf0)>>4), (fusbh200->sbrn & 0x0f),
+ temp >> 8, temp & 0xff);
+
+ fusbh200_writel(fusbh200, INTR_MASK,
+ &fusbh200->regs->intr_enable); /* Turn On Interrupts */
+
+ /* GRR this is run-once init(), being done every time the HC starts.
+	 * So long as they're part of class devices, we can't do it in init()
+ * since the class device isn't created that early.
+ */
+ create_debug_files(fusbh200);
+ create_sysfs_files(fusbh200);
+
+ return 0;
+}
+
+static int fusbh200_setup(struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
+ int retval;
+
+ fusbh200->regs = (void __iomem *)fusbh200->caps +
+ HC_LENGTH(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
+ dbg_hcs_params(fusbh200, "reset");
+ dbg_hcc_params(fusbh200, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ fusbh200->hcs_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
+
+ fusbh200->sbrn = HCD_USB2;
+
+ /* data structure init */
+ retval = hcd_fusbh200_init(hcd);
+ if (retval)
+ return retval;
+
+ retval = fusbh200_halt(fusbh200);
+ if (retval)
+ return retval;
+
+ fusbh200_reset(fusbh200);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static irqreturn_t fusbh200_irq (struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ u32 status, masked_status, pcd_status = 0, cmd;
+ int bh;
+
+ spin_lock (&fusbh200->lock);
+
+ status = fusbh200_readl(fusbh200, &fusbh200->regs->status);
+
+ /* e.g. cardbus physical eject */
+ if (status == ~(u32) 0) {
+ fusbh200_dbg (fusbh200, "device removed\n");
+ goto dead;
+ }
+
+ /*
+ * We don't use STS_FLR, but some controllers don't like it to
+ * remain on, so mask it out along with the other status bits.
+ */
+ masked_status = status & (INTR_MASK | STS_FLR);
+
+ /* Shared IRQ? */
+ if (!masked_status || unlikely(fusbh200->rh_state == FUSBH200_RH_HALTED)) {
+ spin_unlock(&fusbh200->lock);
+ return IRQ_NONE;
+ }
+
+ /* clear (just) interrupts */
+ fusbh200_writel(fusbh200, masked_status, &fusbh200->regs->status);
+ cmd = fusbh200_readl(fusbh200, &fusbh200->regs->command);
+ bh = 0;
+
+ /* normal [4.15.1.2] or error [4.15.1.1] completion */
+ if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
+ if (likely ((status & STS_ERR) == 0))
+ COUNT (fusbh200->stats.normal);
+ else
+ COUNT (fusbh200->stats.error);
+ bh = 1;
+ }
+
+ /* complete the unlinking of some qh [4.15.2.3] */
+ if (status & STS_IAA) {
+
+ /* Turn off the IAA watchdog */
+ fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_IAA_WATCHDOG);
+
+ /*
+ * Mild optimization: Allow another IAAD to reset the
+ * hrtimer, if one occurs before the next expiration.
+ * In theory we could always cancel the hrtimer, but
+ * tests show that about half the time it will be reset
+ * for some other event anyway.
+ */
+ if (fusbh200->next_hrtimer_event == FUSBH200_HRTIMER_IAA_WATCHDOG)
+ ++fusbh200->next_hrtimer_event;
+
+ /* guard against (alleged) silicon errata */
+ if (cmd & CMD_IAAD)
+ fusbh200_dbg(fusbh200, "IAA with IAAD still set?\n");
+ if (fusbh200->async_iaa) {
+ COUNT(fusbh200->stats.iaa);
+ end_unlink_async(fusbh200);
+ } else
+ fusbh200_dbg(fusbh200, "IAA with nothing unlinked?\n");
+ }
+
+ /* remote wakeup [4.3.1] */
+ if (status & STS_PCD) {
+ int pstatus;
+ u32 __iomem *status_reg = &fusbh200->regs->port_status;
+
+ /* kick root hub later */
+ pcd_status = status;
+
+ /* resume root hub? */
+ if (fusbh200->rh_state == FUSBH200_RH_SUSPENDED)
+ usb_hcd_resume_root_hub(hcd);
+
+ pstatus = fusbh200_readl(fusbh200, status_reg);
+
+ if (test_bit(0, &fusbh200->suspended_ports) &&
+ ((pstatus & PORT_RESUME) ||
+ !(pstatus & PORT_SUSPEND)) &&
+ (pstatus & PORT_PE) &&
+ fusbh200->reset_done[0] == 0) {
+
+ /* start 20 msec resume signaling from this port,
+ * and make khubd collect PORT_STAT_C_SUSPEND to
+ * stop that signaling. Use 5 ms extra for safety,
+ * like usb_port_resume() does.
+ */
+ fusbh200->reset_done[0] = jiffies + msecs_to_jiffies(25);
+ set_bit(0, &fusbh200->resuming_ports);
+ fusbh200_dbg (fusbh200, "port 1 remote wakeup\n");
+ mod_timer(&hcd->rh_timer, fusbh200->reset_done[0]);
+ }
+ }
+
+ /* PCI errors [4.15.2.4] */
+ if (unlikely ((status & STS_FATAL) != 0)) {
+ fusbh200_err(fusbh200, "fatal error\n");
+ dbg_cmd(fusbh200, "fatal", cmd);
+ dbg_status(fusbh200, "fatal", status);
+dead:
+ usb_hc_died(hcd);
+
+ /* Don't let the controller do anything more */
+ fusbh200->shutdown = true;
+ fusbh200->rh_state = FUSBH200_RH_STOPPING;
+ fusbh200->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
+ fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+ fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
+ fusbh200_handle_controller_death(fusbh200);
+
+ /* Handle completions when the controller stops */
+ bh = 0;
+ }
+
+ if (bh)
+ fusbh200_work (fusbh200);
+ spin_unlock (&fusbh200->lock);
+ if (pcd_status)
+ usb_hcd_poll_rh_status(hcd);
+ return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * non-error returns are a promise to giveback() the urb later
+ * we drop ownership so next owner (or urb unlink) can get it
+ *
+ * urb + dev is in hcd.self.controller.urb_list
+ * we're queueing TDs onto software and hardware lists
+ *
+ * hcd-specific init for hcpriv hasn't been done yet
+ *
+ * NOTE: control, bulk, and interrupt share the same code to append TDs
+ * to a (possibly active) QH, and the same QH scanning code.
+ */
+static int fusbh200_urb_enqueue (
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags
+) {
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ struct list_head qtd_list;
+
+ INIT_LIST_HEAD (&qtd_list);
+
+ switch (usb_pipetype (urb->pipe)) {
+ case PIPE_CONTROL:
+ /* qh_completions() code doesn't handle all the fault cases
+ * in multi-TD control transfers. Even 1KB is rare anyway.
+ */
+ if (urb->transfer_buffer_length > (16 * 1024))
+ return -EMSGSIZE;
+ /* FALLTHROUGH */
+ /* case PIPE_BULK: */
+ default:
+ if (!qh_urb_transaction (fusbh200, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+ return submit_async(fusbh200, urb, &qtd_list, mem_flags);
+
+ case PIPE_INTERRUPT:
+ if (!qh_urb_transaction (fusbh200, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+ return intr_submit(fusbh200, urb, &qtd_list, mem_flags);
+
+ case PIPE_ISOCHRONOUS:
+ return itd_submit (fusbh200, urb, mem_flags);
+ }
+}
+
+/* remove from hardware lists
+ * completions normally happen asynchronously
+ */
+
+static int fusbh200_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ struct fusbh200_qh *qh;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto done;
+
+ switch (usb_pipetype (urb->pipe)) {
+ // case PIPE_CONTROL:
+ // case PIPE_BULK:
+ default:
+ qh = (struct fusbh200_qh *) urb->hcpriv;
+ if (!qh)
+ break;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ start_unlink_async(fusbh200, qh);
+ break;
+ case QH_STATE_UNLINK:
+ case QH_STATE_UNLINK_WAIT:
+ /* already started */
+ break;
+ case QH_STATE_IDLE:
+ /* QH might be waiting for a Clear-TT-Buffer */
+ qh_completions(fusbh200, qh);
+ break;
+ }
+ break;
+
+ case PIPE_INTERRUPT:
+ qh = (struct fusbh200_qh *) urb->hcpriv;
+ if (!qh)
+ break;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ start_unlink_intr(fusbh200, qh);
+ break;
+ case QH_STATE_IDLE:
+ qh_completions (fusbh200, qh);
+ break;
+ default:
+ fusbh200_dbg (fusbh200, "bogus qh %p state %d\n",
+ qh, qh->qh_state);
+ goto done;
+ }
+ break;
+
+ case PIPE_ISOCHRONOUS:
+ // itd...
+
+ // wait till next completion, do it then.
+ // completion irqs can wait up to 1024 msec,
+ break;
+ }
+done:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+// bulk qh holds the data toggle
+
+static void
+fusbh200_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ unsigned long flags;
+ struct fusbh200_qh *qh, *tmp;
+
+ /* ASSERT: any requests/urbs are being unlinked */
+ /* ASSERT: nobody can be submitting urbs for this any more */
+
+rescan:
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ qh = ep->hcpriv;
+ if (!qh)
+ goto done;
+
+ /* endpoints can be iso streams. for now, we don't
+ * accelerate iso completions ... so spin a while.
+ */
+ if (qh->hw == NULL) {
+ struct fusbh200_iso_stream *stream = ep->hcpriv;
+
+ if (!list_empty(&stream->td_list))
+ goto idle_timeout;
+
+ /* BUG_ON(!list_empty(&stream->free_list)); */
+ kfree(stream);
+ goto done;
+ }
+
+ if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
+ qh->qh_state = QH_STATE_IDLE;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ for (tmp = fusbh200->async->qh_next.qh;
+ tmp && tmp != qh;
+ tmp = tmp->qh_next.qh)
+ continue;
+ /* periodic qh self-unlinks on empty, and a COMPLETING qh
+ * may already be unlinked.
+ */
+ if (tmp)
+ start_unlink_async(fusbh200, qh);
+ /* FALL THROUGH */
+ case QH_STATE_UNLINK: /* wait for hw to finish? */
+ case QH_STATE_UNLINK_WAIT:
+idle_timeout:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ schedule_timeout_uninterruptible(1);
+ goto rescan;
+ case QH_STATE_IDLE: /* fully unlinked */
+ if (qh->clearing_tt)
+ goto idle_timeout;
+ if (list_empty (&qh->qtd_list)) {
+ qh_destroy(fusbh200, qh);
+ break;
+ }
+ /* else FALL THROUGH */
+ default:
+ /* caller was supposed to have unlinked any requests;
+ * that's not our job. just leak this memory.
+ */
+ fusbh200_err (fusbh200, "qh %p (#%02x) state %d%s\n",
+ qh, ep->desc.bEndpointAddress, qh->qh_state,
+ list_empty (&qh->qtd_list) ? "" : "(has tds)");
+ break;
+ }
+ done:
+ ep->hcpriv = NULL;
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+}
+
+static void
+fusbh200_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
+ struct fusbh200_qh *qh;
+ int eptype = usb_endpoint_type(&ep->desc);
+ int epnum = usb_endpoint_num(&ep->desc);
+ int is_out = usb_endpoint_dir_out(&ep->desc);
+ unsigned long flags;
+
+ if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+ return;
+
+ spin_lock_irqsave(&fusbh200->lock, flags);
+ qh = ep->hcpriv;
+
+ /* For Bulk and Interrupt endpoints we maintain the toggle state
+ * in the hardware; the toggle bits in udev aren't used at all.
+ * When an endpoint is reset by usb_clear_halt() we must reset
+ * the toggle bit in the QH.
+ */
+ if (qh) {
+ usb_settoggle(qh->dev, epnum, is_out, 0);
+ if (!list_empty(&qh->qtd_list)) {
+ WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+ } else if (qh->qh_state == QH_STATE_LINKED ||
+ qh->qh_state == QH_STATE_COMPLETING) {
+
+ /* The toggle value in the QH can't be updated
+ * while the QH is active. Unlink it now;
+ * re-linking will call qh_refresh().
+ */
+ if (eptype == USB_ENDPOINT_XFER_BULK)
+ start_unlink_async(fusbh200, qh);
+ else
+ start_unlink_intr(fusbh200, qh);
+ }
+ }
+ spin_unlock_irqrestore(&fusbh200->lock, flags);
+}
+
+static int fusbh200_get_frame (struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ return (fusbh200_read_frame_index(fusbh200) >> 3) % fusbh200->periodic_size;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The EHCI in ChipIdea HDRC cannot be a separate module or device,
+ * because its registers (and irq) are shared between host/gadget/otg
+ * functions and in order to facilitate role switching we cannot
+ * give the fusbh200 driver exclusive access to those.
+ */
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR (DRIVER_AUTHOR);
+MODULE_LICENSE ("GPL");
+
+static const struct hc_driver fusbh200_fusbh200_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Faraday USB2.0 Host Controller",
+ .hcd_priv_size = sizeof(struct fusbh200_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = fusbh200_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = hcd_fusbh200_init,
+ .start = fusbh200_run,
+ .stop = fusbh200_stop,
+ .shutdown = fusbh200_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = fusbh200_urb_enqueue,
+ .urb_dequeue = fusbh200_urb_dequeue,
+ .endpoint_disable = fusbh200_endpoint_disable,
+ .endpoint_reset = fusbh200_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = fusbh200_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = fusbh200_hub_status_data,
+ .hub_control = fusbh200_hub_control,
+ .bus_suspend = fusbh200_bus_suspend,
+ .bus_resume = fusbh200_bus_resume,
+
+ .relinquish_port = fusbh200_relinquish_port,
+ .port_handed_over = fusbh200_port_handed_over,
+
+ .clear_tt_buffer_complete = fusbh200_clear_tt_buffer_complete,
+};
+
+static void fusbh200_init(struct fusbh200_hcd *fusbh200)
+{
+ u32 reg;
+
+ reg = fusbh200_readl(fusbh200, &fusbh200->regs->bmcsr);
+ reg |= BMCSR_INT_POLARITY;
+ reg &= ~BMCSR_VBUS_OFF;
+ fusbh200_writel(fusbh200, reg, &fusbh200->regs->bmcsr);
+
+ reg = fusbh200_readl(fusbh200, &fusbh200->regs->bmier);
+ fusbh200_writel(fusbh200, reg | BMIER_OVC_EN | BMIER_VBUS_ERR_EN,
+ &fusbh200->regs->bmier);
+}
+
+/**
+ * fusbh200_hcd_probe - initialize Faraday FUSBH200 HCDs
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int fusbh200_hcd_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int irq;
+ int retval = -ENODEV;
+ struct fusbh200_hcd *fusbh200;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pdev->dev.power.power_state = PMSG_ON;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no IRQ. Check %s setup!\n",
+ dev_name(dev));
+ return -ENODEV;
+ }
+
+ irq = res->start;
+
+ hcd = usb_create_hcd(&fusbh200_fusbh200_hc_driver, dev,
+ dev_name(dev));
+ if (!hcd) {
+ dev_err(dev, "failed to create hcd with err %d\n", retval);
+ retval = -ENOMEM;
+ goto fail_create_hcd;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(dev));
+ retval = -ENODEV;
+ goto fail_request_resource;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->has_tt = 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+ fusbh200_fusbh200_hc_driver.description)) {
+ dev_dbg(dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto fail_request_resource;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(dev));
+ retval = -ENODEV;
+ goto fail_request_resource;
+ }
+
+ hcd->regs = ioremap_nocache(res->start, resource_size(res));
+ if (hcd->regs == NULL) {
+ dev_dbg(dev, "error mapping memory\n");
+ retval = -EFAULT;
+ goto fail_ioremap;
+ }
+
+ fusbh200 = hcd_to_fusbh200(hcd);
+
+ fusbh200->caps = hcd->regs;
+
+ retval = fusbh200_setup(hcd);
+ if (retval)
+ goto fail_add_hcd;
+
+ fusbh200_init(fusbh200);
+
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval) {
+ dev_err(dev, "failed to add hcd with err %d\n", retval);
+ goto fail_add_hcd;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ return retval;
+
+fail_add_hcd:
+ iounmap(hcd->regs);
+fail_ioremap:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+fail_request_resource:
+ usb_put_hcd(hcd);
+fail_create_hcd:
+ dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
+ return retval;
+}
+
+/**
+ * fusbh200_hcd_remove - shutdown processing for FUSBH200 HCDs
+ * @pdev: platform device of the USB Host Controller being removed
+ *
+ * Reverses the effect of fusbh200_hcd_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static int fusbh200_hcd_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ if (!hcd)
+ return 0;
+
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static struct platform_driver fusbh200_hcd_fusbh200_driver = {
+ .driver = {
+ .name = "fusbh200",
+ },
+ .probe = fusbh200_hcd_probe,
+ .remove = fusbh200_hcd_remove,
+};
+
+static int __init fusbh200_hcd_init(void)
+{
+ int retval = 0;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
+ set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+ if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
+ test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
+ printk(KERN_WARNING "Warning! fusbh200_hcd should always be loaded"
+ " before uhci_hcd and ohci_hcd, not after\n");
+
+ pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd\n",
+ hcd_name,
+ sizeof(struct fusbh200_qh), sizeof(struct fusbh200_qtd),
+ sizeof(struct fusbh200_itd));
+
+ fusbh200_debug_root = debugfs_create_dir("fusbh200", usb_debug_root);
+ if (!fusbh200_debug_root) {
+ retval = -ENOENT;
+ goto err_debug;
+ }
+
+ retval = platform_driver_register(&fusbh200_hcd_fusbh200_driver);
+ if (retval < 0)
+ goto clean;
+ return retval;
+
+ platform_driver_unregister(&fusbh200_hcd_fusbh200_driver);
+clean:
+ debugfs_remove(fusbh200_debug_root);
+ fusbh200_debug_root = NULL;
+err_debug:
+ clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+ return retval;
+}
+module_init(fusbh200_hcd_init);
+
+static void __exit fusbh200_hcd_cleanup(void)
+{
+ platform_driver_unregister(&fusbh200_hcd_fusbh200_driver);
+ debugfs_remove(fusbh200_debug_root);
+ clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+}
+module_exit(fusbh200_hcd_cleanup);
diff --git a/drivers/usb/host/fusbh200.h b/drivers/usb/host/fusbh200.h
new file mode 100644
index 00000000000..6b719e066c3
--- /dev/null
+++ b/drivers/usb/host/fusbh200.h
@@ -0,0 +1,731 @@
+#ifndef __LINUX_FUSBH200_H
+#define __LINUX_FUSBH200_H
+
+/* definitions used for the EHCI driver */
+
+/*
+ * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
+ * __leXX (normally) or __beXX (given FUSBH200_BIG_ENDIAN_DESC), depending on
+ * the host controller implementation.
+ *
+ * To facilitate the strongest possible byte-order checking from "sparse"
+ * and so on, we use __leXX unless that's not practical.
+ */
+#define __hc32 __le32
+#define __hc16 __le16
+
+/* statistics can be kept for tuning/monitoring */
+struct fusbh200_stats {
+ /* irq usage */
+ unsigned long normal;
+ unsigned long error;
+ unsigned long iaa;
+ unsigned long lost_iaa;
+
+ /* termination of urbs from core */
+ unsigned long complete;
+ unsigned long unlink;
+};
+
+/* fusbh200_hcd->lock guards shared data against other CPUs:
+ * fusbh200_hcd: async, unlink, periodic (and shadow), ...
+ * usb_host_endpoint: hcpriv
+ * fusbh200_qh: qh_next, qtd_list
+ * fusbh200_qtd: qtd_list
+ *
+ * Also, hold this lock when talking to HC registers or
+ * when updating hw_* fields in shared qh/qtd/... structures.
+ */
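As a rough sketch of the locking rule described above — IRQ-safe acquisition of fusbh200->lock around register writes and updates to shared hw_* fields — the hypothetical helper below illustrates the pattern (it is not part of this patch, and it leans on the structure members and register helpers declared later in this header):

static void example_locked_command_update(struct fusbh200_hcd *fusbh200,
		u32 bits)
{
	unsigned long flags;

	/* IRQ-safe: the same lock is also taken from the hard-irq handler */
	spin_lock_irqsave(&fusbh200->lock, flags);
	fusbh200->command |= bits;
	fusbh200_writel(fusbh200, fusbh200->command,
			&fusbh200->regs->command);
	spin_unlock_irqrestore(&fusbh200->lock, flags);
}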
+
+#define FUSBH200_MAX_ROOT_PORTS 1 /* see HCS_N_PORTS */
+
+/*
+ * fusbh200_rh_state values of FUSBH200_RH_RUNNING or above mean that the
+ * controller may be doing DMA. Lower values mean there's no DMA.
+ */
+enum fusbh200_rh_state {
+ FUSBH200_RH_HALTED,
+ FUSBH200_RH_SUSPENDED,
+ FUSBH200_RH_RUNNING,
+ FUSBH200_RH_STOPPING
+};
+
+/*
+ * Timer events, ordered by increasing delay length.
+ * Always update event_delays_ns[] and event_handlers[] (defined in
+ * ehci-timer.c) in parallel with this list.
+ */
+enum fusbh200_hrtimer_event {
+ FUSBH200_HRTIMER_POLL_ASS, /* Poll for async schedule off */
+ FUSBH200_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */
+ FUSBH200_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
+ FUSBH200_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
+ FUSBH200_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
+ FUSBH200_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
+ FUSBH200_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
+ FUSBH200_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
+ FUSBH200_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
+ FUSBH200_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */
+ FUSBH200_HRTIMER_NUM_EVENTS /* Must come last */
+};
+#define FUSBH200_HRTIMER_NO_EVENT 99
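The comment above this enum asks for two lookup tables kept in lockstep with it; a minimal sketch of that pattern — with placeholder delays and hypothetical handler names, not the driver's actual entries — could look like:

/* Sketch only: one delay and one handler per event, indexed by the enum
 * so the lists cannot drift out of order.
 */
static void example_poll_ASS(struct fusbh200_hcd *fusbh200);	/* hypothetical */
static void example_poll_PSS(struct fusbh200_hcd *fusbh200);	/* hypothetical */

static unsigned example_event_delays_ns[] = {
	[FUSBH200_HRTIMER_POLL_ASS] = 1 * NSEC_PER_MSEC,	/* placeholder */
	[FUSBH200_HRTIMER_POLL_PSS] = 1 * NSEC_PER_MSEC,	/* placeholder */
	/* ... one entry per fusbh200_hrtimer_event, in enum order ... */
};

static void (*example_event_handlers[])(struct fusbh200_hcd *) = {
	example_poll_ASS,	/* FUSBH200_HRTIMER_POLL_ASS */
	example_poll_PSS,	/* FUSBH200_HRTIMER_POLL_PSS */
	/* ... */
};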
+
+struct fusbh200_hcd { /* one per controller */
+ /* timing support */
+ enum fusbh200_hrtimer_event next_hrtimer_event;
+ unsigned enabled_hrtimer_events;
+ ktime_t hr_timeouts[FUSBH200_HRTIMER_NUM_EVENTS];
+ struct hrtimer hrtimer;
+
+ int PSS_poll_count;
+ int ASS_poll_count;
+ int died_poll_count;
+
+ /* glue to PCI and HCD framework */
+ struct fusbh200_caps __iomem *caps;
+ struct fusbh200_regs __iomem *regs;
+ struct fusbh200_dbg_port __iomem *debug;
+
+ __u32 hcs_params; /* cached register copy */
+ spinlock_t lock;
+ enum fusbh200_rh_state rh_state;
+
+ /* general schedule support */
+ bool scanning:1;
+ bool need_rescan:1;
+ bool intr_unlinking:1;
+ bool async_unlinking:1;
+ bool shutdown:1;
+ struct fusbh200_qh *qh_scan_next;
+
+ /* async schedule support */
+ struct fusbh200_qh *async;
+ struct fusbh200_qh *dummy; /* For AMD quirk use */
+ struct fusbh200_qh *async_unlink;
+ struct fusbh200_qh *async_unlink_last;
+ struct fusbh200_qh *async_iaa;
+ unsigned async_unlink_cycle;
+ unsigned async_count; /* async activity count */
+
+ /* periodic schedule support */
+#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
+ unsigned periodic_size;
+ __hc32 *periodic; /* hw periodic table */
+ dma_addr_t periodic_dma;
+ struct list_head intr_qh_list;
+ unsigned i_thresh; /* uframes HC might cache */
+
+ union fusbh200_shadow *pshadow; /* mirror hw periodic table */
+ struct fusbh200_qh *intr_unlink;
+ struct fusbh200_qh *intr_unlink_last;
+ unsigned intr_unlink_cycle;
+ unsigned now_frame; /* frame from HC hardware */
+ unsigned next_frame; /* scan periodic, start here */
+ unsigned intr_count; /* intr activity count */
+ unsigned isoc_count; /* isoc activity count */
+ unsigned periodic_count; /* periodic activity count */
+ unsigned uframe_periodic_max; /* max periodic time per uframe */
+
+
+ /* list of itds completed while now_frame was still active */
+ struct list_head cached_itd_list;
+ struct fusbh200_itd *last_itd_to_free;
+
+ /* per root hub port */
+ unsigned long reset_done [FUSBH200_MAX_ROOT_PORTS];
+
+ /* bit vectors (one bit per port) */
+ unsigned long bus_suspended; /* which ports were
+ already suspended at the start of a bus suspend */
+ unsigned long companion_ports; /* which ports are
+ dedicated to the companion controller */
+ unsigned long owned_ports; /* which ports are
+ owned by the companion during a bus suspend */
+ unsigned long port_c_suspend; /* which ports have
+ the change-suspend feature turned on */
+ unsigned long suspended_ports; /* which ports are
+ suspended */
+ unsigned long resuming_ports; /* which ports have
+ started to resume */
+
+ /* per-HC memory pools (could be per-bus, but ...) */
+ struct dma_pool *qh_pool; /* qh per active urb */
+ struct dma_pool *qtd_pool; /* one or more per qh */
+ struct dma_pool *itd_pool; /* itd per iso urb */
+
+ unsigned random_frame;
+ unsigned long next_statechange;
+ ktime_t last_periodic_enable;
+ u32 command;
+
+ /* SILICON QUIRKS */
+ unsigned need_io_watchdog:1;
+ unsigned fs_i_thresh:1; /* Intel iso scheduling */
+
+ u8 sbrn; /* packed release number */
+
+ /* irq statistics */
+ struct fusbh200_stats stats;
+# define COUNT(x) do { (x)++; } while (0)
+
+ /* debug files */
+ struct dentry *debug_dir;
+};
+
+/* convert between an HCD pointer and the corresponding FUSBH200_HCD */
+static inline struct fusbh200_hcd *hcd_to_fusbh200 (struct usb_hcd *hcd)
+{
+ return (struct fusbh200_hcd *) (hcd->hcd_priv);
+}
+static inline struct usb_hcd *fusbh200_to_hcd (struct fusbh200_hcd *fusbh200)
+{
+ return container_of ((void *) fusbh200, struct usb_hcd, hcd_priv);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
+
+/* Section 2.2 Host Controller Capability Registers */
+struct fusbh200_caps {
+ /* these fields are specified as 8 and 16 bit registers,
+ * but some hosts can't perform 8 or 16 bit PCI accesses.
+ * some hosts treat caplength and hciversion as parts of a 32-bit
+ * register, others treat them as two separate registers, this
+ * affects the memory map for big endian controllers.
+ */
+ u32 hc_capbase;
+#define HC_LENGTH(fusbh200, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
+ (fusbh200_big_endian_capbase(fusbh200) ? 24 : 0)))
+#define HC_VERSION(fusbh200, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \
+ (fusbh200_big_endian_capbase(fusbh200) ? 0 : 16)))
+ u32 hcs_params; /* HCSPARAMS - offset 0x4 */
+#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
+
+ u32 hcc_params; /* HCCPARAMS - offset 0x8 */
+#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
+#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
+ u8 portroute[8]; /* nibbles for routing - offset 0xC */
+};
+
+
+/* Section 2.3 Host Controller Operational Registers */
+struct fusbh200_regs {
+
+ /* USBCMD: offset 0x00 */
+ u32 command;
+
+/* EHCI 1.1 addendum */
+/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+#define CMD_PARK (1<<11) /* enable "park" on async qh */
+#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
+#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
+#define CMD_ASE (1<<5) /* async schedule enable */
+#define CMD_PSE (1<<4) /* periodic schedule enable */
+/* 3:2 is periodic frame list size */
+#define CMD_RESET (1<<1) /* reset HC not bus */
+#define CMD_RUN (1<<0) /* start/stop HC */
+
+ /* USBSTS: offset 0x04 */
+ u32 status;
+#define STS_ASS (1<<15) /* Async Schedule Status */
+#define STS_PSS (1<<14) /* Periodic Schedule Status */
+#define STS_RECL (1<<13) /* Reclamation */
+#define STS_HALT (1<<12) /* Not running (any reason) */
+/* some bits reserved */
+ /* these STS_* flags are also intr_enable bits (USBINTR) */
+#define STS_IAA (1<<5) /* Interrupted on async advance */
+#define STS_FATAL (1<<4) /* such as some PCI access errors */
+#define STS_FLR (1<<3) /* frame list rolled over */
+#define STS_PCD (1<<2) /* port change detect */
+#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
+#define STS_INT (1<<0) /* "normal" completion (short, ...) */
+
+ /* USBINTR: offset 0x08 */
+ u32 intr_enable;
+
+ /* FRINDEX: offset 0x0C */
+ u32 frame_index; /* current microframe number */
+ /* CTRLDSSEGMENT: offset 0x10 */
+ u32 segment; /* address bits 63:32 if needed */
+ /* PERIODICLISTBASE: offset 0x14 */
+ u32 frame_list; /* points to periodic list */
+ /* ASYNCLISTADDR: offset 0x18 */
+ u32 async_next; /* address of next async queue head */
+
+ u32 reserved1;
+ /* PORTSC: offset 0x20 */
+ u32 port_status;
+/* 31:23 reserved */
+#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
+#define PORT_RESET (1<<8) /* reset port */
+#define PORT_SUSPEND (1<<7) /* suspend port */
+#define PORT_RESUME (1<<6) /* resume it */
+#define PORT_PEC (1<<3) /* port enable change */
+#define PORT_PE (1<<2) /* port enable */
+#define PORT_CSC (1<<1) /* connect status change */
+#define PORT_CONNECT (1<<0) /* device connected */
+#define PORT_RWC_BITS (PORT_CSC | PORT_PEC)
+
+ u32 reserved2[3];
+
+ /* BMCSR: offset 0x30 */
+ u32 bmcsr; /* Bus Monitor Control/Status Register */
+#define BMCSR_HOST_SPD_TYP (3<<9)
+#define BMCSR_VBUS_OFF (1<<4)
+#define BMCSR_INT_POLARITY (1<<3)
+
+ /* BMISR: offset 0x34 */
+ u32 bmisr; /* Bus Monitor Interrupt Status Register */
+#define BMISR_OVC (1<<1)
+
+ /* BMIER: offset 0x38 */
+ u32 bmier; /* Bus Monitor Interrupt Enable Register */
+#define BMIER_OVC_EN (1<<1)
+#define BMIER_VBUS_ERR_EN (1<<0)
+};
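Because PORT_CSC and PORT_PEC are write-one-to-clear, PORTSC updates normally mask PORT_RWC_BITS out before writing the register back. A hedged sketch of that idiom (the helper name is illustrative, and fusbh200_readl/fusbh200_writel are defined near the end of this header):

static void example_set_portsc_bit(struct fusbh200_hcd *fusbh200, u32 bit)
{
	u32 __iomem *status_reg = &fusbh200->regs->port_status;
	u32 temp;

	/* don't clear the change bits by accidentally writing 1s back */
	temp = fusbh200_readl(fusbh200, status_reg) & ~PORT_RWC_BITS;
	fusbh200_writel(fusbh200, temp | bit, status_reg);
}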
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console. (nonstandard enumeration.)
+ */
+struct fusbh200_dbg_port {
+ u32 control;
+#define DBGP_OWNER (1<<30)
+#define DBGP_ENABLED (1<<28)
+#define DBGP_DONE (1<<16)
+#define DBGP_INUSE (1<<10)
+#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
+# define DBGP_ERR_BAD 1
+# define DBGP_ERR_SIGNAL 2
+#define DBGP_ERROR (1<<6)
+#define DBGP_GO (1<<5)
+#define DBGP_OUT (1<<4)
+#define DBGP_LEN(x) (((x)>>0)&0x0f)
+ u32 pids;
+#define DBGP_PID_GET(x) (((x)>>16)&0xff)
+#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
+ u32 data03;
+ u32 data47;
+ u32 address;
+#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
+};
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+#include <linux/init.h>
+extern int __init early_dbgp_init(char *s);
+extern struct console early_dbgp_console;
+#endif /* CONFIG_EARLY_PRINTK_DBGP */
+
+struct usb_hcd;
+
+static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return 1; /* Shouldn't this be 0? */
+}
+
+static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return -1;
+}
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+/* Call backs from fusbh200 host driver to fusbh200 debug driver */
+extern int dbgp_external_startup(struct usb_hcd *);
+extern int dbgp_reset_prep(struct usb_hcd *hcd);
+#else
+static inline int dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return xen_dbgp_reset_prep(hcd);
+}
+static inline int dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return xen_dbgp_external_startup(hcd);
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#define QTD_NEXT(fusbh200, dma) cpu_to_hc32(fusbh200, (u32)dma)
+
+/*
+ * EHCI Specification 0.95 Section 3.5
+ * QTD: describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
+ *
+ * These are associated only with "QH" (Queue Head) structures,
+ * used with control, bulk, and interrupt transfers.
+ */
+struct fusbh200_qtd {
+ /* first part defined by EHCI spec */
+ __hc32 hw_next; /* see EHCI 3.5.1 */
+ __hc32 hw_alt_next; /* see EHCI 3.5.2 */
+ __hc32 hw_token; /* see EHCI 3.5.3 */
+#define QTD_TOGGLE (1 << 31) /* data toggle */
+#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
+#define QTD_IOC (1 << 15) /* interrupt on complete */
+#define QTD_CERR(tok) (((tok)>>10) & 0x3)
+#define QTD_PID(tok) (((tok)>>8) & 0x3)
+#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */
+#define QTD_STS_HALT (1 << 6) /* halted on error */
+#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */
+#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
+#define QTD_STS_XACT (1 << 3) /* device gave illegal response */
+#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
+#define QTD_STS_STS (1 << 1) /* split transaction state */
+#define QTD_STS_PING (1 << 0) /* issue PING? */
+
+#define ACTIVE_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_ACTIVE)
+#define HALT_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_HALT)
+#define STATUS_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_STS)
+
+ __hc32 hw_buf [5]; /* see EHCI 3.5.4 */
+ __hc32 hw_buf_hi [5]; /* Appendix B */
+
+ /* the rest is HCD-private */
+ dma_addr_t qtd_dma; /* qtd address */
+ struct list_head qtd_list; /* sw qtd list */
+ struct urb *urb; /* qtd's urb */
+ size_t length; /* length of buffer */
+} __attribute__ ((aligned (32)));
+
+/* mask NakCnt+T in qh->hw_alt_next */
+#define QTD_MASK(fusbh200) cpu_to_hc32 (fusbh200, ~0x1f)
+
+#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1)
+
+/*-------------------------------------------------------------------------*/
+
+/* type tag from {qh,itd,fstn}->hw_next */
+#define Q_NEXT_TYPE(fusbh200,dma) ((dma) & cpu_to_hc32(fusbh200, 3 << 1))
+
+/*
+ * Now the following defines are not converted using the
+ * cpu_to_le32() macro anymore, since we have to support
+ * "dynamic" switching between be and le support, so that the driver
+ * can be used on one system with SoC EHCI controller using big-endian
+ * descriptors as well as a normal little-endian PCI EHCI controller.
+ */
+/* values for that type tag */
+#define Q_TYPE_ITD (0 << 1)
+#define Q_TYPE_QH (1 << 1)
+#define Q_TYPE_SITD (2 << 1)
+#define Q_TYPE_FSTN (3 << 1)
+
+/* next async queue entry, or pointer to interrupt/periodic QH */
+#define QH_NEXT(fusbh200,dma) (cpu_to_hc32(fusbh200, (((u32)dma)&~0x01f)|Q_TYPE_QH))
+
+/* for periodic/async schedules and qtd lists, mark end of list */
+#define FUSBH200_LIST_END(fusbh200) cpu_to_hc32(fusbh200, 1) /* "null pointer" to hw */
+
+/*
+ * Entries in periodic shadow table are pointers to one of four kinds
+ * of data structure. That's dictated by the hardware; a type tag is
+ * encoded in the low bits of the hardware's periodic schedule. Use
+ * Q_NEXT_TYPE to get the tag.
+ *
+ * For entries in the async schedule, the type tag always says "qh".
+ */
+union fusbh200_shadow {
+ struct fusbh200_qh *qh; /* Q_TYPE_QH */
+ struct fusbh200_itd *itd; /* Q_TYPE_ITD */
+ struct fusbh200_fstn *fstn; /* Q_TYPE_FSTN */
+ __hc32 *hw_next; /* (all types) */
+ void *ptr;
+};
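To show how the hardware type tag picks the matching shadow member, here is a hedged sketch of a single traversal step (modeled on the usual EHCI helper; the name is illustrative, and it relies on the qh/itd/fstn structures and hc32_to_cpu() defined later in this header):

static union fusbh200_shadow *
example_next_shadow(struct fusbh200_hcd *fusbh200,
		union fusbh200_shadow *prev, __hc32 tag)
{
	switch (hc32_to_cpu(fusbh200, tag)) {
	case Q_TYPE_QH:
		return &prev->qh->qh_next;
	case Q_TYPE_FSTN:
		return &prev->fstn->fstn_next;
	default:	/* Q_TYPE_ITD; this union has no sitd member */
		return &prev->itd->itd_next;
	}
}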
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear in both the async and (for interrupt) periodic schedules.
+ */
+
+/* first part defined by EHCI spec */
+struct fusbh200_qh_hw {
+ __hc32 hw_next; /* see EHCI 3.6.1 */
+ __hc32 hw_info1; /* see EHCI 3.6.2 */
+#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */
+#define QH_HEAD (1 << 15) /* Head of async reclamation list */
+#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */
+#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */
+#define QH_LOW_SPEED (1 << 12)
+#define QH_FULL_SPEED (0 << 12)
+#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */
+ __hc32 hw_info2; /* see EHCI 3.6.2 */
+#define QH_SMASK 0x000000ff
+#define QH_CMASK 0x0000ff00
+#define QH_HUBADDR 0x007f0000
+#define QH_HUBPORT 0x3f800000
+#define QH_MULT 0xc0000000
+ __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */
+
+ /* qtd overlay (hardware parts of a struct fusbh200_qtd) */
+ __hc32 hw_qtd_next;
+ __hc32 hw_alt_next;
+ __hc32 hw_token;
+ __hc32 hw_buf [5];
+ __hc32 hw_buf_hi [5];
+} __attribute__ ((aligned(32)));
+
+struct fusbh200_qh {
+ struct fusbh200_qh_hw *hw; /* Must come first */
+ /* the rest is HCD-private */
+ dma_addr_t qh_dma; /* address of qh */
+ union fusbh200_shadow qh_next; /* ptr to qh; or periodic */
+ struct list_head qtd_list; /* sw qtd list */
+ struct list_head intr_node; /* list of intr QHs */
+ struct fusbh200_qtd *dummy;
+ struct fusbh200_qh *unlink_next; /* next on unlink list */
+
+ unsigned unlink_cycle;
+
+ u8 needs_rescan; /* Dequeue during giveback */
+ u8 qh_state;
+#define QH_STATE_LINKED 1 /* HC sees this */
+#define QH_STATE_UNLINK 2 /* HC may still see this */
+#define QH_STATE_IDLE 3 /* HC doesn't see this */
+#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */
+#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
+
+ u8 xacterrs; /* XactErr retry counter */
+#define QH_XACTERR_MAX 32 /* XactErr retry limit */
+
+ /* periodic schedule info */
+ u8 usecs; /* intr bandwidth */
+ u8 gap_uf; /* uframes split/csplit gap */
+ u8 c_usecs; /* ... split completion bw */
+ u16 tt_usecs; /* tt downstream bandwidth */
+ unsigned short period; /* polling interval */
+ unsigned short start; /* where polling starts */
+#define NO_FRAME ((unsigned short)~0) /* pick new start */
+
+ struct usb_device *dev; /* access to TT */
+ unsigned is_out:1; /* bulk or intr OUT */
+ unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* description of one iso transaction (up to 3 KB data if highspeed) */
+struct fusbh200_iso_packet {
+ /* These will be copied to iTD when scheduling */
+ u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */
+ __hc32 transaction; /* itd->hw_transaction[i] |= */
+ u8 cross; /* buf crosses pages */
+ /* for full speed OUT splits */
+ u32 buf1;
+};
+
+/* temporary schedule data for packets from iso urbs (both speeds)
+ * each packet is one logical usb transaction to the device (not TT),
+ * beginning at stream->next_uframe
+ */
+struct fusbh200_iso_sched {
+ struct list_head td_list;
+ unsigned span;
+ struct fusbh200_iso_packet packet [0];
+};
+
+/*
+ * fusbh200_iso_stream - groups all (s)itds for this endpoint.
+ * acts like a qh would, if EHCI had them for ISO.
+ */
+struct fusbh200_iso_stream {
+ /* first field matches fusbh200_qh, but is NULL */
+ struct fusbh200_qh_hw *hw;
+
+ u8 bEndpointAddress;
+ u8 highspeed;
+ struct list_head td_list; /* queued itds */
+ struct list_head free_list; /* list of unused itds */
+ struct usb_device *udev;
+ struct usb_host_endpoint *ep;
+
+ /* output of (re)scheduling */
+ int next_uframe;
+ __hc32 splits;
+
+ /* the rest is derived from the endpoint descriptor,
+ * trusting urb->interval == f(epdesc->bInterval) and
+ * including the extra info for hw_bufp[0..2]
+ */
+ u8 usecs, c_usecs;
+ u16 interval;
+ u16 tt_usecs;
+ u16 maxp;
+ u16 raw_mask;
+ unsigned bandwidth;
+
+ /* This is used to initialize iTD's hw_bufp fields */
+ __hc32 buf0;
+ __hc32 buf1;
+ __hc32 buf2;
+
+ /* this is used to initialize sITD's tt info */
+ __hc32 address;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.3
+ * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
+ *
+ * Schedule records for high speed iso xfers
+ */
+struct fusbh200_itd {
+ /* first part defined by EHCI spec */
+ __hc32 hw_next; /* see EHCI 3.3.1 */
+ __hc32 hw_transaction [8]; /* see EHCI 3.3.2 */
+#define FUSBH200_ISOC_ACTIVE (1<<31) /* activate transfer this slot */
+#define FUSBH200_ISOC_BUF_ERR (1<<30) /* Data buffer error */
+#define FUSBH200_ISOC_BABBLE (1<<29) /* babble detected */
+#define FUSBH200_ISOC_XACTERR (1<<28) /* XactErr - transaction error */
+#define FUSBH200_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff)
+#define FUSBH200_ITD_IOC (1 << 15) /* interrupt on complete */
+
+#define ITD_ACTIVE(fusbh200) cpu_to_hc32(fusbh200, FUSBH200_ISOC_ACTIVE)
+
+ __hc32 hw_bufp [7]; /* see EHCI 3.3.3 */
+ __hc32 hw_bufp_hi [7]; /* Appendix B */
+
+ /* the rest is HCD-private */
+ dma_addr_t itd_dma; /* for this itd */
+ union fusbh200_shadow itd_next; /* ptr to periodic q entry */
+
+ struct urb *urb;
+ struct fusbh200_iso_stream *stream; /* endpoint's queue */
+ struct list_head itd_list; /* list of stream's itds */
+
+ /* any/all hw_transactions here may be used by that urb */
+ unsigned frame; /* where scheduled */
+ unsigned pg;
+ unsigned index[8]; /* in urb->iso_frame_desc */
+} __attribute__ ((aligned (32)));
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.96 Section 3.7
+ * Periodic Frame Span Traversal Node (FSTN)
+ *
+ * Manages split interrupt transactions (using TT) that span frame boundaries
+ * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN
+ * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
+ * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
+ */
+struct fusbh200_fstn {
+ __hc32 hw_next; /* any periodic q entry */
+ __hc32 hw_prev; /* qh or FUSBH200_LIST_END */
+
+ /* the rest is HCD-private */
+ dma_addr_t fstn_dma;
+ union fusbh200_shadow fstn_next; /* ptr to periodic q entry */
+} __attribute__ ((aligned (32)));
+
+/*-------------------------------------------------------------------------*/
+
+/* Prepare the PORTSC wakeup flags during controller suspend/resume */
+
+#define fusbh200_prepare_ports_for_controller_suspend(fusbh200, do_wakeup) \
+ fusbh200_adjust_port_wakeup_flags(fusbh200, true, do_wakeup);
+
+#define fusbh200_prepare_ports_for_controller_resume(fusbh200) \
+ fusbh200_adjust_port_wakeup_flags(fusbh200, false, false);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Some EHCI controllers have a Transaction Translator built into the
+ * root hub. This is a non-standard feature. Each controller will need
+ * to add code to the following inline functions, and call them as
+ * needed (mostly in root hub code).
+ */
+
+static inline unsigned int
+fusbh200_get_speed(struct fusbh200_hcd *fusbh200, unsigned int portsc)
+{
+ return (readl(&fusbh200->regs->bmcsr)
+ & BMCSR_HOST_SPD_TYP) >> 9;
+}
+
+/* Returns the speed of a device attached to a port on the root hub. */
+static inline unsigned int
+fusbh200_port_speed(struct fusbh200_hcd *fusbh200, unsigned int portsc)
+{
+ switch (fusbh200_get_speed(fusbh200, portsc)) {
+ case 0:
+ return 0;
+ case 1:
+ return USB_PORT_STAT_LOW_SPEED;
+ case 2:
+ default:
+ return USB_PORT_STAT_HIGH_SPEED;
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define fusbh200_has_fsl_portno_bug(e) (0)
+
+/*
+ * While most USB host controllers implement their registers in
+ * little-endian format, a minority (celleb companion chip) implement
+ * them in big endian format.
+ *
+ * This attempts to support either format at compile time without a
+ * runtime penalty, or both formats with the additional overhead
+ * of checking a flag bit.
+ *
+ */
+
+#define fusbh200_big_endian_mmio(e) 0
+#define fusbh200_big_endian_capbase(e) 0
+
+static inline unsigned int fusbh200_readl(const struct fusbh200_hcd *fusbh200,
+ __u32 __iomem * regs)
+{
+ return readl(regs);
+}
+
+static inline void fusbh200_writel(const struct fusbh200_hcd *fusbh200,
+ const unsigned int val, __u32 __iomem *regs)
+{
+ writel(val, regs);
+}
+
+/* cpu to fusbh200 */
+static inline __hc32 cpu_to_hc32 (const struct fusbh200_hcd *fusbh200, const u32 x)
+{
+ return cpu_to_le32(x);
+}
+
+/* fusbh200 to cpu */
+static inline u32 hc32_to_cpu (const struct fusbh200_hcd *fusbh200, const __hc32 x)
+{
+ return le32_to_cpu(x);
+}
+
+static inline u32 hc32_to_cpup (const struct fusbh200_hcd *fusbh200, const __hc32 *x)
+{
+ return le32_to_cpup(x);
+}
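A minimal usage sketch for these helpers — the function and the choice of operation are illustrative assumptions, not code from this patch — showing that shared hw_* fields are always converted at the access site:

static void example_halt_qtd(struct fusbh200_hcd *fusbh200,
		struct fusbh200_qtd *qtd)
{
	u32 token = hc32_to_cpu(fusbh200, qtd->hw_token);

	/* clear ACTIVE and set HALT, then write back in HC byte order */
	token &= ~QTD_STS_ACTIVE;
	token |= QTD_STS_HALT;
	qtd->hw_token = cpu_to_hc32(fusbh200, token);
}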
+
+/*-------------------------------------------------------------------------*/
+
+static inline unsigned fusbh200_read_frame_index(struct fusbh200_hcd *fusbh200)
+{
+ return fusbh200_readl(fusbh200, &fusbh200->regs->frame_index);
+}
+
+#define fusbh200_itdlen(urb, desc, t) ({ \
+ usb_pipein((urb)->pipe) ? \
+ (desc)->length - FUSBH200_ITD_LENGTH(t) : \
+ FUSBH200_ITD_LENGTH(t); \
+})
+/*-------------------------------------------------------------------------*/
+
+#endif /* __LINUX_FUSBH200_H */
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 9bfac657572..d0d8fadf706 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -54,7 +54,6 @@
* DWA).
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>
@@ -86,7 +85,7 @@ static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id)
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
cluster_id,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n",
cluster_id, result);
@@ -106,7 +105,7 @@ static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
interval << 8 | slots,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
}
/*
@@ -161,6 +160,13 @@ static int hwahc_op_start(struct usb_hcd *usb_hcd)
usb_hcd->uses_new_polling = 1;
set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
usb_hcd->state = HC_STATE_RUNNING;
+
+ /*
+ * prevent USB core from suspending the root hub since
+ * bus_suspend and bus_resume are not yet supported.
+ */
+ pm_runtime_get_noresume(&usb_hcd->self.root_hub->dev);
+
result = 0;
out:
mutex_unlock(&wusbhc->mutex);
@@ -192,10 +198,14 @@ static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
- dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
- usb_hcd, hwahc);
- return -ENOSYS;
+ /*
+ * We cannot query the HWA for the WUSB time since that requires sending
+ * a synchronous URB and this function can be called in_interrupt.
+ * Instead, query the USB frame number for our parent and use that.
+ */
+ return usb_get_current_frame_number(wa->usb_dev);
}
static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
@@ -213,7 +223,7 @@ static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb,
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- return wa_urb_dequeue(&hwahc->wa, urb);
+ return wa_urb_dequeue(&hwahc->wa, urb, status);
}
/*
@@ -251,8 +261,44 @@ static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
dev_err(dev, "cannot listen to notifications: %d\n", result);
goto error_stop;
}
+ /*
+ * If WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS is set,
+ * disable transfer notifications.
+ */
+ if (hwahc->wa.quirks &
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS) {
+ struct usb_host_interface *cur_altsetting =
+ hwahc->wa.usb_iface->cur_altsetting;
+
+ result = usb_control_msg(hwahc->wa.usb_dev,
+ usb_sndctrlpipe(hwahc->wa.usb_dev, 0),
+ WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ WA_REQ_ALEREON_FEATURE_SET,
+ cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ /*
+ * If we successfully sent the control message, start DTI here
+ * because no transfer notifications will be received which is
+ * where DTI is normally started.
+ */
+ if (result == 0)
+ result = wa_dti_start(&hwahc->wa);
+ else
+ result = 0; /* OK. Continue normally. */
+
+ if (result < 0) {
+ dev_err(dev, "cannot start DTI: %d\n", result);
+ goto error_dti_start;
+ }
+ }
+
return result;
+error_dti_start:
+ wa_nep_disarm(&hwahc->wa);
error_stop:
__wa_clear_feature(&hwahc->wa, WA_ENABLE);
return result;
@@ -270,7 +316,7 @@ static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay)
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
delay * 1000,
iface_no,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
if (ret == 0)
msleep(delay);
@@ -299,7 +345,7 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
stream_index,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Cannot set WUSB stream index: %d\n", result);
goto out;
@@ -310,7 +356,7 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
WUSB_REQ_SET_WUSB_MAS,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- mas_le, 32, 1000 /* FIXME: arbitrary */);
+ mas_le, 32, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
out:
@@ -344,7 +390,7 @@ static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
interval << 8 | repeat_cnt,
handle << 8 | iface_no,
- wuie, wuie->bLength, 1000 /* FIXME: arbitrary */);
+ wuie, wuie->bLength, USB_CTRL_SET_TIMEOUT);
}
/*
@@ -361,7 +407,7 @@ static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
WUSB_REQ_REMOVE_MMC_IE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, handle << 8 | iface_no,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
}
/*
@@ -404,7 +450,7 @@ static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, wusb_dev->port_idx << 8 | iface_no,
dev_info, sizeof(struct hwa_dev_info),
- 1000 /* FIXME: arbitrary */);
+ USB_CTRL_SET_TIMEOUT);
kfree(dev_info);
return ret;
}
@@ -444,7 +490,7 @@ static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
USB_DT_KEY << 8 | key_idx,
port_idx << 8 | iface_no,
- keyd, keyd_len, 1000 /* FIXME: arbitrary */);
+ keyd, keyd_len, USB_CTRL_SET_TIMEOUT);
kzfree(keyd); /* clear keys etc. */
return result;
@@ -481,12 +527,12 @@ static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
encryption_value = 0;
}
- /* Set the encryption type for commmunicating with the device */
+ /* Set the encryption type for communicating with the device */
result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_SET_ENCRYPTION,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
encryption_value, port_idx << 8 | iface_no,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(wusbhc->dev, "Can't set host's WUSB encryption for "
"port index %u to %s (value %d): %d\n", port_idx,
@@ -559,14 +605,10 @@ found:
goto error;
}
wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr;
- /* Make LE fields CPU order */
- wa_descr->bcdWAVersion = le16_to_cpu(wa_descr->bcdWAVersion);
- wa_descr->wNumRPipes = le16_to_cpu(wa_descr->wNumRPipes);
- wa_descr->wRPipeMaxBlock = le16_to_cpu(wa_descr->wRPipeMaxBlock);
- if (wa_descr->bcdWAVersion > 0x0100)
+ if (le16_to_cpu(wa_descr->bcdWAVersion) > 0x0100)
dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n",
- wa_descr->bcdWAVersion & 0xff00 >> 8,
- wa_descr->bcdWAVersion & 0x00ff);
+ le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00 >> 8,
+ le16_to_cpu(wa_descr->bcdWAVersion) & 0x00ff);
result = 0;
error:
return result;
@@ -577,7 +619,7 @@ static struct hc_driver hwahc_hc_driver = {
.product_desc = "Wireless USB HWA host controller",
.hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd),
.irq = NULL, /* FIXME */
- .flags = HCD_USB2, /* FIXME */
+ .flags = HCD_USB25,
.reset = hwahc_op_reset,
.start = hwahc_op_start,
.stop = hwahc_op_stop,
@@ -588,8 +630,6 @@ static struct hc_driver hwahc_hc_driver = {
.hub_status_data = wusbhc_rh_status_data,
.hub_control = wusbhc_rh_control,
- .bus_suspend = wusbhc_rh_suspend,
- .bus_resume = wusbhc_rh_resume,
.start_port_reset = wusbhc_rh_start_port_reset,
};
@@ -674,7 +714,8 @@ static void hwahc_security_release(struct hwahc *hwahc)
/* nothing to do here so far... */
}
-static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface)
+static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface,
+ kernel_ulong_t quirks)
{
int result;
struct device *dev = &iface->dev;
@@ -685,12 +726,9 @@ static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface)
wa->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */
wa->usb_iface = usb_get_intf(iface);
wusbhc->dev = dev;
- wusbhc->uwb_rc = uwb_rc_get_by_grandpa(iface->dev.parent);
- if (wusbhc->uwb_rc == NULL) {
- result = -ENODEV;
- dev_err(dev, "Cannot get associated UWB Host Controller\n");
- goto error_rc_get;
- }
+ /* defer getting the uwb_rc handle until it is needed since it
+ * may not have been registered by the hwa_rc driver yet. */
+ wusbhc->uwb_rc = NULL;
result = wa_fill_descr(wa); /* Get the device descriptor */
if (result < 0)
goto error_fill_descriptor;
@@ -722,7 +760,7 @@ static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface)
dev_err(dev, "Can't create WUSB HC structures: %d\n", result);
goto error_wusbhc_create;
}
- result = wa_create(&hwahc->wa, iface);
+ result = wa_create(&hwahc->wa, iface, quirks);
if (result < 0)
goto error_wa_create;
return 0;
@@ -733,8 +771,6 @@ error_wusbhc_create:
/* WA Descr fill allocs no resources */
error_security_create:
error_fill_descriptor:
- uwb_rc_put(wusbhc->uwb_rc);
-error_rc_get:
usb_put_intf(iface);
usb_put_dev(usb_dev);
return result;
@@ -776,11 +812,11 @@ static int hwahc_probe(struct usb_interface *usb_iface,
goto error_alloc;
}
usb_hcd->wireless = 1;
- set_bit(HCD_FLAG_SAW_IRQ, &usb_hcd->flags);
+ usb_hcd->self.sg_tablesize = ~0;
wusbhc = usb_hcd_to_wusbhc(usb_hcd);
hwahc = container_of(wusbhc, struct hwahc, wusbhc);
hwahc_init(hwahc);
- result = hwahc_create(hwahc, usb_iface);
+ result = hwahc_create(hwahc, usb_iface, id->driver_info);
if (result < 0) {
dev_err(dev, "Cannot initialize internals: %d\n", result);
goto error_hwahc_create;
@@ -790,6 +826,7 @@ static int hwahc_probe(struct usb_interface *usb_iface,
dev_err(dev, "Cannot add HCD: %d\n", result);
goto error_add_hcd;
}
+ device_wakeup_enable(usb_hcd->self.controller);
result = wusbhc_b_create(&hwahc->wusbhc);
if (result < 0) {
dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
@@ -824,6 +861,14 @@ static void hwahc_disconnect(struct usb_interface *usb_iface)
}
static struct usb_device_id hwahc_id_table[] = {
+ /* Alereon 5310 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x02, 0x01),
+ .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
+ /* Alereon 5611 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x02, 0x01),
+ .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
/* FIXME: use class labels for this */
{ USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
{},
@@ -837,18 +882,7 @@ static struct usb_driver hwahc_driver = {
.id_table = hwahc_id_table,
};
-static int __init hwahc_driver_init(void)
-{
- return usb_register(&hwahc_driver);
-}
-module_init(hwahc_driver_init);
-
-static void __exit hwahc_driver_exit(void)
-{
- usb_deregister(&hwahc_driver);
-}
-module_exit(hwahc_driver_exit);
-
+module_usb_driver(hwahc_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver");
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
index 512f647448c..4f320d050da 100644
--- a/drivers/usb/host/imx21-dbg.c
+++ b/drivers/usb/host/imx21-dbg.c
@@ -18,6 +18,10 @@
/* this file is part of imx21-hcd.c */
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
#ifndef DEBUG
static inline void create_debug_files(struct imx21 *imx21) { }
@@ -239,7 +243,7 @@ static int debug_status_show(struct seq_file *s, void *v)
"ETDs allocated: %d/%d (max=%d)\n"
"ETDs in use sw: %d\n"
"ETDs in use hw: %d\n"
- "DMEM alocated: %d/%d (max=%d)\n"
+ "DMEM allocated: %d/%d (max=%d)\n"
"DMEM blocks: %d\n"
"Queued waiting for ETD: %d\n"
"Queued waiting for DMEM: %d\n",
@@ -384,7 +388,7 @@ static void debug_isoc_show_one(struct seq_file *s,
seq_printf(s, "%s %d:\n"
"cc=0X%02X\n"
"scheduled frame %d (%d)\n"
- "submittted frame %d (%d)\n"
+ "submitted frame %d (%d)\n"
"completed frame %d (%d)\n"
"requested length=%d\n"
"completed length=%d\n\n",
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index e49b75a7800..207bad99301 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -58,9 +58,14 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include "imx21-hcd.h"
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
@@ -473,7 +478,7 @@ static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
/* End handling */
/* =========================================== */
-/* Endpoint now idle - release it's ETD(s) or asssign to queued request */
+/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
int i;
@@ -808,26 +813,36 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
/* calculate frame */
cur_frame = imx21_hc_get_frame(hcd);
- if (urb->transfer_flags & URB_ISO_ASAP) {
- if (list_empty(&ep_priv->td_list))
- urb->start_frame = cur_frame + 5;
- else
- urb->start_frame = list_entry(
- ep_priv->td_list.prev,
- struct td, list)->frame + urb->interval;
- }
- urb->start_frame = wrap_frame(urb->start_frame);
- if (frame_after(cur_frame, urb->start_frame)) {
- dev_dbg(imx21->dev,
- "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
- urb->start_frame, cur_frame,
- (urb->transfer_flags & URB_ISO_ASAP) != 0);
- urb->start_frame = wrap_frame(cur_frame + 1);
+ i = 0;
+ if (list_empty(&ep_priv->td_list)) {
+ urb->start_frame = wrap_frame(cur_frame + 5);
+ } else {
+ urb->start_frame = wrap_frame(list_entry(ep_priv->td_list.prev,
+ struct td, list)->frame + urb->interval);
+
+ if (frame_after(cur_frame, urb->start_frame)) {
+ dev_dbg(imx21->dev,
+ "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
+ urb->start_frame, cur_frame,
+ (urb->transfer_flags & URB_ISO_ASAP) != 0);
+ i = DIV_ROUND_UP(wrap_frame(
+ cur_frame - urb->start_frame),
+ urb->interval);
+
+ /* Treat underruns as if URB_ISO_ASAP was set */
+ if ((urb->transfer_flags & URB_ISO_ASAP) ||
+ i >= urb->number_of_packets) {
+ urb->start_frame = wrap_frame(urb->start_frame
+ + i * urb->interval);
+ i = 0;
+ }
+ }
}
/* set up transfers */
+ urb_priv->isoc_remaining = urb->number_of_packets - i;
td = urb_priv->isoc_td;
- for (i = 0; i < urb->number_of_packets; i++, td++) {
+ for (; i < urb->number_of_packets; i++, td++) {
unsigned int offset = urb->iso_frame_desc[i].offset;
td->ep = ep;
td->urb = urb;
@@ -839,7 +854,6 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
list_add_tail(&td->list, &ep_priv->td_list);
}
- urb_priv->isoc_remaining = urb->number_of_packets;
dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
urb->number_of_packets, urb->start_frame, td->frame);
@@ -927,7 +941,8 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
if (state == US_CTRL_SETUP) {
dir = TD_DIR_SETUP;
if (unsuitable_for_dma(urb->setup_dma))
- unmap_urb_setup_for_dma(imx21->hcd, urb);
+ usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
+ urb);
etd->dma_handle = urb->setup_dma;
etd->cpu_buffer = urb->setup_packet;
bufround = 0;
@@ -943,7 +958,7 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
bufround = (dir == TD_DIR_IN) ? 1 : 0;
if (unsuitable_for_dma(urb->transfer_dma))
- unmap_urb_for_dma(imx21->hcd, urb);
+ usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);
etd->dma_handle = urb->transfer_dma;
etd->cpu_buffer = urb->transfer_buffer;
@@ -1322,7 +1337,7 @@ static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
* (and hence no interrupt occurs).
* This causes the transfer in question to hang.
* The kludge below checks for this condition at each SOF and processes any
- * blocked ETDs (after an arbitary 10 frame wait)
+ * blocked ETDs (after an arbitrary 10 frame wait)
*
* With a single active transfer the usbtest test suite will run for days
* without the kludge.
@@ -1471,8 +1486,8 @@ static int get_hub_descriptor(struct usb_hcd *hcd,
0x0010 | /* No over current protection */
0);
- desc->bitmap[0] = 1 << 1;
- desc->bitmap[1] = ~0;
+ desc->u.hs.DeviceRemovable[0] = 1 << 1;
+ desc->u.hs.DeviceRemovable[1] = ~0;
return 0;
}
@@ -1658,7 +1673,7 @@ static int imx21_hc_reset(struct usb_hcd *hcd)
spin_lock_irqsave(&imx21->lock, flags);
- /* Reset the Host controler modules */
+ /* Reset the Host controller modules */
writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
imx21->regs + USBOTG_RST_CTRL);
@@ -1679,7 +1694,7 @@ static int imx21_hc_reset(struct usb_hcd *hcd)
return 0;
}
-static int __devinit imx21_hc_start(struct usb_hcd *hcd)
+static int imx21_hc_start(struct usb_hcd *hcd)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
unsigned long flags;
@@ -1810,7 +1825,7 @@ static int imx21_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
if (res != NULL) {
- clk_disable(imx21->clk);
+ clk_disable_unprepare(imx21->clk);
clk_put(imx21->clk);
iounmap(imx21->regs);
release_mem_region(res->start, resource_size(res));
@@ -1849,7 +1864,7 @@ static int imx21_probe(struct platform_device *pdev)
imx21 = hcd_to_imx21(hcd);
imx21->hcd = hcd;
imx21->dev = &pdev->dev;
- imx21->pdata = pdev->dev.platform_data;
+ imx21->pdata = dev_get_platdata(&pdev->dev);
if (!imx21->pdata)
imx21->pdata = &default_pdata;
@@ -1883,23 +1898,24 @@ static int imx21_probe(struct platform_device *pdev)
ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
if (ret)
goto failed_clock_set;
- ret = clk_enable(imx21->clk);
+ ret = clk_prepare_enable(imx21->clk);
if (ret)
goto failed_clock_enable;
dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
(readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, irq, 0);
if (ret != 0) {
dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
goto failed_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
return 0;
failed_add_hcd:
- clk_disable(imx21->clk);
+ clk_disable_unprepare(imx21->clk);
failed_clock_enable:
failed_clock_set:
clk_put(imx21->clk);
@@ -1915,7 +1931,7 @@ failed_request_mem:
static struct platform_driver imx21_hcd_driver = {
.driver = {
- .name = (char *)hcd_name,
+ .name = hcd_name,
},
.probe = imx21_probe,
.remove = imx21_remove,
@@ -1923,18 +1939,7 @@ static struct platform_driver imx21_hcd_driver = {
.resume = NULL,
};
-static int __init imx21_hcd_init(void)
-{
- return platform_driver_register(&imx21_hcd_driver);
-}
-
-static void __exit imx21_hcd_cleanup(void)
-{
- platform_driver_unregister(&imx21_hcd_driver);
-}
-
-module_init(imx21_hcd_init);
-module_exit(imx21_hcd_cleanup);
+module_platform_driver(imx21_hcd_driver);
MODULE_DESCRIPTION("i.MX21 USB Host controller");
MODULE_AUTHOR("Martin Fuzzey");
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
index 87b29fd971b..05122f8a698 100644
--- a/drivers/usb/host/imx21-hcd.h
+++ b/drivers/usb/host/imx21-hcd.h
@@ -24,7 +24,11 @@
#ifndef __LINUX_IMX21_HCD_H__
#define __LINUX_IMX21_HCD_H__
-#include <mach/mx21-usbhost.h>
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/platform_data/usb-mx2.h>
#define NUM_ISO_ETDS 2
#define USB_NUM_ETD 32
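dev_get_platdata(), used in the probe conversions above, is just an accessor for dev->platform_data; a simplified sketch of the helper from <linux/device.h>:

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}

So imx21->pdata = dev_get_platdata(&pdev->dev) reads the same pointer as before, only through the accessor.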
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 0da7fc05f45..240e792c81a 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -60,7 +60,6 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/usb.h>
@@ -70,7 +69,6 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include <asm/byteorder.h>
#include "isp116x.h"
@@ -612,6 +610,7 @@ static irqreturn_t isp116x_irq(struct usb_hcd *hcd)
/* IRQ's are off, we do no DMA,
perfectly ready to die ... */
hcd->state = HC_STATE_HALT;
+ usb_hc_died(hcd);
ret = IRQ_HANDLED;
goto done;
}
@@ -951,9 +950,9 @@ static void isp116x_hub_descriptor(struct isp116x *isp116x,
/* Power switching, device type, overcurrent. */
desc->wHubCharacteristics = cpu_to_le16((u16) ((reg >> 8) & 0x1f));
desc->bPwrOn2PwrGood = (u8) ((reg >> 24) & 0xff);
- /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
- desc->bitmap[0] = 0;
- desc->bitmap[1] = ~0;
+ /* ports removable, and legacy PortPwrCtrlMask */
+ desc->u.hs.DeviceRemovable[0] = 0;
+ desc->u.hs.DeviceRemovable[1] = ~0;
}
/* Perform reset of a given port.
@@ -1557,7 +1556,7 @@ static int isp116x_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit isp116x_probe(struct platform_device *pdev)
+static int isp116x_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct isp116x *isp116x;
@@ -1568,6 +1567,9 @@ static int __devinit isp116x_probe(struct platform_device *pdev)
int ret = 0;
unsigned long irqflags;
+ if (usb_disabled())
+ return -ENODEV;
+
if (pdev->num_resources < 3) {
ret = -ENODEV;
goto err1;
@@ -1623,7 +1625,7 @@ static int __devinit isp116x_probe(struct platform_device *pdev)
isp116x->addr_reg = addr_reg;
spin_lock_init(&isp116x->lock);
INIT_LIST_HEAD(&isp116x->async);
- isp116x->board = pdev->dev.platform_data;
+ isp116x->board = dev_get_platdata(&pdev->dev);
if (!isp116x->board) {
ERR("Platform data structure not initialized\n");
@@ -1638,10 +1640,12 @@ static int __devinit isp116x_probe(struct platform_device *pdev)
goto err6;
}
- ret = usb_add_hcd(hcd, irq, irqflags | IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, irq, irqflags);
if (ret)
goto err6;
+ device_wakeup_enable(hcd->self.controller);
+
ret = create_debug_file(isp116x);
if (ret) {
ERR("Couldn't create debugfs entry\n");
@@ -1702,27 +1706,9 @@ static struct platform_driver isp116x_driver = {
.suspend = isp116x_suspend,
.resume = isp116x_resume,
.driver = {
- .name = (char *)hcd_name,
+ .name = hcd_name,
.owner = THIS_MODULE,
},
};
-/*-----------------------------------------------------------------*/
-
-static int __init isp116x_init(void)
-{
- if (usb_disabled())
- return -ENODEV;
-
- INFO("driver %s, %s\n", hcd_name, DRIVER_VERSION);
- return platform_driver_register(&isp116x_driver);
-}
-
-module_init(isp116x_init);
-
-static void __exit isp116x_cleanup(void)
-{
- platform_driver_unregister(&isp116x_driver);
-}
-
-module_exit(isp116x_cleanup);
+module_platform_driver(isp116x_driver);
diff --git a/drivers/usb/host/isp116x.h b/drivers/usb/host/isp116x.h
index 12db961acdf..dd34b7a3396 100644
--- a/drivers/usb/host/isp116x.h
+++ b/drivers/usb/host/isp116x.h
@@ -13,7 +13,7 @@
/* Full speed: max # of bytes to transfer for a single urb
at a time must be < 1024 && must be multiple of 64.
- 832 allows transfering 4kiB within 5 frames. */
+ 832 allows transferring 4kiB within 5 frames. */
#define MAX_TRANSFER_SIZE_FULLSPEED 832
/* Low speed: there is no reason to schedule in very big
@@ -325,11 +325,7 @@ struct isp116x_ep {
/*-------------------------------------------------------------------------*/
-#ifdef DEBUG
-#define DBG(stuff...) printk(KERN_DEBUG "116x: " stuff)
-#else
-#define DBG(stuff...) do{}while(0)
-#endif
+#define DBG(stuff...) pr_debug("116x: " stuff)
#ifdef VERBOSE
# define VDBG DBG
@@ -358,15 +354,8 @@ struct isp116x_ep {
#define isp116x_check_platform_delay(h) 0
#endif
-#if defined(DEBUG)
-#define IRQ_TEST() BUG_ON(!irqs_disabled())
-#else
-#define IRQ_TEST() do{}while(0)
-#endif
-
static inline void isp116x_write_addr(struct isp116x *isp116x, unsigned reg)
{
- IRQ_TEST();
writew(reg & 0xff, isp116x->addr_reg);
isp116x_delay(isp116x, 300);
}
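The DBG() cleanup works because pr_debug() already contains the conditional compilation the driver used to open-code. A simplified sketch of the upstream definition from <linux/printk.h> (not driver code):

#if defined(CONFIG_DYNAMIC_DEBUG)
#define pr_debug(fmt, ...) \
	dynamic_pr_debug(fmt, ##__VA_ARGS__)
#elif defined(DEBUG)
#define pr_debug(fmt, ...) \
	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif

So the new DBG() still prints only when DEBUG is defined, and becomes runtime-switchable under CONFIG_DYNAMIC_DEBUG, which is also why the imx21 files above define DEBUG when dynamic debug is enabled.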
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 8196fa11fec..875bcfd3ec1 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -37,11 +37,7 @@
* recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
*/
-#ifdef CONFIG_USB_DEBUG
-# define ISP1362_DEBUG
-#else
-# undef ISP1362_DEBUG
-#endif
+#undef ISP1362_DEBUG
/*
* The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
@@ -70,9 +66,7 @@
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
@@ -82,9 +76,11 @@
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/bitmap.h>
+#include <linux/prefetch.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
@@ -93,7 +89,6 @@ static int dbg_level;
module_param(dbg_level, int, 0644);
#else
module_param(dbg_level, int, 0);
-#define STUB_DEBUG_FILE
#endif
#include "../core/usb.h"
@@ -227,7 +222,6 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
- int index = ep->ptd_index;
int last = ep->ptd_index + ep->num_ptds;
if (last > epq->buf_count)
@@ -237,10 +231,8 @@ static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1
epq->buf_map, epq->skip_map);
BUG_ON(last > epq->buf_count);
- for (; index < last; index++) {
- __clear_bit(index, &epq->buf_map);
- __set_bit(index, &epq->skip_map);
- }
+ bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
+ bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
epq->buf_avail += ep->num_ptds;
epq->ptd_count--;
@@ -354,8 +346,6 @@ static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep
struct ptd *ptd = &ep->ptd;
int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
- _BUG_ON(ep->ptd_offset < 0);
-
prefetch(ptd);
isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
if (len)
@@ -547,12 +537,12 @@ static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
short_ok ? "" : "not_",
PTD_GET_COUNT(ptd), ep->maxpacket, len);
+ /* save the data underrun error code for later and
+ * proceed with the status stage
+ */
+ urb->actual_length += PTD_GET_COUNT(ptd);
if (usb_pipecontrol(urb->pipe)) {
ep->nextpid = USB_PID_ACK;
- /* save the data underrun error code for later and
- * procede with the status stage
- */
- urb->actual_length += PTD_GET_COUNT(ptd);
BUG_ON(urb->actual_length > urb->transfer_buffer_length);
if (urb->status == -EINPROGRESS)
@@ -1556,9 +1546,9 @@ static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
- /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
- desc->bitmap[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
- desc->bitmap[1] = ~0;
+ /* ports removable, and legacy PortPwrCtrlMask */
+ desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
+ desc->u.hs.DeviceRemovable[1] = ~0;
DBG(3, "%s: exit\n", __func__);
}
@@ -1579,12 +1569,12 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
DBG(0, "ClearHubFeature: ");
switch (wValue) {
case C_HUB_OVER_CURRENT:
- _DBG(0, "C_HUB_OVER_CURRENT\n");
+ DBG(0, "C_HUB_OVER_CURRENT\n");
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
case C_HUB_LOCAL_POWER:
- _DBG(0, "C_HUB_LOCAL_POWER\n");
+ DBG(0, "C_HUB_LOCAL_POWER\n");
break;
default:
goto error;
@@ -1595,7 +1585,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
- _DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
+ DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
break;
default:
goto error;
@@ -1626,36 +1616,36 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- _DBG(0, "USB_PORT_FEAT_ENABLE\n");
+ DBG(0, "USB_PORT_FEAT_ENABLE\n");
tmp = RH_PS_CCS;
break;
case USB_PORT_FEAT_C_ENABLE:
- _DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
+ DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
tmp = RH_PS_PESC;
break;
case USB_PORT_FEAT_SUSPEND:
- _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
+ DBG(0, "USB_PORT_FEAT_SUSPEND\n");
tmp = RH_PS_POCI;
break;
case USB_PORT_FEAT_C_SUSPEND:
- _DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
+ DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
tmp = RH_PS_PSSC;
break;
case USB_PORT_FEAT_POWER:
- _DBG(0, "USB_PORT_FEAT_POWER\n");
+ DBG(0, "USB_PORT_FEAT_POWER\n");
tmp = RH_PS_LSDA;
break;
case USB_PORT_FEAT_C_CONNECTION:
- _DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
+ DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
tmp = RH_PS_CSC;
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
- _DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
tmp = RH_PS_OCIC;
break;
case USB_PORT_FEAT_C_RESET:
- _DBG(0, "USB_PORT_FEAT_C_RESET\n");
+ DBG(0, "USB_PORT_FEAT_C_RESET\n");
tmp = RH_PS_PRSC;
break;
default:
@@ -1675,7 +1665,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
wIndex--;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
+ DBG(0, "USB_PORT_FEAT_SUSPEND\n");
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
isp1362_hcd->rhport[wIndex] =
@@ -1683,7 +1673,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
break;
case USB_PORT_FEAT_POWER:
- _DBG(0, "USB_PORT_FEAT_POWER\n");
+ DBG(0, "USB_PORT_FEAT_POWER\n");
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
isp1362_hcd->rhport[wIndex] =
@@ -1691,7 +1681,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
break;
case USB_PORT_FEAT_RESET:
- _DBG(0, "USB_PORT_FEAT_RESET\n");
+ DBG(0, "USB_PORT_FEAT_RESET\n");
spin_lock_irqsave(&isp1362_hcd->lock, flags);
t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
@@ -1725,7 +1715,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
error:
/* "protocol stall" on error */
- _DBG(0, "PROTOCOL STALL\n");
+ DBG(0, "PROTOCOL STALL\n");
retval = -EPIPE;
}
@@ -1917,20 +1907,6 @@ static int isp1362_bus_resume(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-#ifdef STUB_DEBUG_FILE
-
-static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
-{
-}
-static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
-{
-}
-
-#else
-
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
static void dump_irq(struct seq_file *s, char *label, u16 mask)
{
seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
@@ -2073,7 +2049,7 @@ static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
}
-static int proc_isp1362_show(struct seq_file *s, void *unused)
+static int isp1362_show(struct seq_file *s, void *unused)
{
struct isp1362_hcd *isp1362_hcd = s->private;
struct isp1362_ep *ep;
@@ -2131,7 +2107,7 @@ static int proc_isp1362_show(struct seq_file *s, void *unused)
default:
s = "?";
break;
- };
+ }
s;}), ep->maxpacket) ;
list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
seq_printf(s, " urb%p, %d/%d\n", urb,
@@ -2177,44 +2153,31 @@ static int proc_isp1362_show(struct seq_file *s, void *unused)
return 0;
}
-static int proc_isp1362_open(struct inode *inode, struct file *file)
+static int isp1362_open(struct inode *inode, struct file *file)
{
- return single_open(file, proc_isp1362_show, PDE(inode)->data);
+ return single_open(file, isp1362_show, inode);
}
-static const struct file_operations proc_ops = {
- .open = proc_isp1362_open,
+static const struct file_operations debug_ops = {
+ .open = isp1362_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/* expect just one isp1362_hcd per system */
-static const char proc_filename[] = "driver/isp1362";
-
static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
- struct proc_dir_entry *pde;
-
- pde = create_proc_entry(proc_filename, 0, NULL);
- if (pde == NULL) {
- pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
- return;
- }
-
- pde->proc_fops = &proc_ops;
- pde->data = isp1362_hcd;
- isp1362_hcd->pde = pde;
+ isp1362_hcd->debug_file = debugfs_create_file("isp1362", S_IRUGO,
+ usb_debug_root,
+ isp1362_hcd, &debug_ops);
}
static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
- if (isp1362_hcd->pde)
- remove_proc_entry(proc_filename, NULL);
+ debugfs_remove(isp1362_hcd->debug_file);
}
-#endif
-
/*-------------------------------------------------------------------------*/
static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
@@ -2361,7 +2324,7 @@ static int isp1362_hc_reset(struct usb_hcd *hcd)
unsigned long flags;
int clkrdy = 0;
- pr_info("%s:\n", __func__);
+ pr_debug("%s:\n", __func__);
if (isp1362_hcd->board && isp1362_hcd->board->reset) {
isp1362_hcd->board->reset(hcd->self.controller, 1);
@@ -2398,7 +2361,7 @@ static void isp1362_hc_stop(struct usb_hcd *hcd)
unsigned long flags;
u32 tmp;
- pr_info("%s:\n", __func__);
+ pr_debug("%s:\n", __func__);
del_timer_sync(&hcd->rh_timer);
@@ -2526,7 +2489,7 @@ static int isp1362_hc_start(struct usb_hcd *hcd)
u16 chipid;
unsigned long flags;
- pr_info("%s:\n", __func__);
+ pr_debug("%s:\n", __func__);
spin_lock_irqsave(&isp1362_hcd->lock, flags);
chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
@@ -2649,7 +2612,7 @@ static struct hc_driver isp1362_hc_driver = {
/*-------------------------------------------------------------------------*/
-static int __devexit isp1362_remove(struct platform_device *pdev)
+static int isp1362_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
@@ -2684,7 +2647,7 @@ static int __devexit isp1362_remove(struct platform_device *pdev)
return 0;
}
-static int __init isp1362_probe(struct platform_device *pdev)
+static int isp1362_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct isp1362_hcd *isp1362_hcd;
@@ -2696,6 +2659,9 @@ static int __init isp1362_probe(struct platform_device *pdev)
struct resource *irq_res;
unsigned int irq_flags = 0;
+ if (usb_disabled())
+ return -ENODEV;
+
/* basic sanity checks first. board-specific init logic should
* have initialized these three resources and probably board
* specific platform_data. we don't probe for IRQs, and do only
@@ -2758,7 +2724,7 @@ static int __init isp1362_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&isp1362_hcd->periodic);
INIT_LIST_HEAD(&isp1362_hcd->isoc);
INIT_LIST_HEAD(&isp1362_hcd->remove_list);
- isp1362_hcd->board = pdev->dev.platform_data;
+ isp1362_hcd->board = dev_get_platdata(&pdev->dev);
#if USE_PLATFORM_DELAY
if (!isp1362_hcd->board->delay) {
dev_err(hcd->self.controller, "No platform delay function given\n");
@@ -2776,9 +2742,11 @@ static int __init isp1362_probe(struct platform_device *pdev)
if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
irq_flags |= IRQF_TRIGGER_LOW;
- retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED);
+ retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
if (retval != 0)
goto err6;
+ device_wakeup_enable(hcd->self.controller);
+
pr_info("%s, irq %d\n", hcd->product_desc, irq);
create_debug_file(isp1362_hcd);
@@ -2857,29 +2825,14 @@ static int isp1362_resume(struct platform_device *pdev)
static struct platform_driver isp1362_driver = {
.probe = isp1362_probe,
- .remove = __devexit_p(isp1362_remove),
+ .remove = isp1362_remove,
.suspend = isp1362_suspend,
.resume = isp1362_resume,
.driver = {
- .name = (char *)hcd_name,
+ .name = hcd_name,
.owner = THIS_MODULE,
},
};
-/*-------------------------------------------------------------------------*/
-
-static int __init isp1362_init(void)
-{
- if (usb_disabled())
- return -ENODEV;
- pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION);
- return platform_driver_register(&isp1362_driver);
-}
-module_init(isp1362_init);
-
-static void __exit isp1362_cleanup(void)
-{
- platform_driver_unregister(&isp1362_driver);
-}
-module_exit(isp1362_cleanup);
+module_platform_driver(isp1362_driver);
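bitmap_set() and bitmap_clear() from <linux/bitmap.h> operate on a run of bits at once, so the per-index loop removed from release_ptd_buffers() collapses to two calls. A minimal sketch of the equivalence (helper name and parameters are illustrative, not the driver's):

static void release_range(unsigned long *buf_map, unsigned long *skip_map,
			  unsigned int index, unsigned int count)
{
	/*
	 * Equivalent to the removed loop:
	 *	for (i = index; i < index + count; i++) {
	 *		__clear_bit(i, buf_map);
	 *		__set_bit(i, skip_map);
	 *	}
	 */
	bitmap_clear(buf_map, index, count);
	bitmap_set(skip_map, index, count);
}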
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
index 0f97820e65b..3b0b4847c3a 100644
--- a/drivers/usb/host/isp1362.h
+++ b/drivers/usb/host/isp1362.h
@@ -76,14 +76,14 @@ static inline void delayed_insw(unsigned int addr, void *buf, int len)
#define ISP1362_REG_WRITE_OFFSET 0x80
-#ifdef ISP1362_DEBUG
-typedef const unsigned int isp1362_reg_t;
-
#define REG_WIDTH_16 0x000
#define REG_WIDTH_32 0x100
#define REG_WIDTH_MASK 0x100
#define REG_NO_MASK 0x0ff
+#ifdef ISP1362_DEBUG
+typedef const unsigned int isp1362_reg_t;
+
#define REG_ACCESS_R 0x200
#define REG_ACCESS_W 0x400
#define REG_ACCESS_RW 0x600
@@ -91,9 +91,6 @@ typedef const unsigned int isp1362_reg_t;
#define ISP1362_REG_NO(r) ((r) & REG_NO_MASK)
-#define _BUG_ON(x) BUG_ON(x)
-#define _WARN_ON(x) WARN_ON(x)
-
#define ISP1362_REG(name, addr, width, rw) \
static isp1362_reg_t ISP1362_REG_##name = ((addr) | (width) | (rw))
@@ -102,8 +99,6 @@ static isp1362_reg_t ISP1362_REG_##name = ((addr) | (width) | (rw))
#else
typedef const unsigned char isp1362_reg_t;
#define ISP1362_REG_NO(r) (r)
-#define _BUG_ON(x) do {} while (0)
-#define _WARN_ON(x) do {} while (0)
#define ISP1362_REG(name, addr, width, rw) \
static isp1362_reg_t ISP1362_REG_##name = addr
@@ -485,7 +480,7 @@ struct isp1362_hcd {
struct isp1362_platform_data *board;
- struct proc_dir_entry *pde;
+ struct dentry *debug_file;
unsigned long stat1, stat2, stat4, stat8, stat16;
/* HC registers */
@@ -587,21 +582,11 @@ static inline struct usb_hcd *isp1362_hcd_to_hcd(struct isp1362_hcd *isp1362_hcd
* ISP1362 HW Interface
*/
-#ifdef ISP1362_DEBUG
#define DBG(level, fmt...) \
do { \
if (dbg_level > level) \
pr_debug(fmt); \
} while (0)
-#define _DBG(level, fmt...) \
- do { \
- if (dbg_level > level) \
- printk(fmt); \
- } while (0)
-#else
-#define DBG(fmt...) do {} while (0)
-#define _DBG DBG
-#endif
#ifdef VERBOSE
# define VDBG(fmt...) DBG(3, fmt)
@@ -645,9 +630,7 @@ static inline struct usb_hcd *isp1362_hcd_to_hcd(struct isp1362_hcd *isp1362_hcd
*/
static void isp1362_write_addr(struct isp1362_hcd *isp1362_hcd, isp1362_reg_t reg)
{
- /*_BUG_ON((reg & ISP1362_REG_WRITE_OFFSET) && !(reg & REG_ACCESS_W));*/
REG_ACCESS_TEST(reg);
- _BUG_ON(!irqs_disabled());
DUMMY_DELAY_ACCESS;
writew(ISP1362_REG_NO(reg), isp1362_hcd->addr_reg);
DUMMY_DELAY_ACCESS;
@@ -656,7 +639,6 @@ static void isp1362_write_addr(struct isp1362_hcd *isp1362_hcd, isp1362_reg_t re
static void isp1362_write_data16(struct isp1362_hcd *isp1362_hcd, u16 val)
{
- _BUG_ON(!irqs_disabled());
DUMMY_DELAY_ACCESS;
writew(val, isp1362_hcd->data_reg);
}
@@ -665,7 +647,6 @@ static u16 isp1362_read_data16(struct isp1362_hcd *isp1362_hcd)
{
u16 val;
- _BUG_ON(!irqs_disabled());
DUMMY_DELAY_ACCESS;
val = readw(isp1362_hcd->data_reg);
@@ -674,7 +655,6 @@ static u16 isp1362_read_data16(struct isp1362_hcd *isp1362_hcd)
static void isp1362_write_data32(struct isp1362_hcd *isp1362_hcd, u32 val)
{
- _BUG_ON(!irqs_disabled());
#if USE_32BIT
DUMMY_DELAY_ACCESS;
writel(val, isp1362_hcd->data_reg);
@@ -690,7 +670,6 @@ static u32 isp1362_read_data32(struct isp1362_hcd *isp1362_hcd)
{
u32 val;
- _BUG_ON(!irqs_disabled());
#if USE_32BIT
DUMMY_DELAY_ACCESS;
val = readl(isp1362_hcd->data_reg);
@@ -713,8 +692,6 @@ static void isp1362_read_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 le
if (!len)
return;
- _BUG_ON(!irqs_disabled());
-
RDBG("%s: Reading %d byte from fifo to mem @ %p\n", __func__, len, buf);
#if USE_32BIT
if (len >= 4) {
@@ -760,8 +737,6 @@ static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 l
return;
}
- _BUG_ON(!irqs_disabled());
-
RDBG("%s: Writing %d byte to fifo from memory @%p\n", __func__, len, buf);
#if USE_32BIT
if (len >= 4) {
@@ -854,7 +829,6 @@ static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 l
isp1362_write_reg32(d, r, __v & ~m); \
}
-#ifdef ISP1362_DEBUG
#define isp1362_show_reg(d, r) { \
if ((ISP1362_REG_##r & REG_WIDTH_MASK) == REG_WIDTH_32) \
DBG(0, "%-12s[%02x]: %08x\n", #r, \
@@ -863,9 +837,6 @@ static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 l
DBG(0, "%-12s[%02x]: %04x\n", #r, \
ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg16(d, r)); \
}
-#else
-#define isp1362_show_reg(d, r) do {} while (0)
-#endif
static void __attribute__((__unused__)) isp1362_show_regs(struct isp1362_hcd *isp1362_hcd)
{
@@ -923,10 +894,6 @@ static void __attribute__((__unused__)) isp1362_show_regs(struct isp1362_hcd *is
static void isp1362_write_diraddr(struct isp1362_hcd *isp1362_hcd, u16 offset, u16 len)
{
- _BUG_ON(offset & 1);
- _BUG_ON(offset >= ISP1362_BUF_SIZE);
- _BUG_ON(len > ISP1362_BUF_SIZE);
- _BUG_ON(offset + len > ISP1362_BUF_SIZE);
len = (len + 1) & ~1;
isp1362_clr_mask16(isp1362_hcd, HCDMACFG, HCDMACFG_CTR_ENABLE);
@@ -936,42 +903,32 @@ static void isp1362_write_diraddr(struct isp1362_hcd *isp1362_hcd, u16 offset, u
static void isp1362_read_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len)
{
- _BUG_ON(offset & 1);
-
isp1362_write_diraddr(isp1362_hcd, offset, len);
DBG(3, "%s: Reading %d byte from buffer @%04x to memory @ %p\n",
__func__, len, offset, buf);
isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
- _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA);
isp1362_read_fifo(isp1362_hcd, buf, len);
- _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
- _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
}
static void isp1362_write_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len)
{
- _BUG_ON(offset & 1);
-
isp1362_write_diraddr(isp1362_hcd, offset, len);
DBG(3, "%s: Writing %d byte to buffer @%04x from memory @ %p\n",
__func__, len, offset, buf);
isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
- _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA | ISP1362_REG_WRITE_OFFSET);
isp1362_write_fifo(isp1362_hcd, buf, len);
- _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
- _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
}
static void __attribute__((unused)) dump_data(char *buf, int len)
@@ -1002,7 +959,7 @@ static void __attribute__((unused)) dump_data(char *buf, int len)
}
}
-#if defined(ISP1362_DEBUG) && defined(PTD_TRACE)
+#if defined(PTD_TRACE)
static void dump_ptd(struct ptd *ptd)
{
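The procfs entry is replaced by a debugfs file under usb_debug_root, and the new struct dentry *debug_file member replaces the proc_dir_entry pointer, which is what lets the STUB_DEBUG_FILE fallback go away. A minimal sketch of the general pattern (hypothetical foo_* names, not the driver's code; debugfs stores the data pointer in inode->i_private):

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/usb.h>		/* usb_debug_root */

struct foo_hcd {			/* hypothetical driver state */
	unsigned long stat1;
	struct dentry *debug_file;
};

static int foo_show(struct seq_file *s, void *unused)
{
	struct foo_hcd *foo = s->private;

	seq_printf(s, "stat1: %lu\n", foo->stat1);
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_show, inode->i_private);
}

static const struct file_operations foo_debug_ops = {
	.open		= foo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void foo_create_debug_file(struct foo_hcd *foo)
{
	foo->debug_file = debugfs_create_file("foo", S_IRUGO, usb_debug_root,
					      foo, &foo_debug_ops);
}

static void foo_remove_debug_file(struct foo_hcd *foo)
{
	debugfs_remove(foo->debug_file);
}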
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index bdba8c5d844..51a0ae9cdd1 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -8,6 +8,8 @@
*
* (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
*
+ * (c) 2011 Arvid Brodin <arvid.brodin@enea.com>
+ *
*/
#include <linux/module.h>
#include <linux/kernel.h>
@@ -19,20 +21,33 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mm.h>
+#include <linux/timer.h>
#include <asm/unaligned.h>
#include <asm/cacheflush.h>
+#include <linux/gpio.h>
#include "isp1760-hcd.h"
static struct kmem_cache *qtd_cachep;
static struct kmem_cache *qh_cachep;
+static struct kmem_cache *urb_listitem_cachep;
+
+enum queue_head_types {
+ QH_CONTROL,
+ QH_BULK,
+ QH_INTERRUPT,
+ QH_END
+};
struct isp1760_hcd {
u32 hcs_params;
spinlock_t lock;
- struct inter_packet_info atl_ints[32];
- struct inter_packet_info int_ints[32];
+ struct slotinfo atl_slots[32];
+ int atl_done_map;
+ struct slotinfo int_slots[32];
+ int int_done_map;
struct memory_chunk memory_pool[BLOCKS];
+ struct list_head qh_list[QH_END];
/* periodic schedule support */
#define DEFAULT_I_TDPS 1024
@@ -41,16 +56,14 @@ struct isp1760_hcd {
unsigned long reset_done;
unsigned long next_statechange;
unsigned int devflags;
+
+ int rst_gpio;
};
static inline struct isp1760_hcd *hcd_to_priv(struct usb_hcd *hcd)
{
return (struct isp1760_hcd *) (hcd->hcd_priv);
}
-static inline struct usb_hcd *priv_to_hcd(struct isp1760_hcd *priv)
-{
- return container_of((void *) priv, struct usb_hcd, hcd_priv);
-}
/* Section 2.2 Host Controller Capability Registers */
#define HC_LENGTH(p) (((p)>>00)&0x00ff) /* bits 7:0 */
@@ -80,217 +93,271 @@ static inline struct usb_hcd *priv_to_hcd(struct isp1760_hcd *priv)
#define PORT_RWC_BITS (PORT_CSC)
struct isp1760_qtd {
- struct isp1760_qtd *hw_next;
u8 packet_type;
- u8 toggle;
-
void *data_buffer;
+ u32 payload_addr;
+
/* the rest is HCD-private */
struct list_head qtd_list;
struct urb *urb;
size_t length;
-
- /* isp special*/
+ size_t actual_length;
+
+ /* QTD_ENQUEUED: waiting for transfer (inactive) */
+ /* QTD_PAYLOAD_ALLOC: chip mem has been allocated for payload */
+ /* QTD_XFER_STARTED: valid ptd has been written to isp176x - only
+ interrupt handler may touch this qtd! */
+ /* QTD_XFER_COMPLETE: payload has been transferred successfully */
+ /* QTD_RETIRE: transfer error/abort qtd */
+#define QTD_ENQUEUED 0
+#define QTD_PAYLOAD_ALLOC 1
+#define QTD_XFER_STARTED 2
+#define QTD_XFER_COMPLETE 3
+#define QTD_RETIRE 4
u32 status;
-#define URB_COMPLETE_NOTIFY (1 << 0)
-#define URB_ENQUEUED (1 << 1)
-#define URB_TYPE_ATL (1 << 2)
-#define URB_TYPE_INT (1 << 3)
};
+/* Queue head, one for each active endpoint */
struct isp1760_qh {
- /* first part defined by EHCI spec */
+ struct list_head qh_list;
struct list_head qtd_list;
- struct isp1760_hcd *priv;
-
- /* periodic schedule info */
- unsigned short period; /* polling interval */
- struct usb_device *dev;
-
u32 toggle;
u32 ping;
+ int slot;
+ int tt_buffer_dirty; /* See USB2.0 spec section 11.17.5 */
};
-#define ehci_port_speed(priv, portsc) USB_PORT_STAT_HIGH_SPEED
+struct urb_listitem {
+ struct list_head urb_list;
+ struct urb *urb;
+};
-static unsigned int isp1760_readl(__u32 __iomem *regs)
+/*
+ * Access functions for isp176x registers (addresses 0..0x03FF).
+ */
+static u32 reg_read32(void __iomem *base, u32 reg)
{
- return readl(regs);
+ return readl(base + reg);
}
-static void isp1760_writel(const unsigned int val, __u32 __iomem *regs)
+static void reg_write32(void __iomem *base, u32 reg, u32 val)
{
- writel(val, regs);
+ writel(val, base + reg);
}
/*
- * The next two copy via MMIO data to/from the device. memcpy_{to|from}io()
+ * Access functions for isp176x memory (offset >= 0x0400).
+ *
+ * bank_reads8() reads memory locations prefetched by an earlier write to
+ * HC_MEMORY_REG (see isp176x datasheet). Unless you want to do fancy multi-
+ * bank optimizations, you should use the more generic mem_reads8() below.
+ *
+ * For access to ptd memory, use the specialized ptd_read() and ptd_write()
+ * below.
+ *
+ * These functions copy via MMIO data to/from the device. memcpy_{to|from}io()
* doesn't quite work because some people have to enforce 32-bit access
*/
-static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
- __u32 __iomem *dst, u32 len)
+static void bank_reads8(void __iomem *src_base, u32 src_offset, u32 bank_addr,
+ __u32 *dst, u32 bytes)
{
+ __u32 __iomem *src;
u32 val;
- u8 *buff8;
+ __u8 *src_byteptr;
+ __u8 *dst_byteptr;
- if (!src) {
- printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len);
- return;
- }
+ src = src_base + (bank_addr | src_offset);
- while (len >= 4) {
- *src = __raw_readl(dst);
- len -= 4;
- src++;
- dst++;
+ if (src_offset < PAYLOAD_OFFSET) {
+ while (bytes >= 4) {
+ *dst = le32_to_cpu(__raw_readl(src));
+ bytes -= 4;
+ src++;
+ dst++;
+ }
+ } else {
+ while (bytes >= 4) {
+ *dst = __raw_readl(src);
+ bytes -= 4;
+ src++;
+ dst++;
+ }
}
- if (!len)
+ if (!bytes)
return;
/* in case we have 3, 2 or 1 bytes left. The dst buffer may not be fully
* allocated.
*/
- val = isp1760_readl(dst);
-
- buff8 = (u8 *)src;
- while (len) {
-
- *buff8 = val;
- val >>= 8;
- len--;
- buff8++;
+ if (src_offset < PAYLOAD_OFFSET)
+ val = le32_to_cpu(__raw_readl(src));
+ else
+ val = __raw_readl(src);
+
+ dst_byteptr = (void *) dst;
+ src_byteptr = (void *) &val;
+ while (bytes > 0) {
+ *dst_byteptr = *src_byteptr;
+ dst_byteptr++;
+ src_byteptr++;
+ bytes--;
}
}
-static void priv_write_copy(const struct isp1760_hcd *priv, const u32 *src,
- __u32 __iomem *dst, u32 len)
+static void mem_reads8(void __iomem *src_base, u32 src_offset, void *dst,
+ u32 bytes)
{
- while (len >= 4) {
- __raw_writel(*src, dst);
- len -= 4;
- src++;
- dst++;
+ reg_write32(src_base, HC_MEMORY_REG, src_offset + ISP_BANK(0));
+ ndelay(90);
+ bank_reads8(src_base, src_offset, ISP_BANK(0), dst, bytes);
+}
+
+static void mem_writes8(void __iomem *dst_base, u32 dst_offset,
+ __u32 const *src, u32 bytes)
+{
+ __u32 __iomem *dst;
+
+ dst = dst_base + dst_offset;
+
+ if (dst_offset < PAYLOAD_OFFSET) {
+ while (bytes >= 4) {
+ __raw_writel(cpu_to_le32(*src), dst);
+ bytes -= 4;
+ src++;
+ dst++;
+ }
+ } else {
+ while (bytes >= 4) {
+ __raw_writel(*src, dst);
+ bytes -= 4;
+ src++;
+ dst++;
+ }
}
- if (!len)
+ if (!bytes)
return;
- /* in case we have 3, 2 or 1 by left. The buffer is allocated and the
- * extra bytes should not be read by the HW
+ /* in case we have 3, 2 or 1 bytes left. The buffer is allocated and the
+ * extra bytes should not be read by the HW.
*/
- __raw_writel(*src, dst);
+ if (dst_offset < PAYLOAD_OFFSET)
+ __raw_writel(cpu_to_le32(*src), dst);
+ else
+ __raw_writel(*src, dst);
+}
+
+/*
+ * Read and write ptds. 'ptd_offset' should be one of ISO_PTD_OFFSET,
+ * INT_PTD_OFFSET, and ATL_PTD_OFFSET. 'slot' should be less than 32.
+ */
+static void ptd_read(void __iomem *base, u32 ptd_offset, u32 slot,
+ struct ptd *ptd)
+{
+ reg_write32(base, HC_MEMORY_REG,
+ ISP_BANK(0) + ptd_offset + slot*sizeof(*ptd));
+ ndelay(90);
+ bank_reads8(base, ptd_offset + slot*sizeof(*ptd), ISP_BANK(0),
+ (void *) ptd, sizeof(*ptd));
+}
+
+static void ptd_write(void __iomem *base, u32 ptd_offset, u32 slot,
+ struct ptd *ptd)
+{
+ mem_writes8(base, ptd_offset + slot*sizeof(*ptd) + sizeof(ptd->dw0),
+ &ptd->dw1, 7*sizeof(ptd->dw1));
+ /* Make sure dw0 gets written last (after other dw's and after payload)
+ since it contains the enable bit */
+ wmb();
+ mem_writes8(base, ptd_offset + slot*sizeof(*ptd), &ptd->dw0,
+ sizeof(ptd->dw0));
}
+
/* memory management of the 60kb on the chip from 0x1000 to 0xffff */
static void init_memory(struct isp1760_hcd *priv)
{
- int i;
- u32 payload;
+ int i, curr;
+ u32 payload_addr;
- payload = 0x1000;
+ payload_addr = PAYLOAD_OFFSET;
for (i = 0; i < BLOCK_1_NUM; i++) {
- priv->memory_pool[i].start = payload;
+ priv->memory_pool[i].start = payload_addr;
priv->memory_pool[i].size = BLOCK_1_SIZE;
priv->memory_pool[i].free = 1;
- payload += priv->memory_pool[i].size;
+ payload_addr += priv->memory_pool[i].size;
}
-
- for (i = BLOCK_1_NUM; i < BLOCK_1_NUM + BLOCK_2_NUM; i++) {
- priv->memory_pool[i].start = payload;
- priv->memory_pool[i].size = BLOCK_2_SIZE;
- priv->memory_pool[i].free = 1;
- payload += priv->memory_pool[i].size;
+ curr = i;
+ for (i = 0; i < BLOCK_2_NUM; i++) {
+ priv->memory_pool[curr + i].start = payload_addr;
+ priv->memory_pool[curr + i].size = BLOCK_2_SIZE;
+ priv->memory_pool[curr + i].free = 1;
+ payload_addr += priv->memory_pool[curr + i].size;
}
-
- for (i = BLOCK_1_NUM + BLOCK_2_NUM; i < BLOCKS; i++) {
- priv->memory_pool[i].start = payload;
- priv->memory_pool[i].size = BLOCK_3_SIZE;
- priv->memory_pool[i].free = 1;
- payload += priv->memory_pool[i].size;
+ curr = i;
+ for (i = 0; i < BLOCK_3_NUM; i++) {
+ priv->memory_pool[curr + i].start = payload_addr;
+ priv->memory_pool[curr + i].size = BLOCK_3_SIZE;
+ priv->memory_pool[curr + i].free = 1;
+ payload_addr += priv->memory_pool[curr + i].size;
}
- BUG_ON(payload - priv->memory_pool[i - 1].size > PAYLOAD_SIZE);
+ WARN_ON(payload_addr - priv->memory_pool[0].start > PAYLOAD_AREA_SIZE);
}
-static u32 alloc_mem(struct isp1760_hcd *priv, u32 size)
+static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
int i;
- if (!size)
- return ISP1760_NULL_POINTER;
+ WARN_ON(qtd->payload_addr);
+
+ if (!qtd->length)
+ return;
for (i = 0; i < BLOCKS; i++) {
- if (priv->memory_pool[i].size >= size &&
+ if (priv->memory_pool[i].size >= qtd->length &&
priv->memory_pool[i].free) {
-
priv->memory_pool[i].free = 0;
- return priv->memory_pool[i].start;
+ qtd->payload_addr = priv->memory_pool[i].start;
+ return;
}
}
-
- printk(KERN_ERR "ISP1760 MEM: can not allocate %d bytes of memory\n",
- size);
- printk(KERN_ERR "Current memory map:\n");
- for (i = 0; i < BLOCKS; i++) {
- printk(KERN_ERR "Pool %2d size %4d status: %d\n",
- i, priv->memory_pool[i].size,
- priv->memory_pool[i].free);
- }
- /* XXX maybe -ENOMEM could be possible */
- BUG();
- return 0;
}
-static void free_mem(struct isp1760_hcd *priv, u32 mem)
+static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
int i;
- if (mem == ISP1760_NULL_POINTER)
+ if (!qtd->payload_addr)
return;
for (i = 0; i < BLOCKS; i++) {
- if (priv->memory_pool[i].start == mem) {
-
- BUG_ON(priv->memory_pool[i].free);
-
+ if (priv->memory_pool[i].start == qtd->payload_addr) {
+ WARN_ON(priv->memory_pool[i].free);
priv->memory_pool[i].free = 1;
- return ;
+ qtd->payload_addr = 0;
+ return;
}
}
- printk(KERN_ERR "Trying to free not-here-allocated memory :%08x\n",
- mem);
- BUG();
+ dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n",
+ __func__, qtd->payload_addr);
+ WARN_ON(1);
+ qtd->payload_addr = 0;
}
-static void isp1760_init_regs(struct usb_hcd *hcd)
-{
- isp1760_writel(0, hcd->regs + HC_BUFFER_STATUS_REG);
- isp1760_writel(NO_TRANSFER_ACTIVE, hcd->regs +
- HC_ATL_PTD_SKIPMAP_REG);
- isp1760_writel(NO_TRANSFER_ACTIVE, hcd->regs +
- HC_INT_PTD_SKIPMAP_REG);
- isp1760_writel(NO_TRANSFER_ACTIVE, hcd->regs +
- HC_ISO_PTD_SKIPMAP_REG);
-
- isp1760_writel(~NO_TRANSFER_ACTIVE, hcd->regs +
- HC_ATL_PTD_DONEMAP_REG);
- isp1760_writel(~NO_TRANSFER_ACTIVE, hcd->regs +
- HC_INT_PTD_DONEMAP_REG);
- isp1760_writel(~NO_TRANSFER_ACTIVE, hcd->regs +
- HC_ISO_PTD_DONEMAP_REG);
-}
-
-static int handshake(struct isp1760_hcd *priv, void __iomem *ptr,
+static int handshake(struct usb_hcd *hcd, u32 reg,
u32 mask, u32 done, int usec)
{
u32 result;
do {
- result = isp1760_readl(ptr);
+ result = reg_read32(hcd->regs, reg);
if (result == ~0)
return -ENODEV;
result &= mask;
@@ -303,57 +370,56 @@ static int handshake(struct isp1760_hcd *priv, void __iomem *ptr,
}
/* reset a non-running (STS_HALT == 1) controller */
-static int ehci_reset(struct isp1760_hcd *priv)
+static int ehci_reset(struct usb_hcd *hcd)
{
int retval;
- struct usb_hcd *hcd = priv_to_hcd(priv);
- u32 command = isp1760_readl(hcd->regs + HC_USBCMD);
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ u32 command = reg_read32(hcd->regs, HC_USBCMD);
command |= CMD_RESET;
- isp1760_writel(command, hcd->regs + HC_USBCMD);
+ reg_write32(hcd->regs, HC_USBCMD, command);
hcd->state = HC_STATE_HALT;
priv->next_statechange = jiffies;
- retval = handshake(priv, hcd->regs + HC_USBCMD,
+ retval = handshake(hcd, HC_USBCMD,
CMD_RESET, 0, 250 * 1000);
return retval;
}
-static void qh_destroy(struct isp1760_qh *qh)
-{
- BUG_ON(!list_empty(&qh->qtd_list));
- kmem_cache_free(qh_cachep, qh);
-}
-
-static struct isp1760_qh *isp1760_qh_alloc(struct isp1760_hcd *priv,
- gfp_t flags)
+static struct isp1760_qh *qh_alloc(gfp_t flags)
{
struct isp1760_qh *qh;
qh = kmem_cache_zalloc(qh_cachep, flags);
if (!qh)
- return qh;
+ return NULL;
+ INIT_LIST_HEAD(&qh->qh_list);
INIT_LIST_HEAD(&qh->qtd_list);
- qh->priv = priv;
+ qh->slot = -1;
+
return qh;
}
-/* magic numbers that can affect system performance */
-#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
-#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
-#define EHCI_TUNE_RL_TT 0
-#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
-#define EHCI_TUNE_MULT_TT 1
-#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
+static void qh_free(struct isp1760_qh *qh)
+{
+ WARN_ON(!list_empty(&qh->qtd_list));
+ WARN_ON(qh->slot > -1);
+ kmem_cache_free(qh_cachep, qh);
+}
/* one-time init, only for memory state */
static int priv_init(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 hcc_params;
+ int i;
spin_lock_init(&priv->lock);
+ for (i = 0; i < QH_END; i++)
+ INIT_LIST_HEAD(&priv->qh_list[i]);
+
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
@@ -361,7 +427,7 @@ static int priv_init(struct usb_hcd *hcd)
priv->periodic_size = DEFAULT_I_TDPS;
/* controllers may cache some of the periodic schedule ... */
- hcc_params = isp1760_readl(hcd->regs + HC_HCCPARAMS);
+ hcc_params = reg_read32(hcd->regs, HC_HCCPARAMS);
/* full frame cache */
if (HCC_ISOC_CACHE(hcc_params))
priv->i_thresh = 8;
@@ -377,6 +443,18 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
int result;
u32 scratch, hwmode;
+ /* low-level chip reset */
+ if (gpio_is_valid(priv->rst_gpio)) {
+ unsigned int rst_lvl;
+
+ rst_lvl = (priv->devflags &
+ ISP1760_FLAG_RESET_ACTIVE_HIGH) ? 1 : 0;
+
+ gpio_set_value(priv->rst_gpio, rst_lvl);
+ mdelay(50);
+ gpio_set_value(priv->rst_gpio, !rst_lvl);
+ }
+
/* Setup HW Mode Control: This assumes a level active-low interrupt */
hwmode = HW_DATA_BUS_32BIT;
@@ -398,46 +476,48 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
* Write it twice to ensure correct upper bits if switching
* to 16-bit mode.
*/
- isp1760_writel(hwmode, hcd->regs + HC_HW_MODE_CTRL);
- isp1760_writel(hwmode, hcd->regs + HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
- isp1760_writel(0xdeadbabe, hcd->regs + HC_SCRATCH_REG);
+ reg_write32(hcd->regs, HC_SCRATCH_REG, 0xdeadbabe);
/* Change bus pattern */
- scratch = isp1760_readl(hcd->regs + HC_CHIP_ID_REG);
- scratch = isp1760_readl(hcd->regs + HC_SCRATCH_REG);
+ scratch = reg_read32(hcd->regs, HC_CHIP_ID_REG);
+ scratch = reg_read32(hcd->regs, HC_SCRATCH_REG);
if (scratch != 0xdeadbabe) {
- printk(KERN_ERR "ISP1760: Scratch test failed.\n");
+ dev_err(hcd->self.controller, "Scratch test failed.\n");
return -ENODEV;
}
/* pre reset */
- isp1760_init_regs(hcd);
+ reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0);
+ reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
+ reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
+ reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
/* reset */
- isp1760_writel(SW_RESET_RESET_ALL, hcd->regs + HC_RESET_REG);
+ reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_ALL);
mdelay(100);
- isp1760_writel(SW_RESET_RESET_HC, hcd->regs + HC_RESET_REG);
+ reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_HC);
mdelay(100);
- result = ehci_reset(priv);
+ result = ehci_reset(hcd);
if (result)
return result;
/* Step 11 passed */
- isp1760_info(priv, "bus width: %d, oc: %s\n",
+ dev_info(hcd->self.controller, "bus width: %d, oc: %s\n",
(priv->devflags & ISP1760_FLAG_BUS_WIDTH_16) ?
16 : 32, (priv->devflags & ISP1760_FLAG_ANALOG_OC) ?
"analog" : "digital");
/* ATL reset */
- isp1760_writel(hwmode | ALL_ATX_RESET, hcd->regs + HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET);
mdelay(10);
- isp1760_writel(hwmode, hcd->regs + HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
- isp1760_writel(INTERRUPT_ENABLE_MASK, hcd->regs + HC_INTERRUPT_REG);
- isp1760_writel(INTERRUPT_ENABLE_MASK, hcd->regs + HC_INTERRUPT_ENABLE);
+ reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK);
/*
* PORT 1 Control register of the ISP1760 is the OTG control
@@ -445,1149 +525,958 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
* support in this driver, we use port 1 as a "normal" USB host port on
* both chips.
*/
- isp1760_writel(PORT1_POWER | PORT1_INIT2,
- hcd->regs + HC_PORT1_CTRL);
+ reg_write32(hcd->regs, HC_PORT1_CTRL, PORT1_POWER | PORT1_INIT2);
mdelay(10);
- priv->hcs_params = isp1760_readl(hcd->regs + HC_HCSPARAMS);
+ priv->hcs_params = reg_read32(hcd->regs, HC_HCSPARAMS);
return priv_init(hcd);
}
-static void isp1760_init_maps(struct usb_hcd *hcd)
-{
- /*set last maps, for iso its only 1, else 32 tds bitmap*/
- isp1760_writel(0x80000000, hcd->regs + HC_ATL_PTD_LASTPTD_REG);
- isp1760_writel(0x80000000, hcd->regs + HC_INT_PTD_LASTPTD_REG);
- isp1760_writel(0x00000001, hcd->regs + HC_ISO_PTD_LASTPTD_REG);
-}
-
-static void isp1760_enable_interrupts(struct usb_hcd *hcd)
+static u32 base_to_chip(u32 base)
{
- isp1760_writel(0, hcd->regs + HC_ATL_IRQ_MASK_AND_REG);
- isp1760_writel(0, hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
- isp1760_writel(0, hcd->regs + HC_INT_IRQ_MASK_AND_REG);
- isp1760_writel(0, hcd->regs + HC_INT_IRQ_MASK_OR_REG);
- isp1760_writel(0, hcd->regs + HC_ISO_IRQ_MASK_AND_REG);
- isp1760_writel(0xffffffff, hcd->regs + HC_ISO_IRQ_MASK_OR_REG);
- /* step 23 passed */
+ return ((base - 0x400) >> 3);
}
-static int isp1760_run(struct usb_hcd *hcd)
+static int last_qtd_of_urb(struct isp1760_qtd *qtd, struct isp1760_qh *qh)
{
- struct isp1760_hcd *priv = hcd_to_priv(hcd);
- int retval;
- u32 temp;
- u32 command;
- u32 chipid;
-
- hcd->uses_new_polling = 1;
-
- hcd->state = HC_STATE_RUNNING;
- isp1760_enable_interrupts(hcd);
- temp = isp1760_readl(hcd->regs + HC_HW_MODE_CTRL);
- isp1760_writel(temp | HW_GLOBAL_INTR_EN, hcd->regs + HC_HW_MODE_CTRL);
-
- command = isp1760_readl(hcd->regs + HC_USBCMD);
- command &= ~(CMD_LRESET|CMD_RESET);
- command |= CMD_RUN;
- isp1760_writel(command, hcd->regs + HC_USBCMD);
-
- retval = handshake(priv, hcd->regs + HC_USBCMD, CMD_RUN, CMD_RUN,
- 250 * 1000);
- if (retval)
- return retval;
-
- /*
- * XXX
- * Spec says to write FLAG_CF as last config action, priv code grabs
- * the semaphore while doing so.
- */
- down_write(&ehci_cf_port_reset_rwsem);
- isp1760_writel(FLAG_CF, hcd->regs + HC_CONFIGFLAG);
-
- retval = handshake(priv, hcd->regs + HC_CONFIGFLAG, FLAG_CF, FLAG_CF,
- 250 * 1000);
- up_write(&ehci_cf_port_reset_rwsem);
- if (retval)
- return retval;
-
- chipid = isp1760_readl(hcd->regs + HC_CHIP_ID_REG);
- isp1760_info(priv, "USB ISP %04x HW rev. %d started\n", chipid & 0xffff,
- chipid >> 16);
+ struct urb *urb;
- /* PTD Register Init Part 2, Step 28 */
- /* enable INTs */
- isp1760_init_maps(hcd);
+ if (list_is_last(&qtd->qtd_list, &qh->qtd_list))
+ return 1;
- /* GRR this is run-once init(), being done every time the HC starts.
- * So long as they're part of class devices, we can't do it init()
- * since the class device isn't created that early.
- */
- return 0;
+ urb = qtd->urb;
+ qtd = list_entry(qtd->qtd_list.next, typeof(*qtd), qtd_list);
+ return (qtd->urb != urb);
}
-static u32 base_to_chip(u32 base)
-{
- return ((base - 0x400) >> 3);
-}
+/* magic numbers that can affect system performance */
+#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
+#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
+#define EHCI_TUNE_RL_TT 0
+#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
+#define EHCI_TUNE_MULT_TT 1
+#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
-static void transform_into_atl(struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd, struct urb *urb,
- u32 payload, struct ptd *ptd)
+static void create_ptd_atl(struct isp1760_qh *qh,
+ struct isp1760_qtd *qtd, struct ptd *ptd)
{
- u32 dw0;
- u32 dw1;
- u32 dw2;
- u32 dw3;
u32 maxpacket;
u32 multi;
- u32 pid_code;
u32 rl = RL_COUNTER;
u32 nak = NAK_COUNTER;
+ memset(ptd, 0, sizeof(*ptd));
+
/* according to 3.6.2, max packet len can not be > 0x400 */
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+ maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe,
+ usb_pipeout(qtd->urb->pipe));
multi = 1 + ((maxpacket >> 11) & 0x3);
maxpacket &= 0x7ff;
/* DW0 */
- dw0 = PTD_VALID;
- dw0 |= PTD_LENGTH(qtd->length);
- dw0 |= PTD_MAXPACKET(maxpacket);
- dw0 |= PTD_ENDPOINT(usb_pipeendpoint(urb->pipe));
- dw1 = usb_pipeendpoint(urb->pipe) >> 1;
+ ptd->dw0 = DW0_VALID_BIT;
+ ptd->dw0 |= TO_DW0_LENGTH(qtd->length);
+ ptd->dw0 |= TO_DW0_MAXPACKET(maxpacket);
+ ptd->dw0 |= TO_DW0_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe));
/* DW1 */
- dw1 |= PTD_DEVICE_ADDR(usb_pipedevice(urb->pipe));
+ ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1;
+ ptd->dw1 |= TO_DW1_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe));
+ ptd->dw1 |= TO_DW1_PID_TOKEN(qtd->packet_type);
- pid_code = qtd->packet_type;
- dw1 |= PTD_PID_TOKEN(pid_code);
+ if (usb_pipebulk(qtd->urb->pipe))
+ ptd->dw1 |= DW1_TRANS_BULK;
+ else if (usb_pipeint(qtd->urb->pipe))
+ ptd->dw1 |= DW1_TRANS_INT;
- if (usb_pipebulk(urb->pipe))
- dw1 |= PTD_TRANS_BULK;
- else if (usb_pipeint(urb->pipe))
- dw1 |= PTD_TRANS_INT;
-
- if (urb->dev->speed != USB_SPEED_HIGH) {
+ if (qtd->urb->dev->speed != USB_SPEED_HIGH) {
/* split transaction */
- dw1 |= PTD_TRANS_SPLIT;
- if (urb->dev->speed == USB_SPEED_LOW)
- dw1 |= PTD_SE_USB_LOSPEED;
+ ptd->dw1 |= DW1_TRANS_SPLIT;
+ if (qtd->urb->dev->speed == USB_SPEED_LOW)
+ ptd->dw1 |= DW1_SE_USB_LOSPEED;
- dw1 |= PTD_PORT_NUM(urb->dev->ttport);
- dw1 |= PTD_HUB_NUM(urb->dev->tt->hub->devnum);
+ ptd->dw1 |= TO_DW1_PORT_NUM(qtd->urb->dev->ttport);
+ ptd->dw1 |= TO_DW1_HUB_NUM(qtd->urb->dev->tt->hub->devnum);
/* SE bit for Split INT transfers */
- if (usb_pipeint(urb->pipe) &&
- (urb->dev->speed == USB_SPEED_LOW))
- dw1 |= 2 << 16;
+ if (usb_pipeint(qtd->urb->pipe) &&
+ (qtd->urb->dev->speed == USB_SPEED_LOW))
+ ptd->dw1 |= 2 << 16;
- dw3 = 0;
rl = 0;
nak = 0;
} else {
- dw0 |= PTD_MULTI(multi);
- if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe))
- dw3 = qh->ping;
- else
- dw3 = 0;
+ ptd->dw0 |= TO_DW0_MULTI(multi);
+ if (usb_pipecontrol(qtd->urb->pipe) ||
+ usb_pipebulk(qtd->urb->pipe))
+ ptd->dw3 |= TO_DW3_PING(qh->ping);
}
/* DW2 */
- dw2 = 0;
- dw2 |= PTD_DATA_START_ADDR(base_to_chip(payload));
- dw2 |= PTD_RL_CNT(rl);
- dw3 |= PTD_NAC_CNT(nak);
+ ptd->dw2 = 0;
+ ptd->dw2 |= TO_DW2_DATA_START_ADDR(base_to_chip(qtd->payload_addr));
+ ptd->dw2 |= TO_DW2_RL(rl);
/* DW3 */
- if (usb_pipecontrol(urb->pipe))
- dw3 |= PTD_DATA_TOGGLE(qtd->toggle);
- else
- dw3 |= qh->toggle;
-
+ ptd->dw3 |= TO_DW3_NAKCOUNT(nak);
+ ptd->dw3 |= TO_DW3_DATA_TOGGLE(qh->toggle);
+ if (usb_pipecontrol(qtd->urb->pipe)) {
+ if (qtd->data_buffer == qtd->urb->setup_packet)
+ ptd->dw3 &= ~TO_DW3_DATA_TOGGLE(1);
+ else if (last_qtd_of_urb(qtd, qh))
+ ptd->dw3 |= TO_DW3_DATA_TOGGLE(1);
+ }
- dw3 |= PTD_ACTIVE;
+ ptd->dw3 |= DW3_ACTIVE_BIT;
/* Cerr */
- dw3 |= PTD_CERR(ERR_COUNTER);
-
- memset(ptd, 0, sizeof(*ptd));
-
- ptd->dw0 = cpu_to_le32(dw0);
- ptd->dw1 = cpu_to_le32(dw1);
- ptd->dw2 = cpu_to_le32(dw2);
- ptd->dw3 = cpu_to_le32(dw3);
+ ptd->dw3 |= TO_DW3_CERR(ERR_COUNTER);
}
-static void transform_add_int(struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd, struct urb *urb,
- u32 payload, struct ptd *ptd)
+static void transform_add_int(struct isp1760_qh *qh,
+ struct isp1760_qtd *qtd, struct ptd *ptd)
{
- u32 maxpacket;
- u32 multi;
- u32 numberofusofs;
- u32 i;
- u32 usofmask, usof;
+ u32 usof;
u32 period;
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
- multi = 1 + ((maxpacket >> 11) & 0x3);
- maxpacket &= 0x7ff;
- /* length of the data per uframe */
- maxpacket = multi * maxpacket;
-
- numberofusofs = urb->transfer_buffer_length / maxpacket;
- if (urb->transfer_buffer_length % maxpacket)
- numberofusofs += 1;
-
- usofmask = 1;
- usof = 0;
- for (i = 0; i < numberofusofs; i++) {
- usof |= usofmask;
- usofmask <<= 1;
- }
-
- if (urb->dev->speed != USB_SPEED_HIGH) {
- /* split */
- ptd->dw5 = cpu_to_le32(0x1c);
+ /*
+ * Most of this is guessing. ISP1761 datasheet is quite unclear, and
+ * the algorithm from the original Philips driver code, which was
+ * pretty much used in this driver before as well, is quite horrendous
+ * and, i believe, incorrect. The code below follows the datasheet and
+ * USB2.0 spec as far as I can tell, and plug/unplug seems to be much
+ * more reliable this way (fingers crossed...).
+ */
- if (qh->period >= 32)
- period = qh->period / 2;
+ if (qtd->urb->dev->speed == USB_SPEED_HIGH) {
+ /* urb->interval is in units of microframes (1/8 ms) */
+ period = qtd->urb->interval >> 3;
+
+ if (qtd->urb->interval > 4)
+ usof = 0x01; /* One bit set =>
+ interval 1 ms * uFrame-match */
+ else if (qtd->urb->interval > 2)
+ usof = 0x22; /* Two bits set => interval 1/2 ms */
+ else if (qtd->urb->interval > 1)
+ usof = 0x55; /* Four bits set => interval 1/4 ms */
else
- period = qh->period;
-
+ usof = 0xff; /* All bits set => interval 1/8 ms */
} else {
+ /* urb->interval is in units of frames (1 ms) */
+ period = qtd->urb->interval;
+ usof = 0x0f; /* Execute Start Split on any of the
+ four first uFrames */
- if (qh->period >= 8)
- period = qh->period/8;
- else
- period = qh->period;
-
- if (period >= 32)
- period = 16;
-
- if (qh->period >= 8) {
- /* millisecond period */
- period = (period << 3);
- } else {
- /* usof based tranmsfers */
- /* minimum 4 usofs */
- usof = 0x11;
- }
+ /*
+ * First 8 bits in dw5 are uSCS and "specifies which uSOF the
+ * complete split needs to be sent. Valid only for IN." Also,
+ * "All bits can be set to one for every transfer." (p 82,
+ * ISP1761 data sheet.) 0x1c is from Philips driver. Where did
+ * that number come from? 0xff seems to work fine...
+ */
+ /* ptd->dw5 = 0x1c; */
+ ptd->dw5 = 0xff; /* Execute Complete Split on any uFrame */
}
- ptd->dw2 |= cpu_to_le32(period);
- ptd->dw4 = cpu_to_le32(usof);
-}
+ period = period >> 1;/* Ensure equal or shorter period than requested */
+ period &= 0xf8; /* Mask off too large values and lowest unused 3 bits */
-static void transform_into_int(struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd, struct urb *urb,
- u32 payload, struct ptd *ptd)
-{
- transform_into_atl(priv, qh, qtd, urb, payload, ptd);
- transform_add_int(priv, qh, qtd, urb, payload, ptd);
+ ptd->dw2 |= period;
+ ptd->dw4 = usof;
}
-static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len,
- u32 token)
+static void create_ptd_int(struct isp1760_qh *qh,
+ struct isp1760_qtd *qtd, struct ptd *ptd)
{
- int count;
-
- qtd->data_buffer = databuffer;
- qtd->packet_type = GET_QTD_TOKEN_TYPE(token);
- qtd->toggle = GET_DATA_TOGGLE(token);
-
- if (len > HC_ATL_PL_SIZE)
- count = HC_ATL_PL_SIZE;
- else
- count = len;
-
- qtd->length = count;
- return count;
+ create_ptd_atl(qh, qtd, ptd);
+ transform_add_int(qh, qtd, ptd);
}
-static int check_error(struct ptd *ptd)
+static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb)
+__releases(priv->lock)
+__acquires(priv->lock)
{
- int error = 0;
- u32 dw3;
-
- dw3 = le32_to_cpu(ptd->dw3);
- if (dw3 & DW3_HALT_BIT) {
- error = -EPIPE;
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
- if (dw3 & DW3_ERROR_BIT)
- pr_err("error bit is set in DW3\n");
+ if (!urb->unlinked) {
+ if (urb->status == -EINPROGRESS)
+ urb->status = 0;
}
- if (dw3 & DW3_QTD_ACTIVE) {
- printk(KERN_ERR "transfer active bit is set DW3\n");
- printk(KERN_ERR "nak counter: %d, rl: %d\n", (dw3 >> 19) & 0xf,
- (le32_to_cpu(ptd->dw2) >> 25) & 0xf);
+ if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
+ void *ptr;
+ for (ptr = urb->transfer_buffer;
+ ptr < urb->transfer_buffer + urb->transfer_buffer_length;
+ ptr += PAGE_SIZE)
+ flush_dcache_page(virt_to_page(ptr));
}
- return error;
+ /* complete() can reenter this HCD */
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock(&priv->lock);
+ usb_hcd_giveback_urb(hcd, urb, urb->status);
+ spin_lock(&priv->lock);
}
-static void check_int_err_status(u32 dw4)
+static struct isp1760_qtd *qtd_alloc(gfp_t flags, struct urb *urb,
+ u8 packet_type)
{
- u32 i;
-
- dw4 >>= 8;
+ struct isp1760_qtd *qtd;
- for (i = 0; i < 8; i++) {
- switch (dw4 & 0x7) {
- case INT_UNDERRUN:
- printk(KERN_ERR "ERROR: under run , %d\n", i);
- break;
+ qtd = kmem_cache_zalloc(qtd_cachep, flags);
+ if (!qtd)
+ return NULL;
- case INT_EXACT:
- printk(KERN_ERR "ERROR: transaction error, %d\n", i);
- break;
+ INIT_LIST_HEAD(&qtd->qtd_list);
+ qtd->urb = urb;
+ qtd->packet_type = packet_type;
+ qtd->status = QTD_ENQUEUED;
+ qtd->actual_length = 0;
- case INT_BABBLE:
- printk(KERN_ERR "ERROR: babble error, %d\n", i);
- break;
- }
- dw4 >>= 3;
- }
+ return qtd;
}
-static void enqueue_one_qtd(struct isp1760_qtd *qtd, struct isp1760_hcd *priv,
- u32 payload)
+static void qtd_free(struct isp1760_qtd *qtd)
{
- u32 token;
- struct usb_hcd *hcd = priv_to_hcd(priv);
-
- token = qtd->packet_type;
-
- if (qtd->length && (qtd->length <= HC_ATL_PL_SIZE)) {
- switch (token) {
- case IN_PID:
- break;
- case OUT_PID:
- case SETUP_PID:
- priv_write_copy(priv, qtd->data_buffer,
- hcd->regs + payload,
- qtd->length);
- }
- }
+ WARN_ON(qtd->payload_addr);
+ kmem_cache_free(qtd_cachep, qtd);
}
-static void enqueue_one_atl_qtd(u32 atl_regs, u32 payload,
- struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct urb *urb, u32 slot, struct isp1760_qtd *qtd)
+static void start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot,
+ struct slotinfo *slots, struct isp1760_qtd *qtd,
+ struct isp1760_qh *qh, struct ptd *ptd)
{
- struct ptd ptd;
- struct usb_hcd *hcd = priv_to_hcd(priv);
-
- transform_into_atl(priv, qh, qtd, urb, payload, &ptd);
- priv_write_copy(priv, (u32 *)&ptd, hcd->regs + atl_regs, sizeof(ptd));
- enqueue_one_qtd(qtd, priv, payload);
-
- priv->atl_ints[slot].urb = urb;
- priv->atl_ints[slot].qh = qh;
- priv->atl_ints[slot].qtd = qtd;
- priv->atl_ints[slot].data_buffer = qtd->data_buffer;
- priv->atl_ints[slot].payload = payload;
- qtd->status |= URB_ENQUEUED | URB_TYPE_ATL;
- qtd->status |= slot << 16;
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ int skip_map;
+
+ WARN_ON((slot < 0) || (slot > 31));
+ WARN_ON(qtd->length && !qtd->payload_addr);
+ WARN_ON(slots[slot].qtd);
+ WARN_ON(slots[slot].qh);
+ WARN_ON(qtd->status != QTD_PAYLOAD_ALLOC);
+
+ /* Make sure done map has not triggered from some unlinked transfer */
+ if (ptd_offset == ATL_PTD_OFFSET) {
+ priv->atl_done_map |= reg_read32(hcd->regs,
+ HC_ATL_PTD_DONEMAP_REG);
+ priv->atl_done_map &= ~(1 << slot);
+ } else {
+ priv->int_done_map |= reg_read32(hcd->regs,
+ HC_INT_PTD_DONEMAP_REG);
+ priv->int_done_map &= ~(1 << slot);
+ }
+
+ qh->slot = slot;
+ qtd->status = QTD_XFER_STARTED;
+ slots[slot].timestamp = jiffies;
+ slots[slot].qtd = qtd;
+ slots[slot].qh = qh;
+ ptd_write(hcd->regs, ptd_offset, slot, ptd);
+
+ if (ptd_offset == ATL_PTD_OFFSET) {
+ skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
+ skip_map &= ~(1 << qh->slot);
+ reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
+ } else {
+ skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
+ skip_map &= ~(1 << qh->slot);
+ reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
+ }
}
-static void enqueue_one_int_qtd(u32 int_regs, u32 payload,
- struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct urb *urb, u32 slot, struct isp1760_qtd *qtd)
+static int is_short_bulk(struct isp1760_qtd *qtd)
{
- struct ptd ptd;
- struct usb_hcd *hcd = priv_to_hcd(priv);
-
- transform_into_int(priv, qh, qtd, urb, payload, &ptd);
- priv_write_copy(priv, (u32 *)&ptd, hcd->regs + int_regs, sizeof(ptd));
- enqueue_one_qtd(qtd, priv, payload);
-
- priv->int_ints[slot].urb = urb;
- priv->int_ints[slot].qh = qh;
- priv->int_ints[slot].qtd = qtd;
- priv->int_ints[slot].data_buffer = qtd->data_buffer;
- priv->int_ints[slot].payload = payload;
- qtd->status |= URB_ENQUEUED | URB_TYPE_INT;
- qtd->status |= slot << 16;
+ return (usb_pipebulk(qtd->urb->pipe) &&
+ (qtd->actual_length < qtd->length));
}
-static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd)
+static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
+ struct list_head *urb_list)
{
- struct isp1760_hcd *priv = hcd_to_priv(hcd);
- u32 skip_map, or_map;
- u32 queue_entry;
- u32 slot;
- u32 atl_regs, payload;
- u32 buffstatus;
-
- /*
- * When this function is called from the interrupt handler to enqueue
- * a follow-up packet, the SKIP register gets written and read back
- * almost immediately. With ISP1761, this register requires a delay of
- * 195ns between a write and subsequent read (see section 15.1.1.3).
- */
- mmiowb();
- ndelay(195);
- skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
-
- BUG_ON(!skip_map);
- slot = __ffs(skip_map);
- queue_entry = 1 << slot;
+ int last_qtd;
+ struct isp1760_qtd *qtd, *qtd_next;
+ struct urb_listitem *urb_listitem;
- atl_regs = ATL_REGS_OFFSET + slot * sizeof(struct ptd);
-
- payload = alloc_mem(priv, qtd->length);
+ list_for_each_entry_safe(qtd, qtd_next, &qh->qtd_list, qtd_list) {
+ if (qtd->status < QTD_XFER_COMPLETE)
+ break;
- enqueue_one_atl_qtd(atl_regs, payload, priv, qh, qtd->urb, slot, qtd);
+ last_qtd = last_qtd_of_urb(qtd, qh);
+
+ if ((!last_qtd) && (qtd->status == QTD_RETIRE))
+ qtd_next->status = QTD_RETIRE;
+
+ if (qtd->status == QTD_XFER_COMPLETE) {
+ if (qtd->actual_length) {
+ switch (qtd->packet_type) {
+ case IN_PID:
+ mem_reads8(hcd->regs, qtd->payload_addr,
+ qtd->data_buffer,
+ qtd->actual_length);
+ /* Fall through (?) */
+ case OUT_PID:
+ qtd->urb->actual_length +=
+ qtd->actual_length;
+ /* Fall through ... */
+ case SETUP_PID:
+ break;
+ }
+ }
- or_map = isp1760_readl(hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
- or_map |= queue_entry;
- isp1760_writel(or_map, hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
+ if (is_short_bulk(qtd)) {
+ if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK)
+ qtd->urb->status = -EREMOTEIO;
+ if (!last_qtd)
+ qtd_next->status = QTD_RETIRE;
+ }
+ }
- skip_map &= ~queue_entry;
- isp1760_writel(skip_map, hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
+ if (qtd->payload_addr)
+ free_mem(hcd, qtd);
+
+ if (last_qtd) {
+ if ((qtd->status == QTD_RETIRE) &&
+ (qtd->urb->status == -EINPROGRESS))
+ qtd->urb->status = -EPIPE;
+ /* Defer calling of urb_done() since it releases lock */
+ urb_listitem = kmem_cache_zalloc(urb_listitem_cachep,
+ GFP_ATOMIC);
+ if (unlikely(!urb_listitem))
+ break; /* Try again on next call */
+ urb_listitem->urb = qtd->urb;
+ list_add_tail(&urb_listitem->urb_list, urb_list);
+ }
- buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG);
- buffstatus |= ATL_BUFFER;
- isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG);
+ list_del(&qtd->qtd_list);
+ qtd_free(qtd);
+ }
}
-static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd)
+#define ENQUEUE_DEPTH 2
+static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
- u32 skip_map, or_map;
- u32 queue_entry;
- u32 slot;
- u32 int_regs, payload;
- u32 buffstatus;
-
- /*
- * When this function is called from the interrupt handler to enqueue
- * a follow-up packet, the SKIP register gets written and read back
- * almost immediately. With ISP1761, this register requires a delay of
- * 195ns between a write and subsequent read (see section 15.1.1.3).
- */
- mmiowb();
- ndelay(195);
- skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG);
-
- BUG_ON(!skip_map);
- slot = __ffs(skip_map);
- queue_entry = 1 << slot;
-
- int_regs = INT_REGS_OFFSET + slot * sizeof(struct ptd);
-
- payload = alloc_mem(priv, qtd->length);
-
- enqueue_one_int_qtd(int_regs, payload, priv, qh, qtd->urb, slot, qtd);
-
- or_map = isp1760_readl(hcd->regs + HC_INT_IRQ_MASK_OR_REG);
- or_map |= queue_entry;
- isp1760_writel(or_map, hcd->regs + HC_INT_IRQ_MASK_OR_REG);
+ int ptd_offset;
+ struct slotinfo *slots;
+ int curr_slot, free_slot;
+ int n;
+ struct ptd ptd;
+ struct isp1760_qtd *qtd;
- skip_map &= ~queue_entry;
- isp1760_writel(skip_map, hcd->regs + HC_INT_PTD_SKIPMAP_REG);
+ if (unlikely(list_empty(&qh->qtd_list))) {
+ WARN_ON(1);
+ return;
+ }
- buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG);
- buffstatus |= INT_BUFFER;
- isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG);
-}
+ /* Make sure this endpoint's TT buffer is clean before queueing ptds */
+ if (qh->tt_buffer_dirty)
+ return;
-static void isp1760_urb_done(struct isp1760_hcd *priv, struct urb *urb, int status)
-__releases(priv->lock)
-__acquires(priv->lock)
-{
- if (!urb->unlinked) {
- if (status == -EINPROGRESS)
- status = 0;
+ if (usb_pipeint(list_entry(qh->qtd_list.next, struct isp1760_qtd,
+ qtd_list)->urb->pipe)) {
+ ptd_offset = INT_PTD_OFFSET;
+ slots = priv->int_slots;
+ } else {
+ ptd_offset = ATL_PTD_OFFSET;
+ slots = priv->atl_slots;
}
- if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
- void *ptr;
- for (ptr = urb->transfer_buffer;
- ptr < urb->transfer_buffer + urb->transfer_buffer_length;
- ptr += PAGE_SIZE)
- flush_dcache_page(virt_to_page(ptr));
+ free_slot = -1;
+ for (curr_slot = 0; curr_slot < 32; curr_slot++) {
+ if ((free_slot == -1) && (slots[curr_slot].qtd == NULL))
+ free_slot = curr_slot;
+ if (slots[curr_slot].qh == qh)
+ break;
}
- /* complete() can reenter this HCD */
- usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
- spin_unlock(&priv->lock);
- usb_hcd_giveback_urb(priv_to_hcd(priv), urb, status);
- spin_lock(&priv->lock);
-}
-
-static void isp1760_qtd_free(struct isp1760_qtd *qtd)
-{
- kmem_cache_free(qtd_cachep, qtd);
-}
+ n = 0;
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
+ if (qtd->status == QTD_ENQUEUED) {
+ WARN_ON(qtd->payload_addr);
+ alloc_mem(hcd, qtd);
+ if ((qtd->length) && (!qtd->payload_addr))
+ break;
-static struct isp1760_qtd *clean_this_qtd(struct isp1760_qtd *qtd)
-{
- struct isp1760_qtd *tmp_qtd;
+ if ((qtd->length) &&
+ ((qtd->packet_type == SETUP_PID) ||
+ (qtd->packet_type == OUT_PID))) {
+ mem_writes8(hcd->regs, qtd->payload_addr,
+ qtd->data_buffer, qtd->length);
+ }
- tmp_qtd = qtd->hw_next;
- list_del(&qtd->qtd_list);
- isp1760_qtd_free(qtd);
- return tmp_qtd;
-}
+ qtd->status = QTD_PAYLOAD_ALLOC;
+ }
+ if (qtd->status == QTD_PAYLOAD_ALLOC) {
/*
- * Remove this QTD from the QH list and free its memory. If this QTD
- * isn't the last one than remove also his successor(s).
- * Returns the QTD which is part of an new URB and should be enqueued.
- */
-static struct isp1760_qtd *clean_up_qtdlist(struct isp1760_qtd *qtd)
-{
- struct isp1760_qtd *tmp_qtd;
- int last_one;
-
- do {
- tmp_qtd = qtd->hw_next;
- last_one = qtd->status & URB_COMPLETE_NOTIFY;
- list_del(&qtd->qtd_list);
- isp1760_qtd_free(qtd);
- qtd = tmp_qtd;
- } while (!last_one && qtd);
+ if ((curr_slot > 31) && (free_slot == -1))
+ dev_dbg(hcd->self.controller, "%s: No slot "
+ "available for transfer\n", __func__);
+*/
+ /* Start xfer for this endpoint if not already done */
+ if ((curr_slot > 31) && (free_slot > -1)) {
+ if (usb_pipeint(qtd->urb->pipe))
+ create_ptd_int(qh, qtd, &ptd);
+ else
+ create_ptd_atl(qh, qtd, &ptd);
+
+ start_bus_transfer(hcd, ptd_offset, free_slot,
+ slots, qtd, qh, &ptd);
+ curr_slot = free_slot;
+ }
- return qtd;
+ n++;
+ if (n >= ENQUEUE_DEPTH)
+ break;
+ }
+ }
}
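With ENQUEUE_DEPTH == 2 the loop above effectively double-buffers each endpoint; a sketch of one round trip (derived from the code above and from handle_done_ptds() further down, not from the datasheet):

    enqueue_qtds():   qtd A -> payload allocated -> started on the bus
                      qtd B -> payload allocated only
    done interrupt:   A completes; handle_done_ptds() writes B's PTD and starts
                      it directly, without waiting for another schedule_ptds() pass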
-static void do_atl_int(struct usb_hcd *usb_hcd)
+static void schedule_ptds(struct usb_hcd *hcd)
{
- struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
- u32 done_map, skip_map;
- struct ptd ptd;
- struct urb *urb = NULL;
- u32 atl_regs_base;
- u32 atl_regs;
- u32 queue_entry;
- u32 payload;
- u32 length;
- u32 or_map;
- u32 status = -EINVAL;
- int error;
- struct isp1760_qtd *qtd;
- struct isp1760_qh *qh;
- u32 rl;
- u32 nakcount;
-
- done_map = isp1760_readl(usb_hcd->regs +
- HC_ATL_PTD_DONEMAP_REG);
- skip_map = isp1760_readl(usb_hcd->regs +
- HC_ATL_PTD_SKIPMAP_REG);
-
- or_map = isp1760_readl(usb_hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
- or_map &= ~done_map;
- isp1760_writel(or_map, usb_hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
-
- atl_regs_base = ATL_REGS_OFFSET;
- while (done_map) {
- u32 dw1;
- u32 dw2;
- u32 dw3;
-
- status = 0;
-
- queue_entry = __ffs(done_map);
- done_map &= ~(1 << queue_entry);
- skip_map |= 1 << queue_entry;
+ struct isp1760_hcd *priv;
+ struct isp1760_qh *qh, *qh_next;
+ struct list_head *ep_queue;
+ LIST_HEAD(urb_list);
+ struct urb_listitem *urb_listitem, *urb_listitem_next;
+ int i;
- atl_regs = atl_regs_base + queue_entry * sizeof(struct ptd);
+ if (!hcd) {
+ WARN_ON(1);
+ return;
+ }
- urb = priv->atl_ints[queue_entry].urb;
- qtd = priv->atl_ints[queue_entry].qtd;
- qh = priv->atl_ints[queue_entry].qh;
- payload = priv->atl_ints[queue_entry].payload;
+ priv = hcd_to_priv(hcd);
- if (!qh) {
- printk(KERN_ERR "qh is 0\n");
- continue;
+ /*
+ * check finished/retired xfers, transfer payloads, call urb_done()
+ */
+ for (i = 0; i < QH_END; i++) {
+ ep_queue = &priv->qh_list[i];
+ list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) {
+ collect_qtds(hcd, qh, &urb_list);
+ if (list_empty(&qh->qtd_list))
+ list_del(&qh->qh_list);
}
- isp1760_writel(atl_regs + ISP_BANK(0), usb_hcd->regs +
- HC_MEMORY_REG);
- isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
- HC_MEMORY_REG);
- /*
- * write bank1 address twice to ensure the 90ns delay (time
- * between BANK0 write and the priv_read_copy() call is at
- * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns)
- */
- isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
- HC_MEMORY_REG);
-
- priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs +
- ISP_BANK(0), sizeof(ptd));
-
- dw1 = le32_to_cpu(ptd.dw1);
- dw2 = le32_to_cpu(ptd.dw2);
- dw3 = le32_to_cpu(ptd.dw3);
- rl = (dw2 >> 25) & 0x0f;
- nakcount = (dw3 >> 19) & 0xf;
-
- /* Transfer Error, *but* active and no HALT -> reload */
- if ((dw3 & DW3_ERROR_BIT) && (dw3 & DW3_QTD_ACTIVE) &&
- !(dw3 & DW3_HALT_BIT)) {
-
- /* according to ppriv code, we have to
- * reload this one if trasfered bytes != requested bytes
- * else act like everything went smooth..
- * XXX This just doesn't feel right and hasn't
- * triggered so far.
- */
+ }
- length = PTD_XFERRED_LENGTH(dw3);
- printk(KERN_ERR "Should reload now.... transfered %d "
- "of %zu\n", length, qtd->length);
- BUG();
- }
+ list_for_each_entry_safe(urb_listitem, urb_listitem_next, &urb_list,
+ urb_list) {
+ isp1760_urb_done(hcd, urb_listitem->urb);
+ kmem_cache_free(urb_listitem_cachep, urb_listitem);
+ }
- if (!nakcount && (dw3 & DW3_QTD_ACTIVE)) {
- u32 buffstatus;
+ /*
+ * Schedule packets for transfer.
+ *
+ * According to USB2.0 specification:
+ *
+ * 1st prio: interrupt xfers, up to 80 % of bandwidth
+ * 2nd prio: control xfers
+ * 3rd prio: bulk xfers
+ *
+ * ... but let's use a simpler scheme here (mostly because ISP1761 doc
+ * is very unclear on how to prioritize traffic):
+ *
+ * 1) Enqueue any queued control transfers, as long as payload chip mem
+ * and PTD ATL slots are available.
+ * 2) Enqueue any queued INT transfers, as long as payload chip mem
+ * and PTD INT slots are available.
+ * 3) Enqueue any queued bulk transfers, as long as payload chip mem
+ * and PTD ATL slots are available.
+ *
+ * Use double buffering (ENQUEUE_DEPTH==2) as a compromise between
+ * conservation of chip mem and performance.
+ *
+ * I'm sure this scheme could be improved upon!
+ */
+ for (i = 0; i < QH_END; i++) {
+ ep_queue = &priv->qh_list[i];
+ list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list)
+ enqueue_qtds(hcd, qh);
+ }
+}
- /*
- * NAKs are handled in HW by the chip. Usually if the
- * device is not able to send data fast enough.
- * This happens mostly on slower hardware.
- */
- printk(KERN_NOTICE "Reloading ptd %p/%p... qh %p read: "
- "%d of %zu done: %08x cur: %08x\n", qtd,
- urb, qh, PTD_XFERRED_LENGTH(dw3),
- qtd->length, done_map,
- (1 << queue_entry));
+#define PTD_STATE_QTD_DONE 1
+#define PTD_STATE_QTD_RELOAD 2
+#define PTD_STATE_URB_RETIRE 3
- /* RL counter = ERR counter */
- dw3 &= ~(0xf << 19);
- dw3 |= rl << 19;
- dw3 &= ~(3 << (55 - 32));
- dw3 |= ERR_COUNTER << (55 - 32);
-
- /*
- * It is not needed to write skip map back because it
- * is unchanged. Just make sure that this entry is
- * unskipped once it gets written to the HW.
- */
- skip_map &= ~(1 << queue_entry);
- or_map = isp1760_readl(usb_hcd->regs +
- HC_ATL_IRQ_MASK_OR_REG);
- or_map |= 1 << queue_entry;
- isp1760_writel(or_map, usb_hcd->regs +
- HC_ATL_IRQ_MASK_OR_REG);
-
- ptd.dw3 = cpu_to_le32(dw3);
- priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs +
- atl_regs, sizeof(ptd));
-
- ptd.dw0 |= cpu_to_le32(PTD_VALID);
- priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs +
- atl_regs, sizeof(ptd));
-
- buffstatus = isp1760_readl(usb_hcd->regs +
- HC_BUFFER_STATUS_REG);
- buffstatus |= ATL_BUFFER;
- isp1760_writel(buffstatus, usb_hcd->regs +
- HC_BUFFER_STATUS_REG);
- continue;
- }
+static int check_int_transfer(struct usb_hcd *hcd, struct ptd *ptd,
+ struct urb *urb)
+{
+ __dw dw4;
+ int i;
- error = check_error(&ptd);
- if (error) {
- status = error;
- priv->atl_ints[queue_entry].qh->toggle = 0;
- priv->atl_ints[queue_entry].qh->ping = 0;
- urb->status = -EPIPE;
-
-#if 0
- printk(KERN_ERR "Error in %s().\n", __func__);
- printk(KERN_ERR "IN dw0: %08x dw1: %08x dw2: %08x "
- "dw3: %08x dw4: %08x dw5: %08x dw6: "
- "%08x dw7: %08x\n",
- ptd.dw0, ptd.dw1, ptd.dw2, ptd.dw3,
- ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7);
-#endif
- } else {
- if (usb_pipetype(urb->pipe) == PIPE_BULK) {
- priv->atl_ints[queue_entry].qh->toggle = dw3 &
- (1 << 25);
- priv->atl_ints[queue_entry].qh->ping = dw3 &
- (1 << 26);
- }
- }
+ dw4 = ptd->dw4;
+ dw4 >>= 8;
- length = PTD_XFERRED_LENGTH(dw3);
- if (length) {
- switch (DW1_GET_PID(dw1)) {
- case IN_PID:
- priv_read_copy(priv,
- priv->atl_ints[queue_entry].data_buffer,
- usb_hcd->regs + payload + ISP_BANK(1),
- length);
+ /* FIXME: ISP1761 datasheet does not say what to do with these. Do we
+ need to handle these errors? Is it done in hardware? */
- case OUT_PID:
+ if (ptd->dw3 & DW3_HALT_BIT) {
- urb->actual_length += length;
+ urb->status = -EPROTO; /* Default unknown error */
- case SETUP_PID:
+ for (i = 0; i < 8; i++) {
+ switch (dw4 & 0x7) {
+ case INT_UNDERRUN:
+ dev_dbg(hcd->self.controller, "%s: underrun "
+ "during uFrame %d\n",
+ __func__, i);
+ urb->status = -ECOMM; /* Could not write data */
+ break;
+ case INT_EXACT:
+ dev_dbg(hcd->self.controller, "%s: transaction "
+ "error during uFrame %d\n",
+ __func__, i);
+ urb->status = -EPROTO; /* timeout, bad CRC, PID
+ error etc. */
+ break;
+ case INT_BABBLE:
+ dev_dbg(hcd->self.controller, "%s: babble "
+ "error during uFrame %d\n",
+ __func__, i);
+ urb->status = -EOVERFLOW;
break;
}
+ dw4 >>= 3;
}
- priv->atl_ints[queue_entry].data_buffer = NULL;
- priv->atl_ints[queue_entry].urb = NULL;
- priv->atl_ints[queue_entry].qtd = NULL;
- priv->atl_ints[queue_entry].qh = NULL;
-
- free_mem(priv, payload);
-
- isp1760_writel(skip_map, usb_hcd->regs +
- HC_ATL_PTD_SKIPMAP_REG);
-
- if (urb->status == -EPIPE) {
- /* HALT was received */
-
- qtd = clean_up_qtdlist(qtd);
- isp1760_urb_done(priv, urb, urb->status);
-
- } else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) {
- /* short BULK received */
-
- if (urb->transfer_flags & URB_SHORT_NOT_OK) {
- urb->status = -EREMOTEIO;
- isp1760_dbg(priv, "short bulk, %d instead %zu "
- "with URB_SHORT_NOT_OK flag.\n",
- length, qtd->length);
- }
-
- if (urb->status == -EINPROGRESS)
- urb->status = 0;
-
- qtd = clean_up_qtdlist(qtd);
-
- isp1760_urb_done(priv, urb, urb->status);
-
- } else if (qtd->status & URB_COMPLETE_NOTIFY) {
- /* that was the last qtd of that URB */
-
- if (urb->status == -EINPROGRESS)
- urb->status = 0;
-
- qtd = clean_this_qtd(qtd);
- isp1760_urb_done(priv, urb, urb->status);
+ return PTD_STATE_URB_RETIRE;
+ }
- } else {
- /* next QTD of this URB */
+ return PTD_STATE_QTD_DONE;
+}
- qtd = clean_this_qtd(qtd);
- BUG_ON(!qtd);
- }
+static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
+ struct urb *urb)
+{
+ WARN_ON(!ptd);
+ if (ptd->dw3 & DW3_HALT_BIT) {
+ if (ptd->dw3 & DW3_BABBLE_BIT)
+ urb->status = -EOVERFLOW;
+ else if (FROM_DW3_CERR(ptd->dw3))
+ urb->status = -EPIPE; /* Stall */
+ else if (ptd->dw3 & DW3_ERROR_BIT)
+ urb->status = -EPROTO; /* XactErr */
+ else
+ urb->status = -EPROTO; /* Unknown */
+/*
+ dev_dbg(hcd->self.controller, "%s: ptd error:\n"
+ " dw0: %08x dw1: %08x dw2: %08x dw3: %08x\n"
+ " dw4: %08x dw5: %08x dw6: %08x dw7: %08x\n",
+ __func__,
+ ptd->dw0, ptd->dw1, ptd->dw2, ptd->dw3,
+ ptd->dw4, ptd->dw5, ptd->dw6, ptd->dw7);
+*/
+ return PTD_STATE_URB_RETIRE;
+ }
- if (qtd)
- enqueue_an_ATL_packet(usb_hcd, qh, qtd);
+ if ((ptd->dw3 & DW3_ERROR_BIT) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
+ /* Transfer Error, *but* active and no HALT -> reload */
+ dev_dbg(hcd->self.controller, "PID error; reloading ptd\n");
+ return PTD_STATE_QTD_RELOAD;
+ }
- skip_map = isp1760_readl(usb_hcd->regs +
- HC_ATL_PTD_SKIPMAP_REG);
+ if (!FROM_DW3_NAKCOUNT(ptd->dw3) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
+ /*
+ * NAKs are handled in HW by the chip. Usually if the
+ * device is not able to send data fast enough.
+ * This happens mostly on slower hardware.
+ */
+ return PTD_STATE_QTD_RELOAD;
}
+
+ return PTD_STATE_QTD_DONE;
}
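The decision above boils down to the following mapping from dw3 bits to the returned state (a summary of the checks in this function):

    HALT set                          -> PTD_STATE_URB_RETIRE (urb->status from babble/CERR/error bits)
    ERROR and ACTIVE set, HALT clear  -> PTD_STATE_QTD_RELOAD
    ACTIVE set and NakCnt == 0        -> PTD_STATE_QTD_RELOAD (NAK limit hit, re-arm the PTD)
    otherwise                         -> PTD_STATE_QTD_DONE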
-static void do_intl_int(struct usb_hcd *usb_hcd)
+static void handle_done_ptds(struct usb_hcd *hcd)
{
- struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
- u32 done_map, skip_map;
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
struct ptd ptd;
- struct urb *urb = NULL;
- u32 int_regs;
- u32 int_regs_base;
- u32 payload;
- u32 length;
- u32 or_map;
- int error;
- u32 queue_entry;
- struct isp1760_qtd *qtd;
struct isp1760_qh *qh;
+ int slot;
+ int state;
+ struct slotinfo *slots;
+ u32 ptd_offset;
+ struct isp1760_qtd *qtd;
+ int modified;
+ int skip_map;
+
+ skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
+ priv->int_done_map &= ~skip_map;
+ skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
+ priv->atl_done_map &= ~skip_map;
+
+ modified = priv->int_done_map || priv->atl_done_map;
+
+ while (priv->int_done_map || priv->atl_done_map) {
+ if (priv->int_done_map) {
+ /* INT ptd */
+ slot = __ffs(priv->int_done_map);
+ priv->int_done_map &= ~(1 << slot);
+ slots = priv->int_slots;
+ /* This should not trigger, and could be removed if
+ no one has any problems with it triggering: */
+ if (!slots[slot].qh) {
+ WARN_ON(1);
+ continue;
+ }
+ ptd_offset = INT_PTD_OFFSET;
+ ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd);
+ state = check_int_transfer(hcd, &ptd,
+ slots[slot].qtd->urb);
+ } else {
+ /* ATL ptd */
+ slot = __ffs(priv->atl_done_map);
+ priv->atl_done_map &= ~(1 << slot);
+ slots = priv->atl_slots;
+ /* This should not trigger, and could be removed if
+ no one has any problems with it triggering: */
+ if (!slots[slot].qh) {
+ WARN_ON(1);
+ continue;
+ }
+ ptd_offset = ATL_PTD_OFFSET;
+ ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
+ state = check_atl_transfer(hcd, &ptd,
+ slots[slot].qtd->urb);
+ }
- done_map = isp1760_readl(usb_hcd->regs +
- HC_INT_PTD_DONEMAP_REG);
- skip_map = isp1760_readl(usb_hcd->regs +
- HC_INT_PTD_SKIPMAP_REG);
-
- or_map = isp1760_readl(usb_hcd->regs + HC_INT_IRQ_MASK_OR_REG);
- or_map &= ~done_map;
- isp1760_writel(or_map, usb_hcd->regs + HC_INT_IRQ_MASK_OR_REG);
-
- int_regs_base = INT_REGS_OFFSET;
-
- while (done_map) {
- u32 dw1;
- u32 dw3;
+ qtd = slots[slot].qtd;
+ slots[slot].qtd = NULL;
+ qh = slots[slot].qh;
+ slots[slot].qh = NULL;
+ qh->slot = -1;
+
+ WARN_ON(qtd->status != QTD_XFER_STARTED);
+
+ switch (state) {
+ case PTD_STATE_QTD_DONE:
+ if ((usb_pipeint(qtd->urb->pipe)) &&
+ (qtd->urb->dev->speed != USB_SPEED_HIGH))
+ qtd->actual_length =
+ FROM_DW3_SCS_NRBYTESTRANSFERRED(ptd.dw3);
+ else
+ qtd->actual_length =
+ FROM_DW3_NRBYTESTRANSFERRED(ptd.dw3);
+
+ qtd->status = QTD_XFER_COMPLETE;
+ if (list_is_last(&qtd->qtd_list, &qh->qtd_list) ||
+ is_short_bulk(qtd))
+ qtd = NULL;
+ else
+ qtd = list_entry(qtd->qtd_list.next,
+ typeof(*qtd), qtd_list);
+
+ qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
+ qh->ping = FROM_DW3_PING(ptd.dw3);
+ break;
- queue_entry = __ffs(done_map);
- done_map &= ~(1 << queue_entry);
- skip_map |= 1 << queue_entry;
+ case PTD_STATE_QTD_RELOAD: /* QTD_RETRY, for atls only */
+ qtd->status = QTD_PAYLOAD_ALLOC;
+ ptd.dw0 |= DW0_VALID_BIT;
+ /* RL counter = ERR counter */
+ ptd.dw3 &= ~TO_DW3_NAKCOUNT(0xf);
+ ptd.dw3 |= TO_DW3_NAKCOUNT(FROM_DW2_RL(ptd.dw2));
+ ptd.dw3 &= ~TO_DW3_CERR(3);
+ ptd.dw3 |= TO_DW3_CERR(ERR_COUNTER);
+ qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
+ qh->ping = FROM_DW3_PING(ptd.dw3);
+ break;
- int_regs = int_regs_base + queue_entry * sizeof(struct ptd);
- urb = priv->int_ints[queue_entry].urb;
- qtd = priv->int_ints[queue_entry].qtd;
- qh = priv->int_ints[queue_entry].qh;
- payload = priv->int_ints[queue_entry].payload;
+ case PTD_STATE_URB_RETIRE:
+ qtd->status = QTD_RETIRE;
+ if ((qtd->urb->dev->speed != USB_SPEED_HIGH) &&
+ (qtd->urb->status != -EPIPE) &&
+ (qtd->urb->status != -EREMOTEIO)) {
+ qh->tt_buffer_dirty = 1;
+ if (usb_hub_clear_tt_buffer(qtd->urb))
+ /* Clear failed; let's hope things work
+ anyway */
+ qh->tt_buffer_dirty = 0;
+ }
+ qtd = NULL;
+ qh->toggle = 0;
+ qh->ping = 0;
+ break;
- if (!qh) {
- printk(KERN_ERR "(INT) qh is 0\n");
+ default:
+ WARN_ON(1);
continue;
}
- isp1760_writel(int_regs + ISP_BANK(0), usb_hcd->regs +
- HC_MEMORY_REG);
- isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
- HC_MEMORY_REG);
- /*
- * write bank1 address twice to ensure the 90ns delay (time
- * between BANK0 write and the priv_read_copy() call is at
- * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 92ns)
- */
- isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
- HC_MEMORY_REG);
-
- priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs +
- ISP_BANK(0), sizeof(ptd));
- dw1 = le32_to_cpu(ptd.dw1);
- dw3 = le32_to_cpu(ptd.dw3);
- check_int_err_status(le32_to_cpu(ptd.dw4));
-
- error = check_error(&ptd);
- if (error) {
-#if 0
- printk(KERN_ERR "Error in %s().\n", __func__);
- printk(KERN_ERR "IN dw0: %08x dw1: %08x dw2: %08x "
- "dw3: %08x dw4: %08x dw5: %08x dw6: "
- "%08x dw7: %08x\n",
- ptd.dw0, ptd.dw1, ptd.dw2, ptd.dw3,
- ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7);
-#endif
- urb->status = -EPIPE;
- priv->int_ints[queue_entry].qh->toggle = 0;
- priv->int_ints[queue_entry].qh->ping = 0;
+ if (qtd && (qtd->status == QTD_PAYLOAD_ALLOC)) {
+ if (slots == priv->int_slots) {
+ if (state == PTD_STATE_QTD_RELOAD)
+ dev_err(hcd->self.controller,
+ "%s: PTD_STATE_QTD_RELOAD on "
+ "interrupt packet\n", __func__);
+ if (state != PTD_STATE_QTD_RELOAD)
+ create_ptd_int(qh, qtd, &ptd);
+ } else {
+ if (state != PTD_STATE_QTD_RELOAD)
+ create_ptd_atl(qh, qtd, &ptd);
+ }
- } else {
- priv->int_ints[queue_entry].qh->toggle =
- dw3 & (1 << 25);
- priv->int_ints[queue_entry].qh->ping = dw3 & (1 << 26);
+ start_bus_transfer(hcd, ptd_offset, slot, slots, qtd,
+ qh, &ptd);
}
+ }
- if (urb->dev->speed != USB_SPEED_HIGH)
- length = PTD_XFERRED_LENGTH_LO(dw3);
- else
- length = PTD_XFERRED_LENGTH(dw3);
-
- if (length) {
- switch (DW1_GET_PID(dw1)) {
- case IN_PID:
- priv_read_copy(priv,
- priv->int_ints[queue_entry].data_buffer,
- usb_hcd->regs + payload + ISP_BANK(1),
- length);
- case OUT_PID:
-
- urb->actual_length += length;
+ if (modified)
+ schedule_ptds(hcd);
+}
- case SETUP_PID:
- break;
- }
- }
+static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ u32 imask;
+ irqreturn_t irqret = IRQ_NONE;
- priv->int_ints[queue_entry].data_buffer = NULL;
- priv->int_ints[queue_entry].urb = NULL;
- priv->int_ints[queue_entry].qtd = NULL;
- priv->int_ints[queue_entry].qh = NULL;
+ spin_lock(&priv->lock);
- isp1760_writel(skip_map, usb_hcd->regs +
- HC_INT_PTD_SKIPMAP_REG);
- free_mem(priv, payload);
+ if (!(hcd->state & HC_STATE_RUNNING))
+ goto leave;
- if (urb->status == -EPIPE) {
- /* HALT received */
+ imask = reg_read32(hcd->regs, HC_INTERRUPT_REG);
+ if (unlikely(!imask))
+ goto leave;
+ reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); /* Clear */
- qtd = clean_up_qtdlist(qtd);
- isp1760_urb_done(priv, urb, urb->status);
+ priv->int_done_map |= reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG);
+ priv->atl_done_map |= reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG);
- } else if (qtd->status & URB_COMPLETE_NOTIFY) {
+ handle_done_ptds(hcd);
- if (urb->status == -EINPROGRESS)
- urb->status = 0;
+ irqret = IRQ_HANDLED;
+leave:
+ spin_unlock(&priv->lock);
- qtd = clean_this_qtd(qtd);
- isp1760_urb_done(priv, urb, urb->status);
+ return irqret;
+}
- } else {
- /* next QTD of this URB */
+/*
+ * Workaround for problem described in chip errata 2:
+ *
+ * Sometimes interrupts are not generated when ATL (not INT?) completion occurs.
+ * One solution suggested in the errata is to use SOF interrupts _instead_of_
+ * ATL done interrupts (the "instead of" might be important since it seems
+ * enabling ATL interrupts also causes the chip to sometimes - rarely - "forget"
+ * to set the PTD's done bit in addition to not generating an interrupt!).
+ *
+ * So if we use SOF + ATL interrupts, we sometimes get stale PTDs since their
+ * done bit is not being set. This is bad - it blocks the endpoint until reboot.
+ *
+ * If we use SOF interrupts only, we get latency between ptd completion and the
+ * actual handling. This is very noticeable in testusb runs which take several
+ * minutes longer without ATL interrupts.
+ *
+ * A better solution is to run the code below every SLOT_CHECK_PERIOD ms. If it
+ * finds active ATL slots which are older than SLOT_TIMEOUT ms, it checks the
+ * slot's ACTIVE and VALID bits. If these are not set, the ptd is considered
+ * completed and its done map bit is set.
+ *
+ * The values of SLOT_TIMEOUT and SLOT_CHECK_PERIOD have been arbitrarily chosen
+ * not to cause too much lag when this HW bug occurs, while still hopefully
+ * ensuring that the check does not falsely trigger.
+ */
+#define SLOT_TIMEOUT 300
+#define SLOT_CHECK_PERIOD 200
+static struct timer_list errata2_timer;
- qtd = clean_this_qtd(qtd);
- BUG_ON(!qtd);
+static void errata2_function(unsigned long data)
+{
+ struct usb_hcd *hcd = (struct usb_hcd *) data;
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ int slot;
+ struct ptd ptd;
+ unsigned long spinflags;
+
+ spin_lock_irqsave(&priv->lock, spinflags);
+
+ for (slot = 0; slot < 32; slot++)
+ if (priv->atl_slots[slot].qh && time_after(jiffies,
+ priv->atl_slots[slot].timestamp +
+ SLOT_TIMEOUT * HZ / 1000)) {
+ ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
+ if (!FROM_DW0_VALID(ptd.dw0) &&
+ !FROM_DW3_ACTIVE(ptd.dw3))
+ priv->atl_done_map |= 1 << slot;
}
- if (qtd)
- enqueue_an_INT_packet(usb_hcd, qh, qtd);
+ if (priv->atl_done_map)
+ handle_done_ptds(hcd);
- skip_map = isp1760_readl(usb_hcd->regs +
- HC_INT_PTD_SKIPMAP_REG);
- }
+ spin_unlock_irqrestore(&priv->lock, spinflags);
+
+ errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
+ add_timer(&errata2_timer);
}
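As a rough worked example (assuming HZ = 250 purely for illustration), the two constants translate to:

    SLOT_TIMEOUT * HZ / 1000      = 300 * 250 / 1000 = 75 jiffies
    SLOT_CHECK_PERIOD * HZ / 1000 = 200 * 250 / 1000 = 50 jiffies

so a PTD whose done interrupt was lost is reclaimed within roughly SLOT_TIMEOUT + SLOT_CHECK_PERIOD, i.e. about half a second of the transfer having been started.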
-#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
-static struct isp1760_qh *qh_make(struct isp1760_hcd *priv, struct urb *urb,
- gfp_t flags)
+static int isp1760_run(struct usb_hcd *hcd)
{
- struct isp1760_qh *qh;
- int is_input, type;
-
- qh = isp1760_qh_alloc(priv, flags);
- if (!qh)
- return qh;
-
- /*
- * init endpoint/device data for this QH
- */
- is_input = usb_pipein(urb->pipe);
- type = usb_pipetype(urb->pipe);
+ int retval;
+ u32 temp;
+ u32 command;
+ u32 chipid;
- if (type == PIPE_INTERRUPT) {
+ hcd->uses_new_polling = 1;
- if (urb->dev->speed == USB_SPEED_HIGH) {
+ hcd->state = HC_STATE_RUNNING;
- qh->period = urb->interval >> 3;
- if (qh->period == 0 && urb->interval != 1) {
- /* NOTE interval 2 or 4 uframes could work.
- * But interval 1 scheduling is simpler, and
- * includes high bandwidth.
- */
- printk(KERN_ERR "intr period %d uframes, NYET!",
- urb->interval);
- qh_destroy(qh);
- return NULL;
- }
- } else {
- qh->period = urb->interval;
- }
- }
+ /* Set PTD interrupt AND & OR maps */
+ reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0);
+ reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0);
+ reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0);
+ reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff);
+ /* step 23 passed */
- /* support for tt scheduling, and access to toggles */
- qh->dev = urb->dev;
+ temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp | HW_GLOBAL_INTR_EN);
- if (!usb_pipecontrol(urb->pipe))
- usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input,
- 1);
- return qh;
-}
+ command = reg_read32(hcd->regs, HC_USBCMD);
+ command &= ~(CMD_LRESET|CMD_RESET);
+ command |= CMD_RUN;
+ reg_write32(hcd->regs, HC_USBCMD, command);
-/*
- * For control/bulk/interrupt, return QH with these TDs appended.
- * Allocates and initializes the QH if necessary.
- * Returns null if it can't allocate a QH it needs to.
- * If the QH has TDs (urbs) already, that's great.
- */
-static struct isp1760_qh *qh_append_tds(struct isp1760_hcd *priv,
- struct urb *urb, struct list_head *qtd_list, int epnum,
- void **ptr)
-{
- struct isp1760_qh *qh;
- struct isp1760_qtd *qtd;
- struct isp1760_qtd *prev_qtd;
+ retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 250 * 1000);
+ if (retval)
+ return retval;
- qh = (struct isp1760_qh *)*ptr;
- if (!qh) {
- /* can't sleep here, we have priv->lock... */
- qh = qh_make(priv, urb, GFP_ATOMIC);
- if (!qh)
- return qh;
- *ptr = qh;
- }
+ /*
+ * XXX
+ * Spec says to write FLAG_CF as last config action, priv code grabs
+ * the semaphore while doing so.
+ */
+ down_write(&ehci_cf_port_reset_rwsem);
+ reg_write32(hcd->regs, HC_CONFIGFLAG, FLAG_CF);
- qtd = list_entry(qtd_list->next, struct isp1760_qtd,
- qtd_list);
- if (!list_empty(&qh->qtd_list))
- prev_qtd = list_entry(qh->qtd_list.prev,
- struct isp1760_qtd, qtd_list);
- else
- prev_qtd = NULL;
+ retval = handshake(hcd, HC_CONFIGFLAG, FLAG_CF, FLAG_CF, 250 * 1000);
+ up_write(&ehci_cf_port_reset_rwsem);
+ if (retval)
+ return retval;
- list_splice(qtd_list, qh->qtd_list.prev);
- if (prev_qtd) {
- BUG_ON(prev_qtd->hw_next);
- prev_qtd->hw_next = qtd;
- }
+ init_timer(&errata2_timer);
+ errata2_timer.function = errata2_function;
+ errata2_timer.data = (unsigned long) hcd;
+ errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
+ add_timer(&errata2_timer);
- urb->hcpriv = qh;
- return qh;
-}
+ chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
+ dev_info(hcd->self.controller, "USB ISP %04x HW rev. %d started\n",
+ chipid & 0xffff, chipid >> 16);
-static void qtd_list_free(struct isp1760_hcd *priv, struct urb *urb,
- struct list_head *qtd_list)
-{
- struct list_head *entry, *temp;
+ /* PTD Register Init Part 2, Step 28 */
- list_for_each_safe(entry, temp, qtd_list) {
- struct isp1760_qtd *qtd;
+ /* Setup registers controlling PTD checking */
+ reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000);
+ reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000);
+ reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001);
+ reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_BUFFER_STATUS_REG,
+ ATL_BUF_FILL | INT_BUF_FILL);
- qtd = list_entry(entry, struct isp1760_qtd, qtd_list);
- list_del(&qtd->qtd_list);
- isp1760_qtd_free(qtd);
- }
+ /* GRR this is run-once init(), being done every time the HC starts.
+ * So long as they're part of class devices, we can't do it in init()
+ * since the class device isn't created that early.
+ */
+ return 0;
}
-static int isp1760_prepare_enqueue(struct isp1760_hcd *priv, struct urb *urb,
- struct list_head *qtd_list, gfp_t mem_flags, packet_enqueue *p)
+static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len)
{
- struct isp1760_qtd *qtd;
- int epnum;
- unsigned long flags;
- struct isp1760_qh *qh = NULL;
- int rc;
- int qh_busy;
-
- qtd = list_entry(qtd_list->next, struct isp1760_qtd, qtd_list);
- epnum = urb->ep->desc.bEndpointAddress;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (!HCD_HW_ACCESSIBLE(priv_to_hcd(priv))) {
- rc = -ESHUTDOWN;
- goto done;
- }
- rc = usb_hcd_link_urb_to_ep(priv_to_hcd(priv), urb);
- if (rc)
- goto done;
-
- qh = urb->ep->hcpriv;
- if (qh)
- qh_busy = !list_empty(&qh->qtd_list);
- else
- qh_busy = 0;
-
- qh = qh_append_tds(priv, urb, qtd_list, epnum, &urb->ep->hcpriv);
- if (!qh) {
- usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
- rc = -ENOMEM;
- goto done;
- }
+ qtd->data_buffer = databuffer;
- if (!qh_busy)
- p(priv_to_hcd(priv), qh, qtd);
+ if (len > MAX_PAYLOAD_SIZE)
+ len = MAX_PAYLOAD_SIZE;
+ qtd->length = len;
-done:
- spin_unlock_irqrestore(&priv->lock, flags);
- if (!qh)
- qtd_list_free(priv, urb, qtd_list);
- return rc;
+ return qtd->length;
}
-static struct isp1760_qtd *isp1760_qtd_alloc(struct isp1760_hcd *priv,
- gfp_t flags)
+static void qtd_list_free(struct list_head *qtd_list)
{
- struct isp1760_qtd *qtd;
-
- qtd = kmem_cache_zalloc(qtd_cachep, flags);
- if (qtd)
- INIT_LIST_HEAD(&qtd->qtd_list);
+ struct isp1760_qtd *qtd, *qtd_next;
- return qtd;
+ list_for_each_entry_safe(qtd, qtd_next, qtd_list, qtd_list) {
+ list_del(&qtd->qtd_list);
+ qtd_free(qtd);
+ }
}
/*
- * create a list of filled qtds for this URB; won't link into qh.
+ * Packetize urb->transfer_buffer into list of packets of size wMaxPacketSize.
+ * Also calculate the PID type (SETUP/IN/OUT) for each packet.
*/
-static struct list_head *qh_urb_transaction(struct isp1760_hcd *priv,
+#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
+static void packetize_urb(struct usb_hcd *hcd,
struct urb *urb, struct list_head *head, gfp_t flags)
{
- struct isp1760_qtd *qtd, *qtd_prev;
+ struct isp1760_qtd *qtd;
void *buf;
- int len, maxpacket;
- int is_input;
- u32 token;
+ int len, maxpacketsize;
+ u8 packet_type;
/*
* URBs map to sequences of QTDs: one logical transaction
*/
- qtd = isp1760_qtd_alloc(priv, flags);
- if (!qtd)
- return NULL;
- list_add_tail(&qtd->qtd_list, head);
- qtd->urb = urb;
- urb->status = -EINPROGRESS;
+ if (!urb->transfer_buffer && urb->transfer_buffer_length) {
+ /* XXX This looks like a usb storage / SCSI bug */
+ dev_err(hcd->self.controller,
+ "buf is null, dma is %08lx len is %d\n",
+ (long unsigned)urb->transfer_dma,
+ urb->transfer_buffer_length);
+ WARN_ON(1);
+ }
- token = 0;
- /* for split transactions, SplitXState initialized to zero */
+ if (usb_pipein(urb->pipe))
+ packet_type = IN_PID;
+ else
+ packet_type = OUT_PID;
- len = urb->transfer_buffer_length;
- is_input = usb_pipein(urb->pipe);
if (usb_pipecontrol(urb->pipe)) {
- /* SETUP pid */
- qtd_fill(qtd, urb->setup_packet,
- sizeof(struct usb_ctrlrequest),
- token | SETUP_PID);
-
- /* ... and always at least one more pid */
- token ^= DATA_TOGGLE;
- qtd_prev = qtd;
- qtd = isp1760_qtd_alloc(priv, flags);
+ qtd = qtd_alloc(flags, urb, SETUP_PID);
if (!qtd)
goto cleanup;
- qtd->urb = urb;
- qtd_prev->hw_next = qtd;
+ qtd_fill(qtd, urb->setup_packet, sizeof(struct usb_ctrlrequest));
list_add_tail(&qtd->qtd_list, head);
/* for zero length DATA stages, STATUS is always IN */
- if (len == 0)
- token |= IN_PID;
+ if (urb->transfer_buffer_length == 0)
+ packet_type = IN_PID;
}
- /*
- * data transfer stage: buffer setup
- */
- buf = urb->transfer_buffer;
-
- if (is_input)
- token |= IN_PID;
- else
- token |= OUT_PID;
-
- maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+ maxpacketsize = max_packet(usb_maxpacket(urb->dev, urb->pipe,
+ usb_pipeout(urb->pipe)));
/*
* buffer gets wrapped in one or more qtds;
* last one may be "short" (including zero len)
* and may serve as a control status ack
*/
+ buf = urb->transfer_buffer;
+ len = urb->transfer_buffer_length;
+
for (;;) {
int this_qtd_len;
- if (!buf && len) {
- /* XXX This looks like usb storage / SCSI bug */
- printk(KERN_ERR "buf is null, dma is %08lx len is %d\n",
- (long unsigned)urb->transfer_dma, len);
- WARN_ON(1);
- }
+ qtd = qtd_alloc(flags, urb, packet_type);
+ if (!qtd)
+ goto cleanup;
+ this_qtd_len = qtd_fill(qtd, buf, len);
+ list_add_tail(&qtd->qtd_list, head);
- this_qtd_len = qtd_fill(qtd, buf, len, token);
len -= this_qtd_len;
buf += this_qtd_len;
- /* qh makes control packets use qtd toggle; maybe switch it */
- if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
- token ^= DATA_TOGGLE;
-
if (len <= 0)
break;
-
- qtd_prev = qtd;
- qtd = isp1760_qtd_alloc(priv, flags);
- if (!qtd)
- goto cleanup;
- qtd->urb = urb;
- qtd_prev->hw_next = qtd;
- list_add_tail(&qtd->qtd_list, head);
}
/*
@@ -1599,187 +1488,247 @@ static struct list_head *qh_urb_transaction(struct isp1760_hcd *priv,
if (usb_pipecontrol(urb->pipe)) {
one_more = 1;
- /* "in" <--> "out" */
- token ^= IN_PID;
- /* force DATA1 */
- token |= DATA_TOGGLE;
+ if (packet_type == IN_PID)
+ packet_type = OUT_PID;
+ else
+ packet_type = IN_PID;
} else if (usb_pipebulk(urb->pipe)
&& (urb->transfer_flags & URB_ZERO_PACKET)
- && !(urb->transfer_buffer_length % maxpacket)) {
+ && !(urb->transfer_buffer_length %
+ maxpacketsize)) {
one_more = 1;
}
if (one_more) {
- qtd_prev = qtd;
- qtd = isp1760_qtd_alloc(priv, flags);
+ qtd = qtd_alloc(flags, urb, packet_type);
if (!qtd)
goto cleanup;
- qtd->urb = urb;
- qtd_prev->hw_next = qtd;
- list_add_tail(&qtd->qtd_list, head);
/* never any data in such packets */
- qtd_fill(qtd, NULL, 0, token);
+ qtd_fill(qtd, NULL, 0);
+ list_add_tail(&qtd->qtd_list, head);
}
}
- qtd->status = URB_COMPLETE_NOTIFY;
- return head;
+ return;
cleanup:
- qtd_list_free(priv, urb, head);
- return NULL;
+ qtd_list_free(head);
}
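As a concrete illustration of the loop above, a hypothetical control transfer with an 18-byte IN data stage (assuming the transfer fits within MAX_PAYLOAD_SIZE) is packetized into three qtds:

    SETUP_PID qtd:  8 bytes  (urb->setup_packet)
    IN_PID    qtd: 18 bytes  (data stage, urb->transfer_buffer)
    OUT_PID   qtd:  0 bytes  (status stage; direction flipped for control)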
static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
- struct list_head qtd_list;
- packet_enqueue *pe;
-
- INIT_LIST_HEAD(&qtd_list);
+ struct list_head *ep_queue;
+ struct isp1760_qh *qh, *qhit;
+ unsigned long spinflags;
+ LIST_HEAD(new_qtds);
+ int retval;
+ int qh_in_queue;
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
+ ep_queue = &priv->qh_list[QH_CONTROL];
+ break;
case PIPE_BULK:
-
- if (!qh_urb_transaction(priv, urb, &qtd_list, mem_flags))
- return -ENOMEM;
- pe = enqueue_an_ATL_packet;
+ ep_queue = &priv->qh_list[QH_BULK];
break;
-
case PIPE_INTERRUPT:
- if (!qh_urb_transaction(priv, urb, &qtd_list, mem_flags))
- return -ENOMEM;
- pe = enqueue_an_INT_packet;
+ if (urb->interval < 0)
+ return -EINVAL;
+ /* FIXME: Check bandwidth */
+ ep_queue = &priv->qh_list[QH_INTERRUPT];
break;
-
case PIPE_ISOCHRONOUS:
- printk(KERN_ERR "PIPE_ISOCHRONOUS ain't supported\n");
+ dev_err(hcd->self.controller, "%s: isochronous USB packets "
+ "not yet supported\n",
+ __func__);
+ return -EPIPE;
default:
+ dev_err(hcd->self.controller, "%s: unknown pipe type\n",
+ __func__);
return -EPIPE;
}
- return isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
+ if (usb_pipein(urb->pipe))
+ urb->actual_length = 0;
+
+ packetize_urb(hcd, urb, &new_qtds, mem_flags);
+ if (list_empty(&new_qtds))
+ return -ENOMEM;
+
+ retval = 0;
+ spin_lock_irqsave(&priv->lock, spinflags);
+
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ retval = -ESHUTDOWN;
+ qtd_list_free(&new_qtds);
+ goto out;
+ }
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval) {
+ qtd_list_free(&new_qtds);
+ goto out;
+ }
+
+ qh = urb->ep->hcpriv;
+ if (qh) {
+ qh_in_queue = 0;
+ list_for_each_entry(qhit, ep_queue, qh_list) {
+ if (qhit == qh) {
+ qh_in_queue = 1;
+ break;
+ }
+ }
+ if (!qh_in_queue)
+ list_add_tail(&qh->qh_list, ep_queue);
+ } else {
+ qh = qh_alloc(GFP_ATOMIC);
+ if (!qh) {
+ retval = -ENOMEM;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ qtd_list_free(&new_qtds);
+ goto out;
+ }
+ list_add_tail(&qh->qh_list, ep_queue);
+ urb->ep->hcpriv = qh;
+ }
+
+ list_splice_tail(&new_qtds, &qh->qtd_list);
+ schedule_ptds(hcd);
+
+out:
+ spin_unlock_irqrestore(&priv->lock, spinflags);
+ return retval;
}
-static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
- int status)
+static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
+ struct isp1760_qh *qh)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
- struct inter_packet_info *ints;
- u32 i;
- u32 reg_base, or_reg, skip_reg;
- unsigned long flags;
- struct ptd ptd;
- packet_enqueue *pe;
+ int skip_map;
+
+ WARN_ON(qh->slot == -1);
+
+ /* We need to forcefully reclaim the slot since some transfers never
+ return, e.g. interrupt transfers and NAKed bulk transfers. */
+ if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
+ skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
+ skip_map |= (1 << qh->slot);
+ reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
+ priv->atl_slots[qh->slot].qh = NULL;
+ priv->atl_slots[qh->slot].qtd = NULL;
+ } else {
+ skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
+ skip_map |= (1 << qh->slot);
+ reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
+ priv->int_slots[qh->slot].qh = NULL;
+ priv->int_slots[qh->slot].qtd = NULL;
+ }
- switch (usb_pipetype(urb->pipe)) {
- case PIPE_ISOCHRONOUS:
- return -EPIPE;
- break;
+ qh->slot = -1;
+}
- case PIPE_INTERRUPT:
- ints = priv->int_ints;
- reg_base = INT_REGS_OFFSET;
- or_reg = HC_INT_IRQ_MASK_OR_REG;
- skip_reg = HC_INT_PTD_SKIPMAP_REG;
- pe = enqueue_an_INT_packet;
- break;
+/*
+ * Retire the qtds beginning at 'qtd', all belonging to the same urb, killing
+ * any active transfer belonging to that urb in the process.
+ */
+static void dequeue_urb_from_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
+ struct isp1760_qtd *qtd)
+{
+ struct urb *urb;
+ int urb_was_running;
- default:
- ints = priv->atl_ints;
- reg_base = ATL_REGS_OFFSET;
- or_reg = HC_ATL_IRQ_MASK_OR_REG;
- skip_reg = HC_ATL_PTD_SKIPMAP_REG;
- pe = enqueue_an_ATL_packet;
- break;
+ urb = qtd->urb;
+ urb_was_running = 0;
+ list_for_each_entry_from(qtd, &qh->qtd_list, qtd_list) {
+ if (qtd->urb != urb)
+ break;
+
+ if (qtd->status >= QTD_XFER_STARTED)
+ urb_was_running = 1;
+ if (last_qtd_of_urb(qtd, qh) &&
+ (qtd->status >= QTD_XFER_COMPLETE))
+ urb_was_running = 0;
+
+ if (qtd->status == QTD_XFER_STARTED)
+ kill_transfer(hcd, urb, qh);
+ qtd->status = QTD_RETIRE;
}
- memset(&ptd, 0, sizeof(ptd));
- spin_lock_irqsave(&priv->lock, flags);
+ if ((urb->dev->speed != USB_SPEED_HIGH) && urb_was_running) {
+ qh->tt_buffer_dirty = 1;
+ if (usb_hub_clear_tt_buffer(urb))
+ /* Clear failed; let's hope things work anyway */
+ qh->tt_buffer_dirty = 0;
+ }
+}
- for (i = 0; i < 32; i++) {
- if (ints->urb == urb) {
- u32 skip_map;
- u32 or_map;
- struct isp1760_qtd *qtd;
- struct isp1760_qh *qh = ints->qh;
-
- skip_map = isp1760_readl(hcd->regs + skip_reg);
- skip_map |= 1 << i;
- isp1760_writel(skip_map, hcd->regs + skip_reg);
-
- or_map = isp1760_readl(hcd->regs + or_reg);
- or_map &= ~(1 << i);
- isp1760_writel(or_map, hcd->regs + or_reg);
-
- priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base
- + i * sizeof(ptd), sizeof(ptd));
- qtd = ints->qtd;
- qtd = clean_up_qtdlist(qtd);
-
- free_mem(priv, ints->payload);
-
- ints->urb = NULL;
- ints->qh = NULL;
- ints->qtd = NULL;
- ints->data_buffer = NULL;
- ints->payload = 0;
-
- isp1760_urb_done(priv, urb, status);
- if (qtd)
- pe(hcd, qh, qtd);
- break;
+static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+ int status)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ unsigned long spinflags;
+ struct isp1760_qh *qh;
+ struct isp1760_qtd *qtd;
+ int retval = 0;
- } else if (ints->qtd) {
- struct isp1760_qtd *qtd, *prev_qtd = ints->qtd;
+ spin_lock_irqsave(&priv->lock, spinflags);
+ retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (retval)
+ goto out;
- for (qtd = ints->qtd->hw_next; qtd; qtd = qtd->hw_next) {
- if (qtd->urb == urb) {
- prev_qtd->hw_next = clean_up_qtdlist(qtd);
- isp1760_urb_done(priv, urb, status);
- break;
- }
- prev_qtd = qtd;
- }
- /* we found the urb before the end of the list */
- if (qtd)
- break;
- }
- ints++;
+ qh = urb->ep->hcpriv;
+ if (!qh) {
+ retval = -EINVAL;
+ goto out;
}
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
+ if (qtd->urb == urb) {
+ dequeue_urb_from_qtd(hcd, qh, qtd);
+ list_move(&qtd->qtd_list, &qh->qtd_list);
+ break;
+ }
+
+ urb->status = status;
+ schedule_ptds(hcd);
+
+out:
+ spin_unlock_irqrestore(&priv->lock, spinflags);
+ return retval;
}
-static irqreturn_t isp1760_irq(struct usb_hcd *usb_hcd)
+static void isp1760_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
{
- struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
- u32 imask;
- irqreturn_t irqret = IRQ_NONE;
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ unsigned long spinflags;
+ struct isp1760_qh *qh, *qh_iter;
+ int i;
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, spinflags);
- if (!(usb_hcd->state & HC_STATE_RUNNING))
- goto leave;
+ qh = ep->hcpriv;
+ if (!qh)
+ goto out;
- imask = isp1760_readl(usb_hcd->regs + HC_INTERRUPT_REG);
- if (unlikely(!imask))
- goto leave;
+ WARN_ON(!list_empty(&qh->qtd_list));
- isp1760_writel(imask, usb_hcd->regs + HC_INTERRUPT_REG);
- if (imask & HC_ATL_INT)
- do_atl_int(usb_hcd);
+ for (i = 0; i < QH_END; i++)
+ list_for_each_entry(qh_iter, &priv->qh_list[i], qh_list)
+ if (qh_iter == qh) {
+ list_del(&qh_iter->qh_list);
+ i = QH_END;
+ break;
+ }
+ qh_free(qh);
+ ep->hcpriv = NULL;
- if (imask & HC_INTL_INT)
- do_intl_int(usb_hcd);
+ schedule_ptds(hcd);
- irqret = IRQ_HANDLED;
-leave:
- spin_unlock(&priv->lock);
- return irqret;
+out:
+ spin_unlock_irqrestore(&priv->lock, spinflags);
}
static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
@@ -1790,7 +1739,7 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
int retval = 1;
unsigned long flags;
- /* if !USB_SUSPEND, root hub timers won't get shut down ... */
+ /* if !PM_RUNTIME, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
@@ -1799,12 +1748,12 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
mask = PORT_CSC;
spin_lock_irqsave(&priv->lock, flags);
- temp = isp1760_readl(hcd->regs + HC_PORTSC1);
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
if (temp & PORT_OWNER) {
if (temp & PORT_CSC) {
temp &= ~PORT_CSC;
- isp1760_writel(temp, hcd->regs + HC_PORTSC1);
+ reg_write32(hcd->regs, HC_PORTSC1, temp);
goto done;
}
}
@@ -1844,9 +1793,9 @@ static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
temp = 1 + (ports / 8);
desc->bDescLength = 7 + 2 * temp;
- /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
- memset(&desc->bitmap[0], 0, temp);
- memset(&desc->bitmap[temp], 0xff, temp);
+ /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
/* per-port overcurrent reporting */
temp = 0x0008;
@@ -1861,8 +1810,8 @@ static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
-static int check_reset_complete(struct isp1760_hcd *priv, int index,
- u32 __iomem *status_reg, int port_status)
+static int check_reset_complete(struct usb_hcd *hcd, int index,
+ int port_status)
{
if (!(port_status & PORT_CONNECT))
return port_status;
@@ -1870,15 +1819,17 @@ static int check_reset_complete(struct isp1760_hcd *priv, int index,
/* if reset finished and it's still not enabled -- handoff */
if (!(port_status & PORT_PE)) {
- printk(KERN_ERR "port %d full speed --> companion\n",
- index + 1);
+ dev_info(hcd->self.controller,
+ "port %d full speed --> companion\n",
+ index + 1);
port_status |= PORT_OWNER;
port_status &= ~PORT_RWC_BITS;
- isp1760_writel(port_status, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, port_status);
} else
- printk(KERN_ERR "port %d high speed\n", index + 1);
+ dev_info(hcd->self.controller, "port %d high speed\n",
+ index + 1);
return port_status;
}
@@ -1888,7 +1839,6 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
int ports = HCS_N_PORTS(priv->hcs_params);
- u32 __iomem *status_reg = hcd->regs + HC_PORTSC1;
u32 temp, status;
unsigned long flags;
int retval = 0;
@@ -1917,7 +1867,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
- temp = isp1760_readl(status_reg);
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
/*
* Even if OWNER is set, so the port is owned by the
@@ -1928,7 +1878,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- isp1760_writel(temp & ~PORT_PE, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_PE);
break;
case USB_PORT_FEAT_C_ENABLE:
/* XXX error? */
@@ -1942,8 +1892,8 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
goto error;
/* resume signaling for 20 msec */
temp &= ~(PORT_RWC_BITS);
- isp1760_writel(temp | PORT_RESUME,
- status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp | PORT_RESUME);
priv->reset_done = jiffies +
msecs_to_jiffies(20);
}
@@ -1953,11 +1903,11 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(priv->hcs_params))
- isp1760_writel(temp & ~PORT_POWER, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp & ~PORT_POWER);
break;
case USB_PORT_FEAT_C_CONNECTION:
- isp1760_writel(temp | PORT_CSC,
- status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_CSC);
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
/* XXX error ?*/
@@ -1968,7 +1918,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
default:
goto error;
}
- isp1760_readl(hcd->regs + HC_USBCMD);
+ reg_read32(hcd->regs, HC_USBCMD);
break;
case GetHubDescriptor:
isp1760_hub_descriptor(priv, (struct usb_hub_descriptor *)
@@ -1983,7 +1933,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
goto error;
wIndex--;
status = 0;
- temp = isp1760_readl(status_reg);
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
/* wPortChange bits */
if (temp & PORT_CSC)
@@ -1992,7 +1942,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
/* whoever resumes must GetPortStatus to complete it!! */
if (temp & PORT_RESUME) {
- printk(KERN_ERR "Port resume should be skipped.\n");
+ dev_err(hcd->self.controller, "Port resume should be skipped.\n");
/* Remote Wakeup received? */
if (!priv->reset_done) {
@@ -2000,8 +1950,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
priv->reset_done = jiffies
+ msecs_to_jiffies(20);
/* check the port again */
- mod_timer(&priv_to_hcd(priv)->rh_timer,
- priv->reset_done);
+ mod_timer(&hcd->rh_timer, priv->reset_done);
}
/* resume completed? */
@@ -2011,14 +1960,13 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
priv->reset_done = 0;
/* stop resume signaling */
- temp = isp1760_readl(status_reg);
- isp1760_writel(
- temp & ~(PORT_RWC_BITS | PORT_RESUME),
- status_reg);
- retval = handshake(priv, status_reg,
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp & ~(PORT_RWC_BITS | PORT_RESUME));
+ retval = handshake(hcd, HC_PORTSC1,
PORT_RESUME, 0, 2000 /* 2msec */);
if (retval != 0) {
- isp1760_err(priv,
+ dev_err(hcd->self.controller,
"port %d resume error %d\n",
wIndex + 1, retval);
goto error;
@@ -2035,22 +1983,21 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
priv->reset_done = 0;
/* force reset to complete */
- isp1760_writel(temp & ~PORT_RESET,
- status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_RESET);
/* REVISIT: some hardware needs 550+ usec to clear
* this bit; seems too long to spin routinely...
*/
- retval = handshake(priv, status_reg,
+ retval = handshake(hcd, HC_PORTSC1,
PORT_RESET, 0, 750);
if (retval != 0) {
- isp1760_err(priv, "port %d reset error %d\n",
+ dev_err(hcd->self.controller, "port %d reset error %d\n",
wIndex + 1, retval);
goto error;
}
/* see what we found out */
- temp = check_reset_complete(priv, wIndex, status_reg,
- isp1760_readl(status_reg));
+ temp = check_reset_complete(hcd, wIndex,
+ reg_read32(hcd->regs, HC_PORTSC1));
}
/*
* Even if OWNER is set, there's no harm letting khubd
@@ -2059,12 +2006,12 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
*/
if (temp & PORT_OWNER)
- printk(KERN_ERR "Warning: PORT_OWNER is set\n");
+ dev_err(hcd->self.controller, "PORT_OWNER is set\n");
if (temp & PORT_CONNECT) {
status |= USB_PORT_STAT_CONNECTION;
/* status may be from integrated TT */
- status |= ehci_port_speed(priv, temp);
+ status |= USB_PORT_STAT_HIGH_SPEED;
}
if (temp & PORT_PE)
status |= USB_PORT_STAT_ENABLE;
@@ -2093,14 +2040,14 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
- temp = isp1760_readl(status_reg);
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
if (temp & PORT_OWNER)
break;
/* temp &= ~PORT_RWC_BITS; */
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- isp1760_writel(temp | PORT_PE, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_PE);
break;
case USB_PORT_FEAT_SUSPEND:
@@ -2108,12 +2055,12 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
|| (temp & PORT_RESET) != 0)
goto error;
- isp1760_writel(temp | PORT_SUSPEND, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_SUSPEND);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(priv->hcs_params))
- isp1760_writel(temp | PORT_POWER,
- status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp | PORT_POWER);
break;
case USB_PORT_FEAT_RESET:
if (temp & PORT_RESUME)
@@ -2136,12 +2083,12 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
priv->reset_done = jiffies +
msecs_to_jiffies(50);
}
- isp1760_writel(temp, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp);
break;
default:
goto error;
}
- isp1760_readl(hcd->regs + HC_USBCMD);
+ reg_read32(hcd->regs, HC_USBCMD);
break;
default:
@@ -2153,57 +2100,12 @@ error:
return retval;
}
-static void isp1760_endpoint_disable(struct usb_hcd *usb_hcd,
- struct usb_host_endpoint *ep)
-{
- struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
- struct isp1760_qh *qh;
- struct isp1760_qtd *qtd;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- qh = ep->hcpriv;
- if (!qh)
- goto out;
-
- ep->hcpriv = NULL;
- do {
- /* more than entry might get removed */
- if (list_empty(&qh->qtd_list))
- break;
-
- qtd = list_first_entry(&qh->qtd_list, struct isp1760_qtd,
- qtd_list);
-
- if (qtd->status & URB_ENQUEUED) {
-
- spin_unlock_irqrestore(&priv->lock, flags);
- isp1760_urb_dequeue(usb_hcd, qtd->urb, -ECONNRESET);
- spin_lock_irqsave(&priv->lock, flags);
- } else {
- struct urb *urb;
-
- urb = qtd->urb;
- clean_up_qtdlist(qtd);
- isp1760_urb_done(priv, urb, -ECONNRESET);
- }
- } while (1);
-
- qh_destroy(qh);
- /* remove requests and leak them.
- * ATL are pretty fast done, INT could take a while...
- * The latter shoule be removed
- */
-out:
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
static int isp1760_get_frame(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 fr;
- fr = isp1760_readl(hcd->regs + HC_FRINDEX);
+ fr = reg_read32(hcd->regs, HC_FRINDEX);
return (fr >> 3) % priv->periodic_size;
}
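HC_FRINDEX counts micro-frames (eight per 1 ms frame), which is why the value is shifted right by three before being reduced modulo the periodic schedule size. A minimal sketch of the same conversion, using an assumed periodic_size of 32 purely as an example:

static unsigned int uframe_to_frame(u32 frindex)
{
	/* sketch only: 32 is an assumed example schedule size */
	unsigned int periodic_size = 32;

	return (frindex >> 3) % periodic_size;	/* e.g. 0x108 -> 33 % 32 = 1 */
}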
@@ -2212,18 +2114,20 @@ static void isp1760_stop(struct usb_hcd *hcd)
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 temp;
+ del_timer(&errata2_timer);
+
isp1760_hub_control(hcd, ClearPortFeature, USB_PORT_FEAT_POWER, 1,
NULL, 0);
mdelay(20);
spin_lock_irq(&priv->lock);
- ehci_reset(priv);
+ ehci_reset(hcd);
/* Disable IRQ */
- temp = isp1760_readl(hcd->regs + HC_HW_MODE_CTRL);
- isp1760_writel(temp &= ~HW_GLOBAL_INTR_EN, hcd->regs + HC_HW_MODE_CTRL);
+ temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);
spin_unlock_irq(&priv->lock);
- isp1760_writel(0, hcd->regs + HC_CONFIGFLAG);
+ reg_write32(hcd->regs, HC_CONFIGFLAG, 0);
}
static void isp1760_shutdown(struct usb_hcd *hcd)
@@ -2231,14 +2135,31 @@ static void isp1760_shutdown(struct usb_hcd *hcd)
u32 command, temp;
isp1760_stop(hcd);
- temp = isp1760_readl(hcd->regs + HC_HW_MODE_CTRL);
- isp1760_writel(temp &= ~HW_GLOBAL_INTR_EN, hcd->regs + HC_HW_MODE_CTRL);
+ temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);
- command = isp1760_readl(hcd->regs + HC_USBCMD);
+ command = reg_read32(hcd->regs, HC_USBCMD);
command &= ~CMD_RUN;
- isp1760_writel(command, hcd->regs + HC_USBCMD);
+ reg_write32(hcd->regs, HC_USBCMD, command);
+}
+
+static void isp1760_clear_tt_buffer_complete(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ struct isp1760_qh *qh = ep->hcpriv;
+ unsigned long spinflags;
+
+ if (!qh)
+ return;
+
+ spin_lock_irqsave(&priv->lock, spinflags);
+ qh->tt_buffer_dirty = 0;
+ schedule_ptds(hcd);
+ spin_unlock_irqrestore(&priv->lock, spinflags);
}
+
static const struct hc_driver isp1760_hc_driver = {
.description = "isp1760-hcd",
.product_desc = "NXP ISP1760 USB Host Controller",
@@ -2255,10 +2176,18 @@ static const struct hc_driver isp1760_hc_driver = {
.get_frame_number = isp1760_get_frame,
.hub_status_data = isp1760_hub_status_data,
.hub_control = isp1760_hub_control,
+ .clear_tt_buffer_complete = isp1760_clear_tt_buffer_complete,
};
int __init init_kmem_once(void)
{
+ urb_listitem_cachep = kmem_cache_create("isp1760_urb_listitem",
+ sizeof(struct urb_listitem), 0, SLAB_TEMPORARY |
+ SLAB_MEM_SPREAD, NULL);
+
+ if (!urb_listitem_cachep)
+ return -ENOMEM;
+
qtd_cachep = kmem_cache_create("isp1760_qtd",
sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY |
SLAB_MEM_SPREAD, NULL);
@@ -2281,10 +2210,12 @@ void deinit_kmem_cache(void)
{
kmem_cache_destroy(qtd_cachep);
kmem_cache_destroy(qh_cachep);
+ kmem_cache_destroy(urb_listitem_cachep);
}
struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
int irq, unsigned long irqflags,
+ int rst_gpio,
struct device *dev, const char *busname,
unsigned int devflags)
{
@@ -2304,6 +2235,7 @@ struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
priv = hcd_to_priv(hcd);
priv->devflags = devflags;
+ priv->rst_gpio = rst_gpio;
init_memory(priv);
hcd->regs = ioremap(res_start, res_len);
if (!hcd->regs) {
@@ -2318,6 +2250,7 @@ struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
ret = usb_add_hcd(hcd, irq, irqflags);
if (ret)
goto err_unmap;
+ device_wakeup_enable(hcd->self.controller);
return hcd;
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
index 6931ef5c965..33dc79ccaa6 100644
--- a/drivers/usb/host/isp1760-hcd.h
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -4,6 +4,7 @@
/* exports for if */
struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
int irq, unsigned long irqflags,
+ int rst_gpio,
struct device *dev, const char *busname,
unsigned int devflags);
int init_kmem_once(void);
@@ -49,10 +50,9 @@ void deinit_kmem_cache(void);
#define SW_RESET_RESET_ALL (1 << 0)
#define HC_BUFFER_STATUS_REG 0x334
-#define ATL_BUFFER 0x1
-#define INT_BUFFER 0x2
-#define ISO_BUFFER 0x4
-#define BUFFER_MAP 0x7
+#define ISO_BUF_FILL (1 << 2)
+#define INT_BUF_FILL (1 << 1)
+#define ATL_BUF_FILL (1 << 0)
#define HC_MEMORY_REG 0x33c
#define ISP_BANK(x) ((x) << 16)
@@ -68,13 +68,12 @@ void deinit_kmem_cache(void);
#define HC_INTERRUPT_REG 0x310
#define HC_INTERRUPT_ENABLE 0x314
-#define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT | HC_EOT_INT)
-
#define HC_ISO_INT (1 << 9)
#define HC_ATL_INT (1 << 8)
#define HC_INTL_INT (1 << 7)
#define HC_EOT_INT (1 << 3)
#define HC_SOT_INT (1 << 1)
+#define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT)
#define HC_ISO_IRQ_MASK_OR_REG 0x318
#define HC_INT_IRQ_MASK_OR_REG 0x31C
@@ -83,54 +82,38 @@ void deinit_kmem_cache(void);
#define HC_INT_IRQ_MASK_AND_REG 0x328
#define HC_ATL_IRQ_MASK_AND_REG 0x32C
-/* Register sets */
-#define HC_BEGIN_OF_ATL 0x0c00
-#define HC_BEGIN_OF_INT 0x0800
-#define HC_BEGIN_OF_ISO 0x0400
-#define HC_BEGIN_OF_PAYLOAD 0x1000
-
/* urb state*/
#define DELETE_URB (0x0008)
#define NO_TRANSFER_ACTIVE (0xffffffff)
-#define ATL_REGS_OFFSET (0xc00)
-#define INT_REGS_OFFSET (0x800)
-
-/* Philips Transfer Descriptor (PTD) */
+/* Philips Proprietary Transfer Descriptor (PTD) */
+typedef __u32 __bitwise __dw;
struct ptd {
- __le32 dw0;
- __le32 dw1;
- __le32 dw2;
- __le32 dw3;
- __le32 dw4;
- __le32 dw5;
- __le32 dw6;
- __le32 dw7;
+ __dw dw0;
+ __dw dw1;
+ __dw dw2;
+ __dw dw3;
+ __dw dw4;
+ __dw dw5;
+ __dw dw6;
+ __dw dw7;
};
+#define PTD_OFFSET 0x0400
+#define ISO_PTD_OFFSET 0x0400
+#define INT_PTD_OFFSET 0x0800
+#define ATL_PTD_OFFSET 0x0c00
+#define PAYLOAD_OFFSET 0x1000
-struct inter_packet_info {
- void *data_buffer;
- u32 payload;
-#define PTD_FIRE_NEXT (1 << 0)
-#define PTD_URB_FINISHED (1 << 1)
- struct urb *urb;
+struct slotinfo {
struct isp1760_qh *qh;
struct isp1760_qtd *qtd;
+ unsigned long timestamp;
};
typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
struct isp1760_qtd *qtd);
-#define isp1760_dbg(priv, fmt, args...) \
- dev_dbg(priv_to_hcd(priv)->self.controller, fmt, ##args)
-
-#define isp1760_info(priv, fmt, args...) \
- dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args)
-
-#define isp1760_err(priv, fmt, args...) \
- dev_err(priv_to_hcd(priv)->self.controller, fmt, ##args)
-
/*
* Device flags that can vary from board to board. All of these
* indicate the most "atypical" case, so that a devflags of 0 is
@@ -144,6 +127,7 @@ typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
#define ISP1760_FLAG_ISP1761 0x00000040 /* Chip is ISP1761 */
#define ISP1760_FLAG_INTR_POL_HIGH 0x00000080 /* Interrupt polarity active high */
#define ISP1760_FLAG_INTR_EDGE_TRIG 0x00000100 /* Interrupt edge triggered */
+#define ISP1760_FLAG_RESET_ACTIVE_HIGH 0x80000000 /* RESET GPIO active high */
/* chip memory management */
struct memory_chunk {
@@ -167,63 +151,58 @@ struct memory_chunk {
#define BLOCK_2_SIZE 1024
#define BLOCK_3_SIZE 8192
#define BLOCKS (BLOCK_1_NUM + BLOCK_2_NUM + BLOCK_3_NUM)
-#define PAYLOAD_SIZE 0xf000
-
-/* I saw if some reloads if the pointer was negative */
-#define ISP1760_NULL_POINTER (0x400)
+#define MAX_PAYLOAD_SIZE BLOCK_3_SIZE
+#define PAYLOAD_AREA_SIZE 0xf000
/* ATL */
/* DW0 */
-#define PTD_VALID 1
-#define PTD_LENGTH(x) (((u32) x) << 3)
-#define PTD_MAXPACKET(x) (((u32) x) << 18)
-#define PTD_MULTI(x) (((u32) x) << 29)
-#define PTD_ENDPOINT(x) (((u32) x) << 31)
+#define DW0_VALID_BIT 1
+#define FROM_DW0_VALID(x) ((x) & 0x01)
+#define TO_DW0_LENGTH(x) (((u32) x) << 3)
+#define TO_DW0_MAXPACKET(x) (((u32) x) << 18)
+#define TO_DW0_MULTI(x) (((u32) x) << 29)
+#define TO_DW0_ENDPOINT(x) (((u32) x) << 31)
/* DW1 */
-#define PTD_DEVICE_ADDR(x) (((u32) x) << 3)
-#define PTD_PID_TOKEN(x) (((u32) x) << 10)
-#define PTD_TRANS_BULK ((u32) 2 << 12)
-#define PTD_TRANS_INT ((u32) 3 << 12)
-#define PTD_TRANS_SPLIT ((u32) 1 << 14)
-#define PTD_SE_USB_LOSPEED ((u32) 2 << 16)
-#define PTD_PORT_NUM(x) (((u32) x) << 18)
-#define PTD_HUB_NUM(x) (((u32) x) << 25)
-#define PTD_PING(x) (((u32) x) << 26)
+#define TO_DW1_DEVICE_ADDR(x) (((u32) x) << 3)
+#define TO_DW1_PID_TOKEN(x) (((u32) x) << 10)
+#define DW1_TRANS_BULK ((u32) 2 << 12)
+#define DW1_TRANS_INT ((u32) 3 << 12)
+#define DW1_TRANS_SPLIT ((u32) 1 << 14)
+#define DW1_SE_USB_LOSPEED ((u32) 2 << 16)
+#define TO_DW1_PORT_NUM(x) (((u32) x) << 18)
+#define TO_DW1_HUB_NUM(x) (((u32) x) << 25)
/* DW2 */
-#define PTD_RL_CNT(x) (((u32) x) << 25)
-#define PTD_DATA_START_ADDR(x) (((u32) x) << 8)
-#define BASE_ADDR 0x1000
+#define TO_DW2_DATA_START_ADDR(x) (((u32) x) << 8)
+#define TO_DW2_RL(x) ((x) << 25)
+#define FROM_DW2_RL(x) (((x) >> 25) & 0xf)
/* DW3 */
-#define PTD_CERR(x) (((u32) x) << 23)
-#define PTD_NAC_CNT(x) (((u32) x) << 19)
-#define PTD_ACTIVE ((u32) 1 << 31)
-#define PTD_DATA_TOGGLE(x) (((u32) x) << 25)
-
-#define DW3_HALT_BIT (1 << 30)
+#define FROM_DW3_NRBYTESTRANSFERRED(x) ((x) & 0x7fff)
+#define FROM_DW3_SCS_NRBYTESTRANSFERRED(x) ((x) & 0x07ff)
+#define TO_DW3_NAKCOUNT(x) ((x) << 19)
+#define FROM_DW3_NAKCOUNT(x) (((x) >> 19) & 0xf)
+#define TO_DW3_CERR(x) ((x) << 23)
+#define FROM_DW3_CERR(x) (((x) >> 23) & 0x3)
+#define TO_DW3_DATA_TOGGLE(x) ((x) << 25)
+#define FROM_DW3_DATA_TOGGLE(x) (((x) >> 25) & 0x1)
+#define TO_DW3_PING(x) ((x) << 26)
+#define FROM_DW3_PING(x) (((x) >> 26) & 0x1)
#define DW3_ERROR_BIT (1 << 28)
-#define DW3_QTD_ACTIVE (1 << 31)
+#define DW3_BABBLE_BIT (1 << 29)
+#define DW3_HALT_BIT (1 << 30)
+#define DW3_ACTIVE_BIT (1 << 31)
+#define FROM_DW3_ACTIVE(x) (((x) >> 31) & 0x01)
#define INT_UNDERRUN (1 << 2)
#define INT_BABBLE (1 << 1)
#define INT_EXACT (1 << 0)
-#define DW1_GET_PID(x) (((x) >> 10) & 0x3)
-#define PTD_XFERRED_LENGTH(x) ((x) & 0x7fff)
-#define PTD_XFERRED_LENGTH_LO(x) ((x) & 0x7ff)
-
#define SETUP_PID (2)
#define IN_PID (1)
#define OUT_PID (0)
-#define GET_QTD_TOKEN_TYPE(x) ((x) & 0x3)
-
-#define DATA_TOGGLE (1 << 31)
-#define GET_DATA_TOGGLE(x) ((x) >> 31)
/* Errata 1 */
#define RL_COUNTER (0)
#define NAK_COUNTER (0)
#define ERR_COUNTER (2)
-#define HC_ATL_PL_SIZE (8192)
-
-#endif
+#endif /* _ISP1760_HCD_H_ */
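The renamed PTD dword helpers come in TO_*/FROM_* pairs, so a field packed with one macro can be read back with its counterpart. A small sketch with illustrative field values (not taken from the driver):

static inline void ptd_dw3_sketch(void)
{
	u32 dw3 = TO_DW3_NAKCOUNT(0) | TO_DW3_CERR(2) |
		  TO_DW3_DATA_TOGGLE(1) | DW3_ACTIVE_BIT;

	WARN_ON(FROM_DW3_CERR(dw3) != 2);		/* 2-bit CERR field starting at bit 23 */
	WARN_ON(FROM_DW3_DATA_TOGGLE(dw3) != 1);	/* toggle bit at bit 25 */
	WARN_ON(!FROM_DW3_ACTIVE(dw3));			/* active flag at bit 31 */
}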
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 3b28dbfca05..df931e9ba5b 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -11,60 +11,75 @@
#include <linux/usb.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb/isp1760.h>
#include <linux/usb/hcd.h>
#include "isp1760-hcd.h"
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
#endif
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif
-#ifdef CONFIG_PPC_OF
-static int of_isp1760_probe(struct platform_device *dev,
- const struct of_device_id *match)
-{
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+struct isp1760 {
struct usb_hcd *hcd;
+ int rst_gpio;
+};
+
+static int of_isp1760_probe(struct platform_device *dev)
+{
+ struct isp1760 *drvdata;
struct device_node *dp = dev->dev.of_node;
struct resource *res;
struct resource memory;
- struct of_irq oirq;
int virq;
resource_size_t res_len;
int ret;
- const unsigned int *prop;
unsigned int devflags = 0;
+ enum of_gpio_flags gpio_flags;
+ u32 bus_width = 0;
+
+ drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
ret = of_address_to_resource(dp, 0, &memory);
- if (ret)
- return -ENXIO;
+ if (ret) {
+ ret = -ENXIO;
+ goto free_data;
+ }
res_len = resource_size(&memory);
res = request_mem_region(memory.start, res_len, dev_name(&dev->dev));
- if (!res)
- return -EBUSY;
+ if (!res) {
+ ret = -EBUSY;
+ goto free_data;
+ }
- if (of_irq_map_one(dp, 0, &oirq)) {
+ virq = irq_of_parse_and_map(dp, 0);
+ if (!virq) {
ret = -ENODEV;
goto release_reg;
}
- virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
-
if (of_device_is_compatible(dp, "nxp,usb-isp1761"))
devflags |= ISP1760_FLAG_ISP1761;
/* Some systems wire up only 16 of the 32 data lines */
- prop = of_get_property(dp, "bus-width", NULL);
- if (prop && *prop == 16)
+ of_property_read_u32(dp, "bus-width", &bus_width);
+ if (bus_width == 16)
devflags |= ISP1760_FLAG_BUS_WIDTH_16;
if (of_get_property(dp, "port1-otg", NULL) != NULL)
@@ -79,32 +94,56 @@ static int of_isp1760_probe(struct platform_device *dev,
if (of_get_property(dp, "dreq-polarity", NULL) != NULL)
devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
- hcd = isp1760_register(memory.start, res_len, virq,
- IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev_name(&dev->dev),
- devflags);
- if (IS_ERR(hcd)) {
- ret = PTR_ERR(hcd);
- goto release_reg;
+ drvdata->rst_gpio = of_get_gpio_flags(dp, 0, &gpio_flags);
+ if (gpio_is_valid(drvdata->rst_gpio)) {
+ ret = gpio_request(drvdata->rst_gpio, dev_name(&dev->dev));
+ if (!ret) {
+ if (!(gpio_flags & OF_GPIO_ACTIVE_LOW)) {
+ devflags |= ISP1760_FLAG_RESET_ACTIVE_HIGH;
+ gpio_direction_output(drvdata->rst_gpio, 0);
+ } else {
+ gpio_direction_output(drvdata->rst_gpio, 1);
+ }
+ } else {
+ drvdata->rst_gpio = ret;
+ }
+ }
+
+ drvdata->hcd = isp1760_register(memory.start, res_len, virq,
+ IRQF_SHARED, drvdata->rst_gpio,
+ &dev->dev, dev_name(&dev->dev),
+ devflags);
+ if (IS_ERR(drvdata->hcd)) {
+ ret = PTR_ERR(drvdata->hcd);
+ goto free_gpio;
}
- dev_set_drvdata(&dev->dev, hcd);
+ platform_set_drvdata(dev, drvdata);
return ret;
+free_gpio:
+ if (gpio_is_valid(drvdata->rst_gpio))
+ gpio_free(drvdata->rst_gpio);
release_reg:
release_mem_region(memory.start, res_len);
+free_data:
+ kfree(drvdata);
return ret;
}
static int of_isp1760_remove(struct platform_device *dev)
{
- struct usb_hcd *hcd = dev_get_drvdata(&dev->dev);
+ struct isp1760 *drvdata = platform_get_drvdata(dev);
- dev_set_drvdata(&dev->dev, NULL);
+ usb_remove_hcd(drvdata->hcd);
+ iounmap(drvdata->hcd->regs);
+ release_mem_region(drvdata->hcd->rsrc_start, drvdata->hcd->rsrc_len);
+ usb_put_hcd(drvdata->hcd);
- usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
+ if (gpio_is_valid(drvdata->rst_gpio))
+ gpio_free(drvdata->rst_gpio);
+
+ kfree(drvdata);
return 0;
}
@@ -119,7 +158,7 @@ static const struct of_device_id of_isp1760_match[] = {
};
MODULE_DEVICE_TABLE(of, of_isp1760_match);
-static struct of_platform_driver isp1760_of_driver = {
+static struct platform_driver isp1760_of_driver = {
.driver = {
.name = "nxp-isp1760",
.owner = THIS_MODULE,
@@ -131,7 +170,7 @@ static struct of_platform_driver isp1760_of_driver = {
#endif
#ifdef CONFIG_PCI
-static int __devinit isp1761_pci_probe(struct pci_dev *dev,
+static int isp1761_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
u8 latency, limit;
@@ -241,7 +280,7 @@ static int __devinit isp1761_pci_probe(struct pci_dev *dev,
dev->dev.dma_mask = NULL;
hcd = isp1760_register(pci_mem_phy0, memlength, dev->irq,
- IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev_name(&dev->dev),
+ IRQF_SHARED, -ENOENT, &dev->dev, dev_name(&dev->dev),
devflags);
if (IS_ERR(hcd)) {
ret_status = -ENODEV;
@@ -305,16 +344,16 @@ static struct pci_driver isp1761_pci_driver = {
};
#endif
-static int __devinit isp1760_plat_probe(struct platform_device *pdev)
+static int isp1760_plat_probe(struct platform_device *pdev)
{
int ret = 0;
struct usb_hcd *hcd;
struct resource *mem_res;
struct resource *irq_res;
resource_size_t mem_size;
- struct isp1760_platform_data *priv = pdev->dev.platform_data;
+ struct isp1760_platform_data *priv = dev_get_platdata(&pdev->dev);
unsigned int devflags = 0;
- unsigned long irqflags = IRQF_SHARED | IRQF_DISABLED;
+ unsigned long irqflags = IRQF_SHARED;
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem_res) {
@@ -332,8 +371,10 @@ static int __devinit isp1760_plat_probe(struct platform_device *pdev)
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res) {
pr_warning("isp1760: IRQ resource not available\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto cleanup;
}
+
irqflags |= irq_res->flags & IRQF_TRIGGER_MASK;
if (priv) {
@@ -352,7 +393,11 @@ static int __devinit isp1760_plat_probe(struct platform_device *pdev)
}
hcd = isp1760_register(mem_res->start, mem_size, irq_res->start,
- irqflags, &pdev->dev, dev_name(&pdev->dev), devflags);
+ irqflags, -ENOENT,
+ &pdev->dev, dev_name(&pdev->dev), devflags);
+
+ platform_set_drvdata(pdev, hcd);
+
if (IS_ERR(hcd)) {
pr_warning("isp1760: Failed to register the HCD device\n");
ret = -ENODEV;
@@ -368,21 +413,26 @@ out:
return ret;
}
-static int __devexit isp1760_plat_remove(struct platform_device *pdev)
+static int isp1760_plat_remove(struct platform_device *pdev)
{
struct resource *mem_res;
resource_size_t mem_size;
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_remove_hcd(hcd);
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mem_size = resource_size(mem_res);
release_mem_region(mem_res->start, mem_size);
+ usb_put_hcd(hcd);
+
return 0;
}
static struct platform_driver isp1760_plat_driver = {
.probe = isp1760_plat_probe,
- .remove = __devexit_p(isp1760_plat_remove),
+ .remove = isp1760_plat_remove,
.driver = {
.name = "isp1760",
},
@@ -397,8 +447,8 @@ static int __init isp1760_init(void)
ret = platform_driver_register(&isp1760_plat_driver);
if (!ret)
any_ret = 0;
-#ifdef CONFIG_PPC_OF
- ret = of_register_platform_driver(&isp1760_of_driver);
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+ ret = platform_driver_register(&isp1760_of_driver);
if (!ret)
any_ret = 0;
#endif
@@ -417,8 +467,8 @@ module_init(isp1760_init);
static void __exit isp1760_exit(void)
{
platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_PPC_OF
- of_unregister_platform_driver(&isp1760_of_driver);
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+ platform_driver_unregister(&isp1760_of_driver);
#endif
#ifdef CONFIG_PCI
pci_unregister_driver(&isp1761_pci_driver);
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
new file mode 100644
index 00000000000..858efcfda50
--- /dev/null
+++ b/drivers/usb/host/max3421-hcd.c
@@ -0,0 +1,1957 @@
+/*
+ * MAX3421 Host Controller driver for USB.
+ *
+ * Author: David Mosberger-Tang <davidm@egauge.net>
+ *
+ * (C) Copyright 2014 David Mosberger-Tang <davidm@egauge.net>
+ *
+ * MAX3421 is a chip implementing a USB 2.0 Full-/Low-Speed host
+ * controller on a SPI bus.
+ *
+ * Based on:
+ * o MAX3421E datasheet
+ * http://datasheets.maximintegrated.com/en/ds/MAX3421E.pdf
+ * o MAX3421E Programming Guide
+ * http://www.hdl.co.jp/ftpdata/utl-001/AN3785.pdf
+ * o gadget/dummy_hcd.c
+ * For USB HCD implementation.
+ * o Arduino MAX3421 driver
+ * https://github.com/felis/USB_Host_Shield_2.0/blob/master/Usb.cpp
+ *
+ * This file is licenced under the GPL v2.
+ *
+ * Important note on worst-case (full-speed) packet size constraints
+ * (See USB 2.0 Section 5.6.3 and following):
+ *
+ * - control: 64 bytes
+ * - isochronous: 1023 bytes
+ * - interrupt: 64 bytes
+ * - bulk: 64 bytes
+ *
+ * Since the MAX3421 FIFO size is 64 bytes, we do not have to worry about
+ * multi-FIFO writes/reads for a single USB packet *except* for isochronous
+ * transfers. We don't support isochronous transfers at this time, so we
+ * just assume that a USB packet always fits into a single FIFO buffer.
+ *
+ * NOTE: The June 2006 version of "MAX3421E Programming Guide"
+ * (AN3785) has conflicting info for the RCVDAVIRQ bit:
+ *
+ * The description of RCVDAVIRQ says "The CPU *must* clear
+ * this IRQ bit (by writing a 1 to it) before reading the
+ * RCVFIFO data.
+ *
+ * However, the earlier section on "Programming BULK-IN
+ * Transfers" says that:
+ *
+ * After the CPU retrieves the data, it clears the
+ * RCVDAVIRQ bit.
+ *
+ * The December 2006 version has been corrected and it consistently
+ * states the second behavior is the correct one.
+ *
+ * Synchronous SPI transactions sleep so we can't perform any such
+ * transactions while holding a spin-lock (and/or while interrupts are
+ * masked). To achieve this, all SPI transactions are issued from a
+ * single thread (max3421_spi_thread).
+ */
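The locking constraint above is the key structural choice in this driver: spi_sync() may sleep, so nothing that holds the HCD spinlock or runs in hard-IRQ context may touch the chip directly. A condensed sketch of that deferral pattern, with illustrative names (the driver's real handler and thread appear further down in this file):

/*
 * Sketch only: the hard IRQ handler never issues SPI transactions,
 * it merely wakes the worker thread, which performs every spi_sync()
 * from process context.
 */
static struct task_struct *example_spi_thread;	/* started with kthread_run() */

static irqreturn_t example_hard_irq(int irq, void *dev_id)
{
	wake_up_process(example_spi_thread);
	return IRQ_HANDLED;
}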
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include <linux/platform_data/max3421-hcd.h>
+
+#define DRIVER_DESC "MAX3421 USB Host-Controller Driver"
+#define DRIVER_VERSION "1.0"
+
+/* 11-bit counter that wraps around (USB 2.0 Section 8.3.3): */
+#define USB_MAX_FRAME_NUMBER 0x7ff
+#define USB_MAX_RETRIES 3 /* # of retries before error is reported */
+
+/*
+ * Max. # of times we're willing to retransmit a request immediately in
+ * response to a NAK. Afterwards, we fall back on trying once per frame.
+ */
+#define NAK_MAX_FAST_RETRANSMITS 2
+
+#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
+
+/* Port-change mask: */
+#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | \
+ USB_PORT_STAT_C_ENABLE | \
+ USB_PORT_STAT_C_SUSPEND | \
+ USB_PORT_STAT_C_OVERCURRENT | \
+ USB_PORT_STAT_C_RESET) << 16)
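port_status follows the usual hub convention: the low 16 bits mirror wPortStatus and the high 16 bits latch the wPortChange flags, which is what the << 16 in PORT_C_MASK selects. As a hedged sketch (this is not the driver's actual hub_control code), a GetPortStatus reply would split the combined word like this:

static void example_get_port_status(u32 port_status, char *buf)
{
	/* sketch only: separate the combined word into the two wire values */
	((__le16 *) buf)[0] = cpu_to_le16(port_status & 0xffff);	/* wPortStatus */
	((__le16 *) buf)[1] = cpu_to_le16(port_status >> 16);		/* wPortChange */
}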
+
+enum max3421_rh_state {
+ MAX3421_RH_RESET,
+ MAX3421_RH_SUSPENDED,
+ MAX3421_RH_RUNNING
+};
+
+enum pkt_state {
+ PKT_STATE_SETUP, /* waiting to send setup packet to ctrl pipe */
+ PKT_STATE_TRANSFER, /* waiting to xfer transfer_buffer */
+ PKT_STATE_TERMINATE /* waiting to terminate control transfer */
+};
+
+enum scheduling_pass {
+ SCHED_PASS_PERIODIC,
+ SCHED_PASS_NON_PERIODIC,
+ SCHED_PASS_DONE
+};
+
+struct max3421_dma_buf {
+ u8 data[2];
+};
+
+struct max3421_hcd {
+ spinlock_t lock;
+
+ struct task_struct *spi_thread;
+
+ struct max3421_hcd *next;
+
+ enum max3421_rh_state rh_state;
+ /* lower 16 bits contain port status, upper 16 bits the change mask: */
+ u32 port_status;
+
+ unsigned active:1;
+
+ struct list_head ep_list; /* list of EP's with work */
+
+ /*
+ * The following are owned by spi_thread (may be accessed by
+ * SPI-thread without acquiring the HCD lock):
+ */
+ u8 rev; /* chip revision */
+ u16 frame_number;
+ /*
+ * kmalloc'd buffers guaranteed to be in separate (DMA)
+ * cache-lines:
+ */
+ struct max3421_dma_buf *tx;
+ struct max3421_dma_buf *rx;
+ /*
+ * URB we're currently processing. Must not be reset to NULL
+ * unless MAX3421E chip is idle:
+ */
+ struct urb *curr_urb;
+ enum scheduling_pass sched_pass;
+ struct usb_device *loaded_dev; /* dev that's loaded into the chip */
+ int loaded_epnum; /* epnum whose toggles are loaded */
+ int urb_done; /* > 0 -> no errors, < 0: errno */
+ size_t curr_len;
+ u8 hien;
+ u8 mode;
+ u8 iopins[2];
+ unsigned int do_enable_irq:1;
+ unsigned int do_reset_hcd:1;
+ unsigned int do_reset_port:1;
+ unsigned int do_check_unlink:1;
+ unsigned int do_iopin_update:1;
+#ifdef DEBUG
+ unsigned long err_stat[16];
+#endif
+};
+
+struct max3421_ep {
+ struct usb_host_endpoint *ep;
+ struct list_head ep_list;
+ u32 naks;
+ u16 last_active; /* frame # this ep was last active */
+ enum pkt_state pkt_state;
+ u8 retries;
+ u8 retransmit; /* packet needs retransmission */
+};
+
+static struct max3421_hcd *max3421_hcd_list;
+
+#define MAX3421_FIFO_SIZE 64
+
+#define MAX3421_SPI_DIR_RD 0 /* read register from MAX3421 */
+#define MAX3421_SPI_DIR_WR 1 /* write register to MAX3421 */
+
+/* SPI commands: */
+#define MAX3421_SPI_DIR_SHIFT 1
+#define MAX3421_SPI_REG_SHIFT 3
+
+#define MAX3421_REG_RCVFIFO 1
+#define MAX3421_REG_SNDFIFO 2
+#define MAX3421_REG_SUDFIFO 4
+#define MAX3421_REG_RCVBC 6
+#define MAX3421_REG_SNDBC 7
+#define MAX3421_REG_USBIRQ 13
+#define MAX3421_REG_USBIEN 14
+#define MAX3421_REG_USBCTL 15
+#define MAX3421_REG_CPUCTL 16
+#define MAX3421_REG_PINCTL 17
+#define MAX3421_REG_REVISION 18
+#define MAX3421_REG_IOPINS1 20
+#define MAX3421_REG_IOPINS2 21
+#define MAX3421_REG_GPINIRQ 22
+#define MAX3421_REG_GPINIEN 23
+#define MAX3421_REG_GPINPOL 24
+#define MAX3421_REG_HIRQ 25
+#define MAX3421_REG_HIEN 26
+#define MAX3421_REG_MODE 27
+#define MAX3421_REG_PERADDR 28
+#define MAX3421_REG_HCTL 29
+#define MAX3421_REG_HXFR 30
+#define MAX3421_REG_HRSL 31
+
+enum {
+ MAX3421_USBIRQ_OSCOKIRQ_BIT = 0,
+ MAX3421_USBIRQ_NOVBUSIRQ_BIT = 5,
+ MAX3421_USBIRQ_VBUSIRQ_BIT
+};
+
+enum {
+ MAX3421_CPUCTL_IE_BIT = 0,
+ MAX3421_CPUCTL_PULSEWID0_BIT = 6,
+ MAX3421_CPUCTL_PULSEWID1_BIT
+};
+
+enum {
+ MAX3421_USBCTL_PWRDOWN_BIT = 4,
+ MAX3421_USBCTL_CHIPRES_BIT
+};
+
+enum {
+ MAX3421_PINCTL_GPXA_BIT = 0,
+ MAX3421_PINCTL_GPXB_BIT,
+ MAX3421_PINCTL_POSINT_BIT,
+ MAX3421_PINCTL_INTLEVEL_BIT,
+ MAX3421_PINCTL_FDUPSPI_BIT,
+ MAX3421_PINCTL_EP0INAK_BIT,
+ MAX3421_PINCTL_EP2INAK_BIT,
+ MAX3421_PINCTL_EP3INAK_BIT,
+};
+
+enum {
+ MAX3421_HI_BUSEVENT_BIT = 0, /* bus-reset/-resume */
+ MAX3421_HI_RWU_BIT, /* remote wakeup */
+ MAX3421_HI_RCVDAV_BIT, /* receive FIFO data available */
+ MAX3421_HI_SNDBAV_BIT, /* send buffer available */
+ MAX3421_HI_SUSDN_BIT, /* suspend operation done */
+ MAX3421_HI_CONDET_BIT, /* peripheral connect/disconnect */
+ MAX3421_HI_FRAME_BIT, /* frame generator */
+ MAX3421_HI_HXFRDN_BIT, /* host transfer done */
+};
+
+enum {
+ MAX3421_HCTL_BUSRST_BIT = 0,
+ MAX3421_HCTL_FRMRST_BIT,
+ MAX3421_HCTL_SAMPLEBUS_BIT,
+ MAX3421_HCTL_SIGRSM_BIT,
+ MAX3421_HCTL_RCVTOG0_BIT,
+ MAX3421_HCTL_RCVTOG1_BIT,
+ MAX3421_HCTL_SNDTOG0_BIT,
+ MAX3421_HCTL_SNDTOG1_BIT
+};
+
+enum {
+ MAX3421_MODE_HOST_BIT = 0,
+ MAX3421_MODE_LOWSPEED_BIT,
+ MAX3421_MODE_HUBPRE_BIT,
+ MAX3421_MODE_SOFKAENAB_BIT,
+ MAX3421_MODE_SEPIRQ_BIT,
+ MAX3421_MODE_DELAYISO_BIT,
+ MAX3421_MODE_DMPULLDN_BIT,
+ MAX3421_MODE_DPPULLDN_BIT
+};
+
+enum {
+ MAX3421_HRSL_OK = 0,
+ MAX3421_HRSL_BUSY,
+ MAX3421_HRSL_BADREQ,
+ MAX3421_HRSL_UNDEF,
+ MAX3421_HRSL_NAK,
+ MAX3421_HRSL_STALL,
+ MAX3421_HRSL_TOGERR,
+ MAX3421_HRSL_WRONGPID,
+ MAX3421_HRSL_BADBC,
+ MAX3421_HRSL_PIDERR,
+ MAX3421_HRSL_PKTERR,
+ MAX3421_HRSL_CRCERR,
+ MAX3421_HRSL_KERR,
+ MAX3421_HRSL_JERR,
+ MAX3421_HRSL_TIMEOUT,
+ MAX3421_HRSL_BABBLE,
+ MAX3421_HRSL_RESULT_MASK = 0xf,
+ MAX3421_HRSL_RCVTOGRD_BIT = 4,
+ MAX3421_HRSL_SNDTOGRD_BIT,
+ MAX3421_HRSL_KSTATUS_BIT,
+ MAX3421_HRSL_JSTATUS_BIT
+};
+
+/* Return same error-codes as ohci.h:cc_to_error: */
+static const int hrsl_to_error[] = {
+ [MAX3421_HRSL_OK] = 0,
+ [MAX3421_HRSL_BUSY] = -EINVAL,
+ [MAX3421_HRSL_BADREQ] = -EINVAL,
+ [MAX3421_HRSL_UNDEF] = -EINVAL,
+ [MAX3421_HRSL_NAK] = -EAGAIN,
+ [MAX3421_HRSL_STALL] = -EPIPE,
+ [MAX3421_HRSL_TOGERR] = -EILSEQ,
+ [MAX3421_HRSL_WRONGPID] = -EPROTO,
+ [MAX3421_HRSL_BADBC] = -EREMOTEIO,
+ [MAX3421_HRSL_PIDERR] = -EPROTO,
+ [MAX3421_HRSL_PKTERR] = -EPROTO,
+ [MAX3421_HRSL_CRCERR] = -EILSEQ,
+ [MAX3421_HRSL_KERR] = -EIO,
+ [MAX3421_HRSL_JERR] = -EIO,
+ [MAX3421_HRSL_TIMEOUT] = -ETIME,
+ [MAX3421_HRSL_BABBLE] = -EOVERFLOW
+};
+
+/*
+ * See http://www.beyondlogic.org/usbnutshell/usb4.shtml#Control for a
+ * reasonable overview of how control transfers use the IN/OUT
+ * tokens.
+ */
+#define MAX3421_HXFR_BULK_IN(ep) (0x00 | (ep)) /* bulk or interrupt */
+#define MAX3421_HXFR_SETUP 0x10
+#define MAX3421_HXFR_BULK_OUT(ep) (0x20 | (ep)) /* bulk or interrupt */
+#define MAX3421_HXFR_ISO_IN(ep) (0x40 | (ep))
+#define MAX3421_HXFR_ISO_OUT(ep) (0x60 | (ep))
+#define MAX3421_HXFR_HS_IN 0x80 /* handshake in */
+#define MAX3421_HXFR_HS_OUT 0xa0 /* handshake out */
+
+#define field(val, bit) ((val) << (bit))
+
+static inline s16
+frame_diff(u16 left, u16 right)
+{
+ return ((unsigned) (left - right)) % (USB_MAX_FRAME_NUMBER + 1);
+}
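frame_diff() performs the subtraction modulo USB_MAX_FRAME_NUMBER + 1, so it stays correct across the 11-bit wrap. A couple of worked values (illustrative only):

/* Usage sketch for frame_diff(): */
s16 d1 = frame_diff(0x000, 0x7ff);	/* == 1: counter wrapped by one frame */
s16 d2 = frame_diff(0x005, 0x002);	/* == 3: plain difference */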
+
+static inline struct max3421_hcd *
+hcd_to_max3421(struct usb_hcd *hcd)
+{
+ return (struct max3421_hcd *) hcd->hcd_priv;
+}
+
+static inline struct usb_hcd *
+max3421_to_hcd(struct max3421_hcd *max3421_hcd)
+{
+ return container_of((void *) max3421_hcd, struct usb_hcd, hcd_priv);
+}
+
+static u8
+spi_rd8(struct usb_hcd *hcd, unsigned int reg)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct spi_transfer transfer;
+ struct spi_message msg;
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ max3421_hcd->tx->data[0] =
+ (field(reg, MAX3421_SPI_REG_SHIFT) |
+ field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
+
+ transfer.tx_buf = max3421_hcd->tx->data;
+ transfer.rx_buf = max3421_hcd->rx->data;
+ transfer.len = 2;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+
+ return max3421_hcd->rx->data[1];
+}
+
+static void
+spi_wr8(struct usb_hcd *hcd, unsigned int reg, u8 val)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct spi_transfer transfer;
+ struct spi_message msg;
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ max3421_hcd->tx->data[0] =
+ (field(reg, MAX3421_SPI_REG_SHIFT) |
+ field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
+ max3421_hcd->tx->data[1] = val;
+
+ transfer.tx_buf = max3421_hcd->tx->data;
+ transfer.len = 2;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+}
+
+static void
+spi_rd_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct spi_transfer transfer[2];
+ struct spi_message msg;
+
+ memset(transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ max3421_hcd->tx->data[0] =
+ (field(reg, MAX3421_SPI_REG_SHIFT) |
+ field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
+ transfer[0].tx_buf = max3421_hcd->tx->data;
+ transfer[0].len = 1;
+
+ transfer[1].rx_buf = buf;
+ transfer[1].len = len;
+
+ spi_message_add_tail(&transfer[0], &msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ spi_sync(spi, &msg);
+}
+
+static void
+spi_wr_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct spi_transfer transfer[2];
+ struct spi_message msg;
+
+ memset(transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ max3421_hcd->tx->data[0] =
+ (field(reg, MAX3421_SPI_REG_SHIFT) |
+ field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
+
+ transfer[0].tx_buf = max3421_hcd->tx->data;
+ transfer[0].len = 1;
+
+ transfer[1].tx_buf = buf;
+ transfer[1].len = len;
+
+ spi_message_add_tail(&transfer[0], &msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ spi_sync(spi, &msg);
+}
+
+/*
+ * Figure out the correct setting for the LOWSPEED and HUBPRE mode
+ * bits. The HUBPRE bit needs to be set when MAX3421E operates at
+ * full speed, but it's talking to a low-speed device (i.e., through a
+ * hub). Setting that bit ensures that every low-speed packet is
+ * preceded by a full-speed PRE PID. Possible configurations:
+ *
+ * Hub speed: Device speed: => LOWSPEED bit: HUBPRE bit:
+ * FULL FULL => 0 0
+ * FULL LOW => 1 1
+ * LOW LOW => 1 0
+ * LOW FULL => 1 0
+ */
+static void
+max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u8 mode_lowspeed, mode_hubpre, mode = max3421_hcd->mode;
+
+ mode_lowspeed = BIT(MAX3421_MODE_LOWSPEED_BIT);
+ mode_hubpre = BIT(MAX3421_MODE_HUBPRE_BIT);
+ if (max3421_hcd->port_status & USB_PORT_STAT_LOW_SPEED) {
+ mode |= mode_lowspeed;
+ mode &= ~mode_hubpre;
+ } else if (dev->speed == USB_SPEED_LOW) {
+ mode |= mode_lowspeed | mode_hubpre;
+ } else {
+ mode &= ~(mode_lowspeed | mode_hubpre);
+ }
+ if (mode != max3421_hcd->mode) {
+ max3421_hcd->mode = mode;
+ spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
+ }
+
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
+ int force_toggles)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int old_epnum, same_ep, rcvtog, sndtog;
+ struct usb_device *old_dev;
+ u8 hctl;
+
+ old_dev = max3421_hcd->loaded_dev;
+ old_epnum = max3421_hcd->loaded_epnum;
+
+ same_ep = (dev == old_dev && epnum == old_epnum);
+ if (same_ep && !force_toggles)
+ return;
+
+ if (old_dev && !same_ep) {
+ /* save the old endpoint's toggles: */
+ u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+
+ rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
+ sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+
+ /* no locking: HCD (i.e., we) own toggles, don't we? */
+ usb_settoggle(old_dev, old_epnum, 0, rcvtog);
+ usb_settoggle(old_dev, old_epnum, 1, sndtog);
+ }
+ /* setup new endpoint's toggle bits: */
+ rcvtog = usb_gettoggle(dev, epnum, 0);
+ sndtog = usb_gettoggle(dev, epnum, 1);
+ hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
+ BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
+
+ max3421_hcd->loaded_epnum = epnum;
+ spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
+
+ /*
+ * Note: devnum for one and the same device can change during
+ * address-assignment so it's best to just always load the
+ * address whenever the end-point changed/was forced.
+ */
+ max3421_hcd->loaded_dev = dev;
+ spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
+}
+
+static int
+max3421_ctrl_setup(struct usb_hcd *hcd, struct urb *urb)
+{
+ spi_wr_buf(hcd, MAX3421_REG_SUDFIFO, urb->setup_packet, 8);
+ return MAX3421_HXFR_SETUP;
+}
+
+static int
+max3421_transfer_in(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int epnum = usb_pipeendpoint(urb->pipe);
+
+ max3421_hcd->curr_len = 0;
+ max3421_hcd->hien |= BIT(MAX3421_HI_RCVDAV_BIT);
+ return MAX3421_HXFR_BULK_IN(epnum);
+}
+
+static int
+max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int epnum = usb_pipeendpoint(urb->pipe);
+ u32 max_packet;
+ void *src;
+
+ src = urb->transfer_buffer + urb->actual_length;
+
+ if (fast_retransmit) {
+ if (max3421_hcd->rev == 0x12) {
+ /* work around rev 0x12 bug: */
+ spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
+ spi_wr8(hcd, MAX3421_REG_SNDFIFO, ((u8 *) src)[0]);
+ spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
+ }
+ return MAX3421_HXFR_BULK_OUT(epnum);
+ }
+
+ max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+
+ if (max_packet > MAX3421_FIFO_SIZE) {
+ /*
+ * We do not support isochronous transfers at this
+ * time.
+ */
+ dev_err(&spi->dev,
+ "%s: packet-size of %u too big (limit is %u bytes)",
+ __func__, max_packet, MAX3421_FIFO_SIZE);
+ max3421_hcd->urb_done = -EMSGSIZE;
+ return -EMSGSIZE;
+ }
+ max3421_hcd->curr_len = min((urb->transfer_buffer_length -
+ urb->actual_length), max_packet);
+
+ spi_wr_buf(hcd, MAX3421_REG_SNDFIFO, src, max3421_hcd->curr_len);
+ spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
+ return MAX3421_HXFR_BULK_OUT(epnum);
+}
+
+/*
+ * Issue the next host-transfer command.
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_next_transfer(struct usb_hcd *hcd, int fast_retransmit)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb = max3421_hcd->curr_urb;
+ struct max3421_ep *max3421_ep;
+ int cmd = -EINVAL;
+
+ if (!urb)
+ return; /* nothing to do */
+
+ max3421_ep = urb->ep->hcpriv;
+
+ switch (max3421_ep->pkt_state) {
+ case PKT_STATE_SETUP:
+ cmd = max3421_ctrl_setup(hcd, urb);
+ break;
+
+ case PKT_STATE_TRANSFER:
+ if (usb_urb_dir_in(urb))
+ cmd = max3421_transfer_in(hcd, urb);
+ else
+ cmd = max3421_transfer_out(hcd, urb, fast_retransmit);
+ break;
+
+ case PKT_STATE_TERMINATE:
+ /*
+ * IN transfers are terminated with HS_OUT token,
+ * OUT transfers with HS_IN:
+ */
+ if (usb_urb_dir_in(urb))
+ cmd = MAX3421_HXFR_HS_OUT;
+ else
+ cmd = MAX3421_HXFR_HS_IN;
+ break;
+ }
+
+ if (cmd < 0)
+ return;
+
+ /* issue the command and wait for host-xfer-done interrupt: */
+
+ spi_wr8(hcd, MAX3421_REG_HXFR, cmd);
+ max3421_hcd->hien |= BIT(MAX3421_HI_HXFRDN_BIT);
+}
+
+/*
+ * Find the next URB to process and start its execution.
+ *
+ * At this time, we do not anticipate ever connecting a USB hub to the
+ * MAX3421 chip, so at most one USB device can be connected and we can use
+ * a simplistic scheduler: at the start of a frame, schedule all
+ * periodic transfers. Once that is done, use the remainder of the
+ * frame to process non-periodic (bulk & control) transfers.
+ *
+ * Preconditions:
+ * o Caller must NOT hold HCD spinlock.
+ * o max3421_hcd->curr_urb MUST BE NULL.
+ * o MAX3421E chip must be idle.
+ */
+static int
+max3421_select_and_start_urb(struct usb_hcd *hcd)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb, *curr_urb = NULL;
+ struct max3421_ep *max3421_ep;
+ int epnum, force_toggles = 0;
+ struct usb_host_endpoint *ep;
+ struct list_head *pos;
+ unsigned long flags;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ for (;
+ max3421_hcd->sched_pass < SCHED_PASS_DONE;
+ ++max3421_hcd->sched_pass)
+ list_for_each(pos, &max3421_hcd->ep_list) {
+ urb = NULL;
+ max3421_ep = container_of(pos, struct max3421_ep,
+ ep_list);
+ ep = max3421_ep->ep;
+
+ switch (usb_endpoint_type(&ep->desc)) {
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ if (max3421_hcd->sched_pass !=
+ SCHED_PASS_PERIODIC)
+ continue;
+ break;
+
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ if (max3421_hcd->sched_pass !=
+ SCHED_PASS_NON_PERIODIC)
+ continue;
+ break;
+ }
+
+ if (list_empty(&ep->urb_list))
+ continue; /* nothing to do */
+ urb = list_first_entry(&ep->urb_list, struct urb,
+ urb_list);
+ if (urb->unlinked) {
+ dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
+ __func__, urb, urb->unlinked);
+ max3421_hcd->curr_urb = urb;
+ max3421_hcd->urb_done = 1;
+ spin_unlock_irqrestore(&max3421_hcd->lock,
+ flags);
+ return 1;
+ }
+
+ switch (usb_endpoint_type(&ep->desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /*
+ * Allow one control transaction per
+ * frame per endpoint:
+ */
+ if (frame_diff(max3421_ep->last_active,
+ max3421_hcd->frame_number) == 0)
+ continue;
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ if (max3421_ep->retransmit
+ && (frame_diff(max3421_ep->last_active,
+ max3421_hcd->frame_number)
+ == 0))
+ /*
+ * We already tried this EP
+ * during this frame and got a
+ * NAK or error; wait for next frame
+ */
+ continue;
+ break;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ if (frame_diff(max3421_hcd->frame_number,
+ max3421_ep->last_active)
+ < urb->interval)
+ /*
+ * We already processed this
+ * end-point in the current
+ * frame
+ */
+ continue;
+ break;
+ }
+
+ /* move current ep to tail: */
+ list_move_tail(pos, &max3421_hcd->ep_list);
+ curr_urb = urb;
+ goto done;
+ }
+done:
+ if (!curr_urb) {
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return 0;
+ }
+
+ urb = max3421_hcd->curr_urb = curr_urb;
+ epnum = usb_endpoint_num(&urb->ep->desc);
+ if (max3421_ep->retransmit)
+ /* restart (part of) a USB transaction: */
+ max3421_ep->retransmit = 0;
+ else {
+ /* start USB transaction: */
+ if (usb_endpoint_xfer_control(&ep->desc)) {
+ /*
+ * See USB 2.0 spec section 8.6.1
+ * Initialization via SETUP Token:
+ */
+ usb_settoggle(urb->dev, epnum, 0, 1);
+ usb_settoggle(urb->dev, epnum, 1, 1);
+ max3421_ep->pkt_state = PKT_STATE_SETUP;
+ force_toggles = 1;
+ } else
+ max3421_ep->pkt_state = PKT_STATE_TRANSFER;
+ }
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+ max3421_ep->last_active = max3421_hcd->frame_number;
+ max3421_set_address(hcd, urb->dev, epnum, force_toggles);
+ max3421_set_speed(hcd, urb->dev);
+ max3421_next_transfer(hcd, 0);
+ return 1;
+}
+
+/*
+ * Check all endpoints for URBs that got unlinked.
+ *
+ * Caller must NOT hold HCD spinlock.
+ */
+static int
+max3421_check_unlink(struct usb_hcd *hcd)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct list_head *pos, *upos, *next_upos;
+ struct max3421_ep *max3421_ep;
+ struct usb_host_endpoint *ep;
+ struct urb *urb;
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ list_for_each(pos, &max3421_hcd->ep_list) {
+ max3421_ep = container_of(pos, struct max3421_ep, ep_list);
+ ep = max3421_ep->ep;
+ list_for_each_safe(upos, next_upos, &ep->urb_list) {
+ urb = container_of(upos, struct urb, urb_list);
+ if (urb->unlinked) {
+ retval = 1;
+ dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
+ __func__, urb, urb->unlinked);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&max3421_hcd->lock,
+ flags);
+ usb_hcd_giveback_urb(hcd, urb, 0);
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_slow_retransmit(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb = max3421_hcd->curr_urb;
+ struct max3421_ep *max3421_ep;
+
+ max3421_ep = urb->ep->hcpriv;
+ max3421_ep->retransmit = 1;
+ max3421_hcd->curr_urb = NULL;
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_recv_data_available(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb = max3421_hcd->curr_urb;
+ size_t remaining, transfer_size;
+ u8 rcvbc;
+
+ rcvbc = spi_rd8(hcd, MAX3421_REG_RCVBC);
+
+ if (rcvbc > MAX3421_FIFO_SIZE)
+ rcvbc = MAX3421_FIFO_SIZE;
+ if (urb->actual_length >= urb->transfer_buffer_length)
+ remaining = 0;
+ else
+ remaining = urb->transfer_buffer_length - urb->actual_length;
+ transfer_size = rcvbc;
+ if (transfer_size > remaining)
+ transfer_size = remaining;
+ if (transfer_size > 0) {
+ void *dst = urb->transfer_buffer + urb->actual_length;
+
+ spi_rd_buf(hcd, MAX3421_REG_RCVFIFO, dst, transfer_size);
+ urb->actual_length += transfer_size;
+ max3421_hcd->curr_len = transfer_size;
+ }
+
+ /* ack the RCVDAV irq now that the FIFO has been read: */
+ spi_wr8(hcd, MAX3421_REG_HIRQ, BIT(MAX3421_HI_RCVDAV_BIT));
+}
+
+static void
+max3421_handle_error(struct usb_hcd *hcd, u8 hrsl)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u8 result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
+ struct urb *urb = max3421_hcd->curr_urb;
+ struct max3421_ep *max3421_ep = urb->ep->hcpriv;
+ int switch_sndfifo;
+
+ /*
+ * If an OUT command results in any response other than OK
+ * (i.e., error or NAK), we have to perform a dummy-write to
+ * SNDBC so the FIFO gets switched back to us. Otherwise, we
+ * get out of sync with the SNDFIFO double buffer.
+ */
+ switch_sndfifo = (max3421_ep->pkt_state == PKT_STATE_TRANSFER &&
+ usb_urb_dir_out(urb));
+
+ switch (result_code) {
+ case MAX3421_HRSL_OK:
+ return; /* this shouldn't happen */
+
+ case MAX3421_HRSL_WRONGPID: /* received wrong PID */
+ case MAX3421_HRSL_BUSY: /* SIE busy */
+ case MAX3421_HRSL_BADREQ: /* bad val in HXFR */
+ case MAX3421_HRSL_UNDEF: /* reserved */
+ case MAX3421_HRSL_KERR: /* K-state instead of response */
+ case MAX3421_HRSL_JERR: /* J-state instead of response */
+ /*
+ * packet experienced an error that we cannot recover
+ * from; report error
+ */
+ max3421_hcd->urb_done = hrsl_to_error[result_code];
+ dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
+ __func__, hrsl);
+ break;
+
+ case MAX3421_HRSL_TOGERR:
+ if (usb_urb_dir_in(urb))
+ ; /* don't do anything (device will switch toggle) */
+ else {
+ /* flip the send toggle bit: */
+ int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+
+ sndtog ^= 1;
+ spi_wr8(hcd, MAX3421_REG_HCTL,
+ BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
+ }
+ /* FALL THROUGH */
+ case MAX3421_HRSL_BADBC: /* bad byte count */
+ case MAX3421_HRSL_PIDERR: /* received PID is corrupted */
+ case MAX3421_HRSL_PKTERR: /* packet error (stuff, EOP) */
+ case MAX3421_HRSL_CRCERR: /* CRC error */
+ case MAX3421_HRSL_BABBLE: /* device talked too long */
+ case MAX3421_HRSL_TIMEOUT:
+ if (max3421_ep->retries++ < USB_MAX_RETRIES)
+ /* retry the packet again in the next frame */
+ max3421_slow_retransmit(hcd);
+ else {
+ /* Based on ohci.h cc_to_err[]: */
+ max3421_hcd->urb_done = hrsl_to_error[result_code];
+ dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
+ __func__, hrsl);
+ }
+ break;
+
+ case MAX3421_HRSL_STALL:
+ dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
+ __func__, hrsl);
+ max3421_hcd->urb_done = hrsl_to_error[result_code];
+ break;
+
+ case MAX3421_HRSL_NAK:
+ /*
+ * Device wasn't ready for data or has no data
+ * available: retry the packet again.
+ */
+ if (max3421_ep->naks++ < NAK_MAX_FAST_RETRANSMITS) {
+ max3421_next_transfer(hcd, 1);
+ switch_sndfifo = 0;
+ } else
+ max3421_slow_retransmit(hcd);
+ break;
+ }
+ if (switch_sndfifo)
+ spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static int
+max3421_transfer_in_done(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u32 max_packet;
+
+ if (urb->actual_length >= urb->transfer_buffer_length)
+ return 1; /* read is complete, so we're done */
+
+ /*
+ * USB 2.0 Section 5.3.2 Pipes: packets must be full size
+ * except for last one.
+ */
+ max_packet = usb_maxpacket(urb->dev, urb->pipe, 0);
+ if (max_packet > MAX3421_FIFO_SIZE) {
+ /*
+ * We do not support isochronous transfers at this
+ * time...
+ */
+ dev_err(&spi->dev,
+ "%s: packet-size of %u too big (limit is %u bytes)",
+ __func__, max_packet, MAX3421_FIFO_SIZE);
+ return -EINVAL;
+ }
+
+ if (max3421_hcd->curr_len < max_packet) {
+ if (urb->transfer_flags & URB_SHORT_NOT_OK) {
+ /*
+ * remaining > 0 and received an
+ * unexpected partial packet ->
+ * error
+ */
+ return -EREMOTEIO;
+ } else
+ /* short read, but it's OK */
+ return 1;
+ }
+ return 0; /* not done */
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static int
+max3421_transfer_out_done(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ urb->actual_length += max3421_hcd->curr_len;
+ if (urb->actual_length < urb->transfer_buffer_length)
+ return 0;
+ if (urb->transfer_flags & URB_ZERO_PACKET) {
+ /*
+ * Some hardware needs a zero-size packet at the end
+ * of a bulk-out transfer if the last transfer was a
+ * full-sized packet (i.e., such hardware uses <
+ * max_packet as an indicator that the end of the
+ * packet has been reached).
+ */
+ u32 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+
+ if (max3421_hcd->curr_len == max_packet)
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_host_transfer_done(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb = max3421_hcd->curr_urb;
+ struct max3421_ep *max3421_ep;
+ u8 result_code, hrsl;
+ int urb_done = 0;
+
+ max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) |
+ BIT(MAX3421_HI_RCVDAV_BIT));
+
+ hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+ result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
+
+#ifdef DEBUG
+ ++max3421_hcd->err_stat[result_code];
+#endif
+
+ max3421_ep = urb->ep->hcpriv;
+
+ if (unlikely(result_code != MAX3421_HRSL_OK)) {
+ max3421_handle_error(hcd, hrsl);
+ return;
+ }
+
+ max3421_ep->naks = 0;
+ max3421_ep->retries = 0;
+ switch (max3421_ep->pkt_state) {
+
+ case PKT_STATE_SETUP:
+ if (urb->transfer_buffer_length > 0)
+ max3421_ep->pkt_state = PKT_STATE_TRANSFER;
+ else
+ max3421_ep->pkt_state = PKT_STATE_TERMINATE;
+ break;
+
+ case PKT_STATE_TRANSFER:
+ if (usb_urb_dir_in(urb))
+ urb_done = max3421_transfer_in_done(hcd, urb);
+ else
+ urb_done = max3421_transfer_out_done(hcd, urb);
+ if (urb_done > 0 && usb_pipetype(urb->pipe) == PIPE_CONTROL) {
+ /*
+ * We aren't really done - we still need to
+ * terminate the control transfer:
+ */
+ max3421_hcd->urb_done = urb_done = 0;
+ max3421_ep->pkt_state = PKT_STATE_TERMINATE;
+ }
+ break;
+
+ case PKT_STATE_TERMINATE:
+ urb_done = 1;
+ break;
+ }
+
+ if (urb_done)
+ max3421_hcd->urb_done = urb_done;
+ else
+ max3421_next_transfer(hcd, 0);
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_detect_conn(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned int jk, have_conn = 0;
+ u32 old_port_status, chg;
+ unsigned long flags;
+ u8 hrsl, mode;
+
+ hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+
+ jk = ((((hrsl >> MAX3421_HRSL_JSTATUS_BIT) & 1) << 0) |
+ (((hrsl >> MAX3421_HRSL_KSTATUS_BIT) & 1) << 1));
+
+ mode = max3421_hcd->mode;
+
+ switch (jk) {
+ case 0x0: /* SE0: disconnect */
+ /*
+ * Turn off SOFKAENAB bit to avoid getting an interrupt
+ * every millisecond:
+ */
+ mode &= ~BIT(MAX3421_MODE_SOFKAENAB_BIT);
+ break;
+
+ case 0x1: /* J=0,K=1: low-speed (in full-speed or vice versa) */
+ case 0x2: /* J=1,K=0: full-speed (in full-speed or vice versa) */
+ if (jk == 0x2)
+ /* need to switch to the other speed: */
+ mode ^= BIT(MAX3421_MODE_LOWSPEED_BIT);
+ /* turn on SOFKAENAB bit: */
+ mode |= BIT(MAX3421_MODE_SOFKAENAB_BIT);
+ have_conn = 1;
+ break;
+
+ case 0x3: /* illegal */
+ break;
+ }
+
+ max3421_hcd->mode = mode;
+ spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ old_port_status = max3421_hcd->port_status;
+ if (have_conn)
+ max3421_hcd->port_status |= USB_PORT_STAT_CONNECTION;
+ else
+ max3421_hcd->port_status &= ~USB_PORT_STAT_CONNECTION;
+ if (mode & BIT(MAX3421_MODE_LOWSPEED_BIT))
+ max3421_hcd->port_status |= USB_PORT_STAT_LOW_SPEED;
+ else
+ max3421_hcd->port_status &= ~USB_PORT_STAT_LOW_SPEED;
+ chg = (old_port_status ^ max3421_hcd->port_status);
+ max3421_hcd->port_status |= chg << 16;
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+}
+
+static irqreturn_t
+max3421_irq_handler(int irq, void *dev_id)
+{
+ struct usb_hcd *hcd = dev_id;
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ if (max3421_hcd->spi_thread &&
+ max3421_hcd->spi_thread->state != TASK_RUNNING)
+ wake_up_process(max3421_hcd->spi_thread);
+ if (!max3421_hcd->do_enable_irq) {
+ max3421_hcd->do_enable_irq = 1;
+ disable_irq_nosync(spi->irq);
+ }
+ return IRQ_HANDLED;
+}
+
+#ifdef DEBUG
+
+static void
+dump_eps(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct max3421_ep *max3421_ep;
+ struct usb_host_endpoint *ep;
+ struct list_head *pos, *upos;
+ char ubuf[512], *dp, *end;
+ unsigned long flags;
+ struct urb *urb;
+ int epnum, ret;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ list_for_each(pos, &max3421_hcd->ep_list) {
+ max3421_ep = container_of(pos, struct max3421_ep, ep_list);
+ ep = max3421_ep->ep;
+
+ dp = ubuf;
+ end = dp + sizeof(ubuf);
+ *dp = '\0';
+ list_for_each(upos, &ep->urb_list) {
+ urb = container_of(upos, struct urb, urb_list);
+ ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
+ usb_pipetype(urb->pipe),
+ usb_urb_dir_in(urb) ? "IN" : "OUT",
+ urb->actual_length,
+ urb->transfer_buffer_length);
+ if (ret < 0 || ret >= end - dp)
+ break; /* error or buffer full */
+ dp += ret;
+ }
+
+ epnum = usb_endpoint_num(&ep->desc);
+ pr_info("EP%0u %u lst %04u rtr %u nak %6u rxmt %u: %s\n",
+ epnum, max3421_ep->pkt_state, max3421_ep->last_active,
+ max3421_ep->retries, max3421_ep->naks,
+ max3421_ep->retransmit, ubuf);
+ }
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+}
+
+#endif /* DEBUG */
+
+/* Return zero if no work was performed, 1 otherwise. */
+static int
+max3421_handle_irqs(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u32 chg, old_port_status;
+ unsigned long flags;
+ u8 hirq;
+
+ /*
+ * Read and ack pending interrupts (CPU must never
+ * clear SNDBAV directly and RCVDAV must be cleared by
+ * max3421_recv_data_available()!):
+ */
+ hirq = spi_rd8(hcd, MAX3421_REG_HIRQ);
+ hirq &= max3421_hcd->hien;
+ if (!hirq)
+ return 0;
+
+ spi_wr8(hcd, MAX3421_REG_HIRQ,
+ hirq & ~(BIT(MAX3421_HI_SNDBAV_BIT) |
+ BIT(MAX3421_HI_RCVDAV_BIT)));
+
+ if (hirq & BIT(MAX3421_HI_FRAME_BIT)) {
+ max3421_hcd->frame_number = ((max3421_hcd->frame_number + 1)
+ & USB_MAX_FRAME_NUMBER);
+ max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
+ }
+
+ if (hirq & BIT(MAX3421_HI_RCVDAV_BIT))
+ max3421_recv_data_available(hcd);
+
+ if (hirq & BIT(MAX3421_HI_HXFRDN_BIT))
+ max3421_host_transfer_done(hcd);
+
+ if (hirq & BIT(MAX3421_HI_CONDET_BIT))
+ max3421_detect_conn(hcd);
+
+ /*
+ * Now process interrupts that may affect HCD state
+ * other than the end-points:
+ */
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ old_port_status = max3421_hcd->port_status;
+ if (hirq & BIT(MAX3421_HI_BUSEVENT_BIT)) {
+ if (max3421_hcd->port_status & USB_PORT_STAT_RESET) {
+ /* BUSEVENT due to completion of Bus Reset */
+ max3421_hcd->port_status &= ~USB_PORT_STAT_RESET;
+ max3421_hcd->port_status |= USB_PORT_STAT_ENABLE;
+ } else {
+ /* BUSEVENT due to completion of Bus Resume */
+ pr_info("%s: BUSEVENT Bus Resume Done\n", __func__);
+ }
+ }
+ if (hirq & BIT(MAX3421_HI_RWU_BIT))
+ pr_info("%s: RWU\n", __func__);
+ if (hirq & BIT(MAX3421_HI_SUSDN_BIT))
+ pr_info("%s: SUSDN\n", __func__);
+
+ chg = (old_port_status ^ max3421_hcd->port_status);
+ max3421_hcd->port_status |= chg << 16;
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+#ifdef DEBUG
+ {
+ static unsigned long last_time;
+ char sbuf[16 * 16], *dp, *end;
+ int i;
+
+ if (jiffies - last_time > 5*HZ) {
+ dp = sbuf;
+ end = sbuf + sizeof(sbuf);
+ *dp = '\0';
+ for (i = 0; i < 16; ++i) {
+ int ret = snprintf(dp, end - dp, " %lu",
+ max3421_hcd->err_stat[i]);
+ if (ret < 0 || ret >= end - dp)
+ break; /* error or buffer full */
+ dp += ret;
+ }
+ pr_info("%s: hrsl_stats %s\n", __func__, sbuf);
+ memset(max3421_hcd->err_stat, 0,
+ sizeof(max3421_hcd->err_stat));
+ last_time = jiffies;
+
+ dump_eps(hcd);
+ }
+ }
+#endif
+ return 1;
+}
+
+static int
+max3421_reset_hcd(struct usb_hcd *hcd)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int timeout;
+
+ /* perform a chip reset and wait for OSCIRQ signal to appear: */
+ spi_wr8(hcd, MAX3421_REG_USBCTL, BIT(MAX3421_USBCTL_CHIPRES_BIT));
+ /* clear reset: */
+ spi_wr8(hcd, MAX3421_REG_USBCTL, 0);
+ timeout = 1000;
+ while (1) {
+ if (spi_rd8(hcd, MAX3421_REG_USBIRQ)
+ & BIT(MAX3421_USBIRQ_OSCOKIRQ_BIT))
+ break;
+ if (--timeout < 0) {
+ dev_err(&spi->dev,
+ "timed out waiting for oscillator OK signal");
+ return 1;
+ }
+ cond_resched();
+ }
+
+ /*
+ * Turn on host mode, automatic generation of SOF packets, and
+ * enable pull-down registers on DM/DP:
+ */
+ max3421_hcd->mode = (BIT(MAX3421_MODE_HOST_BIT) |
+ BIT(MAX3421_MODE_SOFKAENAB_BIT) |
+ BIT(MAX3421_MODE_DMPULLDN_BIT) |
+ BIT(MAX3421_MODE_DPPULLDN_BIT));
+ spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
+
+ /* reset frame-number: */
+ max3421_hcd->frame_number = USB_MAX_FRAME_NUMBER;
+ spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_FRMRST_BIT));
+
+ /* sample the state of the D+ and D- lines */
+ spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_SAMPLEBUS_BIT));
+ max3421_detect_conn(hcd);
+
+ /* enable frame, connection-detected, and bus-event interrupts: */
+ max3421_hcd->hien = (BIT(MAX3421_HI_FRAME_BIT) |
+ BIT(MAX3421_HI_CONDET_BIT) |
+ BIT(MAX3421_HI_BUSEVENT_BIT));
+ spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
+
+ /* enable interrupts: */
+ spi_wr8(hcd, MAX3421_REG_CPUCTL, BIT(MAX3421_CPUCTL_IE_BIT));
+ return 1;
+}
+
+static int
+max3421_urb_done(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned long flags;
+ struct urb *urb;
+ int status;
+
+ status = max3421_hcd->urb_done;
+ max3421_hcd->urb_done = 0;
+ if (status > 0)
+ status = 0;
+ urb = max3421_hcd->curr_urb;
+ if (urb) {
+ max3421_hcd->curr_urb = NULL;
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+ /* must be called without the HCD spinlock: */
+ usb_hcd_giveback_urb(hcd, urb, status);
+ }
+ return 1;
+}
+
+static int
+max3421_spi_thread(void *dev_id)
+{
+ struct usb_hcd *hcd = dev_id;
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int i, i_worked = 1;
+
+ /* set full-duplex SPI mode, low-active interrupt pin: */
+ spi_wr8(hcd, MAX3421_REG_PINCTL,
+ (BIT(MAX3421_PINCTL_FDUPSPI_BIT) | /* full-duplex */
+ BIT(MAX3421_PINCTL_INTLEVEL_BIT))); /* low-active irq */
+
+ while (!kthread_should_stop()) {
+ max3421_hcd->rev = spi_rd8(hcd, MAX3421_REG_REVISION);
+ if (max3421_hcd->rev == 0x12 || max3421_hcd->rev == 0x13)
+ break;
+ dev_err(&spi->dev, "bad rev 0x%02x", max3421_hcd->rev);
+ msleep(10000);
+ }
+ dev_info(&spi->dev, "rev 0x%x, SPI clk %dHz, bpw %u, irq %d\n",
+ max3421_hcd->rev, spi->max_speed_hz, spi->bits_per_word,
+ spi->irq);
+
+ while (!kthread_should_stop()) {
+ if (!i_worked) {
+ /*
+ * We'll be waiting for wakeups from the hard
+ * interrupt handler, so now is a good time to
+ * sync our hien with the chip:
+ */
+ spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (max3421_hcd->do_enable_irq) {
+ max3421_hcd->do_enable_irq = 0;
+ enable_irq(spi->irq);
+ }
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ }
+
+ i_worked = 0;
+
+ if (max3421_hcd->urb_done)
+ i_worked |= max3421_urb_done(hcd);
+ else if (max3421_handle_irqs(hcd))
+ i_worked = 1;
+ else if (!max3421_hcd->curr_urb)
+ i_worked |= max3421_select_and_start_urb(hcd);
+
+ if (max3421_hcd->do_reset_hcd) {
+ /* reset the HCD: */
+ max3421_hcd->do_reset_hcd = 0;
+ i_worked |= max3421_reset_hcd(hcd);
+ }
+ if (max3421_hcd->do_reset_port) {
+ /* perform a USB bus reset: */
+ max3421_hcd->do_reset_port = 0;
+ spi_wr8(hcd, MAX3421_REG_HCTL,
+ BIT(MAX3421_HCTL_BUSRST_BIT));
+ i_worked = 1;
+ }
+ if (max3421_hcd->do_check_unlink) {
+ max3421_hcd->do_check_unlink = 0;
+ i_worked |= max3421_check_unlink(hcd);
+ }
+ if (max3421_hcd->do_iopin_update) {
+ /*
+ * IOPINS1/IOPINS2 do not auto-increment, so we can't
+ * use spi_wr_buf().
+ */
+ for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) {
+ u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1);
+
+ val = ((val & 0xf0) |
+ (max3421_hcd->iopins[i] & 0x0f));
+ spi_wr8(hcd, MAX3421_REG_IOPINS1 + i, val);
+ max3421_hcd->iopins[i] = val;
+ }
+ max3421_hcd->do_iopin_update = 0;
+ i_worked = 1;
+ }
+ }
+ set_current_state(TASK_RUNNING);
+ dev_info(&spi->dev, "SPI thread exiting");
+ return 0;
+}
+
+static int
+max3421_reset_port(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ max3421_hcd->port_status &= ~(USB_PORT_STAT_ENABLE |
+ USB_PORT_STAT_LOW_SPEED);
+ max3421_hcd->do_reset_port = 1;
+ wake_up_process(max3421_hcd->spi_thread);
+ return 0;
+}
+
+static int
+max3421_reset(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ hcd->self.sg_tablesize = 0;
+ hcd->speed = HCD_USB2;
+ hcd->self.root_hub->speed = USB_SPEED_FULL;
+ max3421_hcd->do_reset_hcd = 1;
+ wake_up_process(max3421_hcd->spi_thread);
+ return 0;
+}
+
+static int
+max3421_start(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ spin_lock_init(&max3421_hcd->lock);
+ max3421_hcd->rh_state = MAX3421_RH_RUNNING;
+
+ INIT_LIST_HEAD(&max3421_hcd->ep_list);
+
+ hcd->power_budget = POWER_BUDGET;
+ hcd->state = HC_STATE_RUNNING;
+ hcd->uses_new_polling = 1;
+ return 0;
+}
+
+static void
+max3421_stop(struct usb_hcd *hcd)
+{
+}
+
+static int
+max3421_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct max3421_ep *max3421_ep;
+ unsigned long flags;
+ int retval;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_INTERRUPT:
+ case PIPE_ISOCHRONOUS:
+ if (urb->interval < 0) {
+ dev_err(&spi->dev,
+ "%s: interval=%d for intr-/iso-pipe; expected > 0\n",
+ __func__, urb->interval);
+ return -EINVAL;
+ }
+ default:
+ break;
+ }
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ max3421_ep = urb->ep->hcpriv;
+ if (!max3421_ep) {
+ /* gets freed in max3421_endpoint_disable: */
+ max3421_ep = kzalloc(sizeof(struct max3421_ep), mem_flags);
+ if (!max3421_ep) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ max3421_ep->ep = urb->ep;
+ max3421_ep->last_active = max3421_hcd->frame_number;
+ urb->ep->hcpriv = max3421_ep;
+
+ list_add_tail(&max3421_ep->ep_list, &max3421_hcd->ep_list);
+ }
+
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval == 0) {
+ /* Since we added to the queue, restart scheduling: */
+ max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
+ wake_up_process(max3421_hcd->spi_thread);
+ }
+
+out:
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+static int
+max3421_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ /*
+ * This will set urb->unlinked which in turn causes the entry
+ * to be dropped at the next opportunity.
+ */
+ retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (retval == 0) {
+ max3421_hcd->do_check_unlink = 1;
+ wake_up_process(max3421_hcd->spi_thread);
+ }
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+static void
+max3421_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ if (ep->hcpriv) {
+ struct max3421_ep *max3421_ep = ep->hcpriv;
+
+ /* remove myself from the ep_list: */
+ if (!list_empty(&max3421_ep->ep_list))
+ list_del(&max3421_ep->ep_list);
+ kfree(max3421_ep);
+ ep->hcpriv = NULL;
+ }
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+}
+
+static int
+max3421_get_frame_number(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ return max3421_hcd->frame_number;
+}
+
+/*
+ * Should return a non-zero value when any port is undergoing a resume
+ * transition while the root hub is suspended.
+ */
+static int
+max3421_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ if (!HCD_HW_ACCESSIBLE(hcd))
+ goto done;
+
+ *buf = 0;
+ if ((max3421_hcd->port_status & PORT_C_MASK) != 0) {
+ *buf = (1 << 1); /* a hub over-current condition exists */
+ dev_dbg(hcd->self.controller,
+ "port status 0x%08x has changes\n",
+ max3421_hcd->port_status);
+ retval = 1;
+ if (max3421_hcd->rh_state == MAX3421_RH_SUSPENDED)
+ usb_hcd_resume_root_hub(hcd);
+ }
+done:
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+static inline void
+hub_descriptor(struct usb_hub_descriptor *desc)
+{
+ memset(desc, 0, sizeof(*desc));
+ /*
+ * See Table 11-13: Hub Descriptor in USB 2.0 spec.
+ */
+ desc->bDescriptorType = 0x29; /* hub descriptor */
+ desc->bDescLength = 9;
+ desc->wHubCharacteristics = cpu_to_le16(0x0001);
+ desc->bNbrPorts = 1;
+}
+
+/*
+ * Set the MAX3421E general-purpose output with number PIN_NUMBER to
+ * VALUE (0 or 1). PIN_NUMBER may be in the range from 1-8. For
+ * any other value, this function acts as a no-op.
+ */
+static void
+max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u8 mask, idx;
+
+ --pin_number;
+ if (pin_number > 7)
+ return;
+
+ mask = 1u << pin_number;
+ idx = pin_number / 4;
+
+ if (value)
+ max3421_hcd->iopins[idx] |= mask;
+ else
+ max3421_hcd->iopins[idx] &= ~mask;
+ max3421_hcd->do_iopin_update = 1;
+ wake_up_process(max3421_hcd->spi_thread);
+}
+
+static int
+max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index,
+ char *buf, u16 length)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct max3421_hcd_platform_data *pdata;
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ pdata = spi->dev.platform_data;
+
+ switch (type_req) {
+ case ClearHubFeature:
+ break;
+ case ClearPortFeature:
+ switch (value) {
+ case USB_PORT_FEAT_SUSPEND:
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(hcd->self.controller, "power-off\n");
+ max3421_gpout_set_value(hcd, pdata->vbus_gpout,
+ !pdata->vbus_active_level);
+ /* FALLS THROUGH */
+ default:
+ max3421_hcd->port_status &= ~(1 << value);
+ }
+ break;
+ case GetHubDescriptor:
+ hub_descriptor((struct usb_hub_descriptor *) buf);
+ break;
+
+ case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
+ case GetPortErrorCount:
+ case SetHubDepth:
+ /* USB3 only */
+ goto error;
+
+ case GetHubStatus:
+ *(__le32 *) buf = cpu_to_le32(0);
+ break;
+
+ case GetPortStatus:
+ if (index != 1) {
+ retval = -EPIPE;
+ goto error;
+ }
+ ((__le16 *) buf)[0] = cpu_to_le16(max3421_hcd->port_status);
+ ((__le16 *) buf)[1] =
+ cpu_to_le16(max3421_hcd->port_status >> 16);
+ break;
+
+ case SetHubFeature:
+ retval = -EPIPE;
+ break;
+
+ case SetPortFeature:
+ switch (value) {
+ case USB_PORT_FEAT_LINK_STATE:
+ case USB_PORT_FEAT_U1_TIMEOUT:
+ case USB_PORT_FEAT_U2_TIMEOUT:
+ case USB_PORT_FEAT_BH_PORT_RESET:
+ goto error;
+ case USB_PORT_FEAT_SUSPEND:
+ if (max3421_hcd->active)
+ max3421_hcd->port_status |=
+ USB_PORT_STAT_SUSPEND;
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(hcd->self.controller, "power-on\n");
+ max3421_hcd->port_status |= USB_PORT_STAT_POWER;
+ max3421_gpout_set_value(hcd, pdata->vbus_gpout,
+ pdata->vbus_active_level);
+ break;
+ case USB_PORT_FEAT_RESET:
+ max3421_reset_port(hcd);
+ /* FALLS THROUGH */
+ default:
+ if ((max3421_hcd->port_status & USB_PORT_STAT_POWER)
+ != 0)
+ max3421_hcd->port_status |= (1 << value);
+ }
+ break;
+
+ default:
+ dev_dbg(hcd->self.controller,
+ "hub control req%04x v%04x i%04x l%d\n",
+ type_req, value, index, length);
+error: /* "protocol stall" on error */
+ retval = -EPIPE;
+ }
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+static int
+max3421_bus_suspend(struct usb_hcd *hcd)
+{
+ return -1;
+}
+
+static int
+max3421_bus_resume(struct usb_hcd *hcd)
+{
+ return -1;
+}
+
+/*
+ * The SPI driver already takes care of DMA-mapping/unmapping, so no
+ * reason to do it twice.
+ */
+static int
+max3421_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+ return 0;
+}
+
+static void
+max3421_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+}
+
+static struct hc_driver max3421_hcd_desc = {
+ .description = "max3421",
+ .product_desc = DRIVER_DESC,
+ .hcd_priv_size = sizeof(struct max3421_hcd),
+ .flags = HCD_USB11,
+ .reset = max3421_reset,
+ .start = max3421_start,
+ .stop = max3421_stop,
+ .get_frame_number = max3421_get_frame_number,
+ .urb_enqueue = max3421_urb_enqueue,
+ .urb_dequeue = max3421_urb_dequeue,
+ .map_urb_for_dma = max3421_map_urb_for_dma,
+ .unmap_urb_for_dma = max3421_unmap_urb_for_dma,
+ .endpoint_disable = max3421_endpoint_disable,
+ .hub_status_data = max3421_hub_status_data,
+ .hub_control = max3421_hub_control,
+ .bus_suspend = max3421_bus_suspend,
+ .bus_resume = max3421_bus_resume,
+};
+
+static int
+max3421_probe(struct spi_device *spi)
+{
+ struct max3421_hcd *max3421_hcd;
+ struct usb_hcd *hcd = NULL;
+ int retval = -ENOMEM;
+
+ if (spi_setup(spi) < 0) {
+ dev_err(&spi->dev, "Unable to setup SPI bus");
+ return -EFAULT;
+ }
+
+ hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev,
+ dev_name(&spi->dev));
+ if (!hcd) {
+ dev_err(&spi->dev, "failed to create HCD structure\n");
+ goto error;
+ }
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ max3421_hcd = hcd_to_max3421(hcd);
+ max3421_hcd->next = max3421_hcd_list;
+ max3421_hcd_list = max3421_hcd;
+ INIT_LIST_HEAD(&max3421_hcd->ep_list);
+
+ max3421_hcd->tx = kmalloc(sizeof(*max3421_hcd->tx), GFP_KERNEL);
+ if (!max3421_hcd->tx) {
+ dev_err(&spi->dev, "failed to kmalloc tx buffer\n");
+ goto error;
+ }
+ max3421_hcd->rx = kmalloc(sizeof(*max3421_hcd->rx), GFP_KERNEL);
+ if (!max3421_hcd->rx) {
+ dev_err(&spi->dev, "failed to kmalloc rx buffer\n");
+ goto error;
+ }
+
+ max3421_hcd->spi_thread = kthread_run(max3421_spi_thread, hcd,
+ "max3421_spi_thread");
+ if (max3421_hcd->spi_thread == ERR_PTR(-ENOMEM)) {
+ dev_err(&spi->dev,
+ "failed to create SPI thread (out of memory)\n");
+ goto error;
+ }
+
+ retval = usb_add_hcd(hcd, 0, 0);
+ if (retval) {
+ dev_err(&spi->dev, "failed to add HCD\n");
+ goto error;
+ }
+
+ retval = request_irq(spi->irq, max3421_irq_handler,
+ IRQF_TRIGGER_LOW, "max3421", hcd);
+ if (retval < 0) {
+ dev_err(&spi->dev, "failed to request irq %d\n", spi->irq);
+ goto error;
+ }
+ return 0;
+
+error:
+ if (hcd) {
+ kfree(max3421_hcd->tx);
+ kfree(max3421_hcd->rx);
+ if (max3421_hcd->spi_thread)
+ kthread_stop(max3421_hcd->spi_thread);
+ usb_put_hcd(hcd);
+ }
+ return retval;
+}
+
+static int
+max3421_remove(struct spi_device *spi)
+{
+ struct max3421_hcd *max3421_hcd = NULL, **prev;
+ struct usb_hcd *hcd = NULL;
+ unsigned long flags;
+
+ for (prev = &max3421_hcd_list; *prev; prev = &(*prev)->next) {
+ max3421_hcd = *prev;
+ hcd = max3421_to_hcd(max3421_hcd);
+ if (hcd->self.controller == &spi->dev)
+ break;
+ }
+ if (!max3421_hcd) {
+ dev_err(&spi->dev, "no MAX3421 HCD found for SPI device %p\n",
+ spi);
+ return -ENODEV;
+ }
+
+ usb_remove_hcd(hcd);
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ kthread_stop(max3421_hcd->spi_thread);
+ *prev = max3421_hcd->next;
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+ free_irq(spi->irq, hcd);
+
+ usb_put_hcd(hcd);
+ return 0;
+}
+
+static struct spi_driver max3421_driver = {
+ .probe = max3421_probe,
+ .remove = max3421_remove,
+ .driver = {
+ .name = "max3421-hcd",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_spi_driver(max3421_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("David Mosberger <davidm@egauge.net>");
+MODULE_LICENSE("GPL");
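
The max3421 driver above pulls its VBUS-switching configuration (vbus_gpout, vbus_active_level) out of spi->dev.platform_data, so a board has to describe the chip when registering the SPI device. The board-file sketch below only illustrates that hand-off; the header location is assumed to be <linux/platform_data/max3421-hcd.h>, and the IRQ number, bus/chip-select and clock rate are hypothetical board wiring, not part of this patch.

#include <linux/spi/spi.h>
#include <linux/platform_data/max3421-hcd.h>	/* assumed header location */

#define BOARD_MAX3421_IRQ	42	/* hypothetical: IRQ of the GPIO wired to the chip's INT pin */

static struct max3421_hcd_platform_data max3421_pdata = {
	.vbus_gpout		= 1,	/* GPOUT1 drives the VBUS switch */
	.vbus_active_level	= 1,	/* switch is active-high */
};

static struct spi_board_info max3421_board_info[] __initdata = {
	{
		.modalias	= "max3421-hcd",	/* matches the spi_driver name above */
		.max_speed_hz	= 26000000,		/* MAX3421E SPI limit per its datasheet */
		.bus_num	= 0,
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
		.irq		= BOARD_MAX3421_IRQ,
		.platform_data	= &max3421_pdata,
	},
};

/* Registered from the board's init code:
 *	spi_register_board_info(max3421_board_info, ARRAY_SIZE(max3421_board_info));
 */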
diff --git a/drivers/usb/host/octeon2-common.c b/drivers/usb/host/octeon2-common.c
index 72d672cfcf3..d9df423f3d1 100644
--- a/drivers/usb/host/octeon2-common.c
+++ b/drivers/usb/host/octeon2-common.c
@@ -3,18 +3,19 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2010 Cavium Networks
+ * Copyright (C) 2010, 2011 Cavium Networks
*/
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/delay.h>
-#include <asm/atomic.h>
-
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-uctlx-defs.h>
-static atomic_t octeon2_usb_clock_start_cnt = ATOMIC_INIT(0);
+static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
+
+static int octeon2_usb_clock_start_cnt;
void octeon2_usb_clocks_start(void)
{
@@ -26,8 +27,12 @@ void octeon2_usb_clocks_start(void)
int i;
unsigned long io_clk_64_to_ns;
- if (atomic_inc_return(&octeon2_usb_clock_start_cnt) != 1)
- return;
+
+ mutex_lock(&octeon2_usb_clocks_mutex);
+
+ octeon2_usb_clock_start_cnt++;
+ if (octeon2_usb_clock_start_cnt != 1)
+ goto exit;
io_clk_64_to_ns = 64000000000ull / octeon_get_io_clock_rate();
@@ -43,6 +48,13 @@ void octeon2_usb_clocks_start(void)
/* Step 3: Configure the reference clock, PHY, and HCLK */
clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+
+ /*
+ * If the UCTL looks like it has already been started, skip
+ * the initialization, otherwise bus errors are obtained.
+ */
+ if (clk_rst_ctl.s.hrst)
+ goto end_clock;
/* 3a */
clk_rst_ctl.s.p_por = 1;
clk_rst_ctl.s.hrst = 0;
@@ -158,28 +170,31 @@ void octeon2_usb_clocks_start(void)
clk_rst_ctl.s.hrst = 1;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+end_clock:
/* Now we can set some other registers. */
for (i = 0; i <= 1; i++) {
port_ctl_status.u64 =
cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
- /* Set txvreftune to 15 to obtain complient 'eye' diagram. */
+ /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */
port_ctl_status.s.txvreftune = 15;
+ port_ctl_status.s.txrisetune = 1;
+ port_ctl_status.s.txpreemphasistune = 1;
cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
port_ctl_status.u64);
}
+
+ /* Set uSOF cycle period to 60,000 bits. */
+ cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull);
+exit:
+ mutex_unlock(&octeon2_usb_clocks_mutex);
}
EXPORT_SYMBOL(octeon2_usb_clocks_start);
void octeon2_usb_clocks_stop(void)
{
- union cvmx_uctlx_if_ena if_ena;
-
- if (atomic_dec_return(&octeon2_usb_clock_start_cnt) != 0)
- return;
-
- if_ena.u64 = 0;
- if_ena.s.en = 0;
- cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64);
+ mutex_lock(&octeon2_usb_clocks_mutex);
+ octeon2_usb_clock_start_cnt--;
+ mutex_unlock(&octeon2_usb_clocks_mutex);
}
EXPORT_SYMBOL(octeon2_usb_clocks_stop);
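
The conversion above replaces an atomic reference count with a counter protected by a mutex, so the first caller finishes the whole UCTL bring-up before any concurrent caller returns. A minimal, generic sketch of that pattern (nothing Octeon-specific, names are illustrative):

#include <linux/mutex.h>

static DEFINE_MUTEX(res_mutex);
static int res_use_count;

void res_get(void)
{
	mutex_lock(&res_mutex);
	if (++res_use_count == 1) {
		/*
		 * One-time, possibly slow, hardware bring-up runs here;
		 * later callers block on the mutex until it completes,
		 * instead of racing past a bare atomic counter.
		 */
	}
	mutex_unlock(&res_mutex);
}

void res_put(void)
{
	mutex_lock(&res_mutex);
	--res_use_count;
	mutex_unlock(&res_mutex);
}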
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 944291e10f9..e49eb4f90f5 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -13,20 +13,38 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/atmel.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <mach/hardware.h>
#include <asm/gpio.h>
-#include <mach/board.h>
#include <mach/cpu.h>
-#ifndef CONFIG_ARCH_AT91
-#error "CONFIG_ARCH_AT91 must be defined."
-#endif
+#include "ohci.h"
+
+#define valid_port(index) ((index) >= 0 && (index) < AT91_MAX_USBH_PORTS)
+#define at91_for_each_port(index) \
+ for ((index) = 0; (index) < AT91_MAX_USBH_PORTS; (index)++)
+
+/* interface, function and usb clocks; sometimes also an AHB clock */
+static struct clk *iclk, *fclk, *uclk, *hclk;
/* interface and function clocks; sometimes also an AHB clock */
-static struct clk *iclk, *fclk, *hclk;
+
+#define DRIVER_DESC "OHCI Atmel driver"
+
+static const char hcd_name[] = "ohci-atmel";
+
+static struct hc_driver __read_mostly ohci_at91_hc_driver;
static int clocked;
extern int usb_disabled(void);
@@ -35,19 +53,23 @@ extern int usb_disabled(void);
static void at91_start_clock(void)
{
- if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
- clk_enable(hclk);
- clk_enable(iclk);
- clk_enable(fclk);
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+ clk_set_rate(uclk, 48000000);
+ clk_prepare_enable(uclk);
+ }
+ clk_prepare_enable(hclk);
+ clk_prepare_enable(iclk);
+ clk_prepare_enable(fclk);
clocked = 1;
}
static void at91_stop_clock(void)
{
- clk_disable(fclk);
- clk_disable(iclk);
- if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
- clk_disable(hclk);
+ clk_disable_unprepare(fclk);
+ clk_disable_unprepare(iclk);
+ clk_disable_unprepare(hclk);
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ clk_disable_unprepare(uclk);
clocked = 0;
}
@@ -107,65 +129,80 @@ static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
static int usb_hcd_at91_probe(const struct hc_driver *driver,
struct platform_device *pdev)
{
+ struct at91_usbh_data *board;
+ struct ohci_hcd *ohci;
int retval;
struct usb_hcd *hcd = NULL;
-
- if (pdev->num_resources != 2) {
- pr_debug("hcd probe: invalid num_resources");
- return -ENODEV;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(dev, "hcd probe: missing memory resource\n");
+ return -ENXIO;
}
- if ((pdev->resource[0].flags != IORESOURCE_MEM)
- || (pdev->resource[1].flags != IORESOURCE_IRQ)) {
- pr_debug("hcd probe: invalid resource type\n");
- return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_dbg(dev, "hcd probe: missing irq resource\n");
+ return irq;
}
- hcd = usb_create_hcd(driver, &pdev->dev, "at91");
+ hcd = usb_create_hcd(driver, dev, "at91");
if (!hcd)
return -ENOMEM;
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug("request_mem_region failed\n");
- retval = -EBUSY;
- goto err1;
+ hcd->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
+ goto err;
}
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- pr_debug("ioremap failed\n");
- retval = -EIO;
- goto err2;
+ iclk = devm_clk_get(dev, "ohci_clk");
+ if (IS_ERR(iclk)) {
+ dev_err(dev, "failed to get ohci_clk\n");
+ retval = PTR_ERR(iclk);
+ goto err;
+ }
+ fclk = devm_clk_get(dev, "uhpck");
+ if (IS_ERR(fclk)) {
+ dev_err(dev, "failed to get uhpck\n");
+ retval = PTR_ERR(fclk);
+ goto err;
+ }
+ hclk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(hclk)) {
+ dev_err(dev, "failed to get hclk\n");
+ retval = PTR_ERR(hclk);
+ goto err;
+ }
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+ uclk = devm_clk_get(dev, "usb_clk");
+ if (IS_ERR(uclk)) {
+ dev_err(dev, "failed to get uclk\n");
+ retval = PTR_ERR(uclk);
+ goto err;
+ }
}
- iclk = clk_get(&pdev->dev, "ohci_clk");
- fclk = clk_get(&pdev->dev, "uhpck");
- if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
- hclk = clk_get(&pdev->dev, "hck0");
-
+ board = hcd->self.controller->platform_data;
+ ohci = hcd_to_ohci(hcd);
+ ohci->num_ports = board->ports;
at91_start_hc(pdev);
- ohci_hcd_init(hcd_to_ohci(hcd));
- retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
- if (retval == 0)
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval == 0) {
+ device_wakeup_enable(hcd->self.controller);
return retval;
+ }
/* Error handling */
at91_stop_hc(pdev);
- if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
- clk_put(hclk);
- clk_put(fclk);
- clk_put(iclk);
-
- iounmap(hcd->regs);
-
- err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-
- err1:
+ err:
usb_put_hcd(hcd);
return retval;
}
@@ -188,103 +225,347 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd,
{
usb_remove_hcd(hcd);
at91_stop_hc(pdev);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
+}
+
+/*-------------------------------------------------------------------------*/
+static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int enable)
+{
+ if (!valid_port(port))
+ return;
- if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
- clk_put(hclk);
- clk_put(fclk);
- clk_put(iclk);
- fclk = iclk = hclk = NULL;
+ if (!gpio_is_valid(pdata->vbus_pin[port]))
+ return;
- dev_set_drvdata(&pdev->dev, NULL);
+ gpio_set_value(pdata->vbus_pin[port],
+ pdata->vbus_pin_active_low[port] ^ enable);
}
-/*-------------------------------------------------------------------------*/
+static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
+{
+ if (!valid_port(port))
+ return -EINVAL;
+
+ if (!gpio_is_valid(pdata->vbus_pin[port]))
+ return -EINVAL;
+
+ return gpio_get_value(pdata->vbus_pin[port]) ^
+ pdata->vbus_pin_active_low[port];
+}
-static int __devinit
-ohci_at91_start (struct usb_hcd *hcd)
+/*
+ * Update the status data from the hub with the over-current indicator change.
+ */
+static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf)
{
- struct at91_usbh_data *board = hcd->self.controller->platform_data;
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
+ struct at91_usbh_data *pdata = hcd->self.controller->platform_data;
+ int length = ohci_hub_status_data(hcd, buf);
+ int port;
+
+ at91_for_each_port(port) {
+ if (pdata->overcurrent_changed[port]) {
+ if (!length)
+ length = 1;
+ buf[0] |= 1 << (port + 1);
+ }
+ }
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
+ return length;
+}
- ohci->num_ports = board->ports;
+/*
+ * Look at the control requests to the root hub and see if we need to override.
+ */
+static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct at91_usbh_data *pdata = dev_get_platdata(hcd->self.controller);
+ struct usb_hub_descriptor *desc;
+ int ret = -EINVAL;
+ u32 *data = (u32 *)buf;
+
+ dev_dbg(hcd->self.controller,
+ "ohci_at91_hub_control(%p,0x%04x,0x%04x,0x%04x,%p,%04x)\n",
+ hcd, typeReq, wValue, wIndex, buf, wLength);
+
+ wIndex--;
+
+ switch (typeReq) {
+ case SetPortFeature:
+ if (wValue == USB_PORT_FEAT_POWER) {
+ dev_dbg(hcd->self.controller, "SetPortFeat: POWER\n");
+ if (valid_port(wIndex)) {
+ ohci_at91_usb_set_power(pdata, wIndex, 1);
+ ret = 0;
+ }
+
+ goto out;
+ }
+ break;
+
+ case ClearPortFeature:
+ switch (wValue) {
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(hcd->self.controller,
+ "ClearPortFeature: C_OVER_CURRENT\n");
+
+ if (valid_port(wIndex)) {
+ pdata->overcurrent_changed[wIndex] = 0;
+ pdata->overcurrent_status[wIndex] = 0;
+ }
+
+ goto out;
+
+ case USB_PORT_FEAT_OVER_CURRENT:
+ dev_dbg(hcd->self.controller,
+ "ClearPortFeature: OVER_CURRENT\n");
+
+ if (valid_port(wIndex))
+ pdata->overcurrent_status[wIndex] = 0;
+
+ goto out;
+
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(hcd->self.controller,
+ "ClearPortFeature: POWER\n");
+
+ if (valid_port(wIndex)) {
+ ohci_at91_usb_set_power(pdata, wIndex, 0);
+ return 0;
+ }
+ }
+ break;
+ }
+
+ ret = ohci_hub_control(hcd, typeReq, wValue, wIndex + 1, buf, wLength);
+ if (ret)
+ goto out;
+
+ switch (typeReq) {
+ case GetHubDescriptor:
+
+ /* update the hub's descriptor */
+
+ desc = (struct usb_hub_descriptor *)buf;
+
+ dev_dbg(hcd->self.controller, "wHubCharacteristics 0x%04x\n",
+ desc->wHubCharacteristics);
+
+ /* remove the old configurations for power-switching, and
+ * over-current protection, and insert our new configuration
+ */
+
+ desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_LPSM);
+ desc->wHubCharacteristics |= cpu_to_le16(0x0001);
+
+ if (pdata->overcurrent_supported) {
+ desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_OCPM);
+ desc->wHubCharacteristics |= cpu_to_le16(0x0008|0x0001);
+ }
+
+ dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n",
+ desc->wHubCharacteristics);
- if ((ret = ohci_run(ohci)) < 0) {
- err("can't start %s", hcd->self.bus_name);
- ohci_stop(hcd);
return ret;
+
+ case GetPortStatus:
+ /* check port status */
+
+ dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex);
+
+ if (valid_port(wIndex)) {
+ if (!ohci_at91_usb_get_power(pdata, wIndex))
+ *data &= ~cpu_to_le32(RH_PS_PPS);
+
+ if (pdata->overcurrent_changed[wIndex])
+ *data |= cpu_to_le32(RH_PS_OCIC);
+
+ if (pdata->overcurrent_status[wIndex])
+ *data |= cpu_to_le32(RH_PS_POCI);
+ }
}
- return 0;
+
+ out:
+ return ret;
}
/*-------------------------------------------------------------------------*/
-static const struct hc_driver ohci_at91_hc_driver = {
- .description = hcd_name,
- .product_desc = "AT91 OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
+static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
+{
+ struct platform_device *pdev = data;
+ struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev);
+ int val, gpio, port;
+
+ /* From the GPIO notifying the over-current situation, find
+ * out the corresponding port */
+ at91_for_each_port(port) {
+ if (gpio_is_valid(pdata->overcurrent_pin[port]) &&
+ gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
+ gpio = pdata->overcurrent_pin[port];
+ break;
+ }
+ }
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
+ if (port == AT91_MAX_USBH_PORTS) {
+ dev_err(& pdev->dev, "overcurrent interrupt from unknown GPIO\n");
+ return IRQ_HANDLED;
+ }
- /*
- * basic lifecycle operations
- */
- .start = ohci_at91_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
+ val = gpio_get_value(gpio);
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
+ /* When notified of an over-current situation, disable power
+ on the corresponding port, and mark this port in
+ over-current. */
+ if (!val) {
+ ohci_at91_usb_set_power(pdata, port, 0);
+ pdata->overcurrent_status[port] = 1;
+ pdata->overcurrent_changed[port] = 1;
+ }
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
+ dev_dbg(& pdev->dev, "overcurrent situation %s\n",
+ val ? "exited" : "notified");
- /*
- * root hub support
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id at91_ohci_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-ohci" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
+
+static int ohci_at91_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int i, gpio, ret;
+ enum of_gpio_flags flags;
+ struct at91_usbh_data *pdata;
+ u32 ports;
+
+ if (!np)
+ return 0;
+
+ /* Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
*/
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ if (!of_property_read_u32(np, "num-ports", &ports))
+ pdata->ports = ports;
+
+ at91_for_each_port(i) {
+ gpio = of_get_named_gpio_flags(np, "atmel,vbus-gpio", i, &flags);
+ pdata->vbus_pin[i] = gpio;
+ if (!gpio_is_valid(gpio))
+ continue;
+ pdata->vbus_pin_active_low[i] = flags & OF_GPIO_ACTIVE_LOW;
+ }
+
+ at91_for_each_port(i)
+ pdata->overcurrent_pin[i] =
+ of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
+
+ pdev->dev.platform_data = pdata;
+
+ return 0;
+}
+#else
+static int ohci_at91_of_init(struct platform_device *pdev)
+{
+ return 0;
+}
#endif
- .start_port_reset = ohci_start_port_reset,
-};
/*-------------------------------------------------------------------------*/
static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
{
- struct at91_usbh_data *pdata = pdev->dev.platform_data;
+ struct at91_usbh_data *pdata;
int i;
+ int gpio;
+ int ret;
+
+ ret = ohci_at91_of_init(pdev);
+ if (ret)
+ return ret;
+
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
- /* REVISIT make the driver support per-port power switching,
- * and also overcurrent detection. Here we assume the ports
- * are always powered while this driver is active, and use
- * active-low power switches.
- */
- for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
- if (pdata->vbus_pin[i] <= 0)
+ at91_for_each_port(i) {
+ /*
+ * do not configure PIO if not in relation with
+ * real USB port on board
+ */
+ if (i >= pdata->ports) {
+ pdata->vbus_pin[i] = -EINVAL;
+ pdata->overcurrent_pin[i] = -EINVAL;
+ break;
+ }
+
+ if (!gpio_is_valid(pdata->vbus_pin[i]))
+ continue;
+ gpio = pdata->vbus_pin[i];
+
+ ret = gpio_request(gpio, "ohci_vbus");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't request vbus gpio %d\n", gpio);
continue;
- gpio_request(pdata->vbus_pin[i], "ohci_vbus");
- gpio_direction_output(pdata->vbus_pin[i], 0);
+ }
+ ret = gpio_direction_output(gpio,
+ !pdata->vbus_pin_active_low[i]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't put vbus gpio %d as output %d\n",
+ gpio, !pdata->vbus_pin_active_low[i]);
+ gpio_free(gpio);
+ continue;
+ }
+
+ ohci_at91_usb_set_power(pdata, i, 1);
+ }
+
+ at91_for_each_port(i) {
+ if (!gpio_is_valid(pdata->overcurrent_pin[i]))
+ continue;
+ gpio = pdata->overcurrent_pin[i];
+
+ ret = gpio_request(gpio, "ohci_overcurrent");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't request overcurrent gpio %d\n",
+ gpio);
+ continue;
+ }
+
+ ret = gpio_direction_input(gpio);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't configure overcurrent gpio %d as input\n",
+ gpio);
+ gpio_free(gpio);
+ continue;
+ }
+
+ ret = request_irq(gpio_to_irq(gpio),
+ ohci_hcd_at91_overcurrent_irq,
+ IRQF_SHARED, "ohci_overcurrent", pdev);
+ if (ret) {
+ gpio_free(gpio);
+ dev_err(&pdev->dev,
+ "can't get gpio IRQ for overcurrent\n");
+ }
}
}
@@ -294,16 +575,23 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
{
- struct at91_usbh_data *pdata = pdev->dev.platform_data;
+ struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev);
int i;
if (pdata) {
- for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
- if (pdata->vbus_pin[i] <= 0)
+ at91_for_each_port(i) {
+ if (!gpio_is_valid(pdata->vbus_pin[i]))
continue;
- gpio_direction_output(pdata->vbus_pin[i], 1);
+ ohci_at91_usb_set_power(pdata, i, 0);
gpio_free(pdata->vbus_pin[i]);
}
+
+ at91_for_each_port(i) {
+ if (!gpio_is_valid(pdata->overcurrent_pin[i]))
+ continue;
+ free_irq(gpio_to_irq(pdata->overcurrent_pin[i]), pdev);
+ gpio_free(pdata->overcurrent_pin[i]);
+ }
}
device_init_wakeup(&pdev->dev, 0);
@@ -318,10 +606,17 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(&pdev->dev);
+ int ret;
- if (device_may_wakeup(&pdev->dev))
+ if (do_wakeup)
enable_irq_wake(hcd->irq);
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret) {
+ disable_irq_wake(hcd->irq);
+ return ret;
+ }
/*
* The integrated transceivers seem unable to notice disconnect,
* reconnect, or wakeup without the 48 MHz clock active. so for
@@ -330,13 +625,17 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
* REVISIT: some boards will be able to turn VBUS off...
*/
if (at91_suspend_entering_slow_clock()) {
- ohci_usb_reset (ohci);
+ ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
+ ohci->hc_control &= OHCI_CTRL_RWC;
+ ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
+ ohci->rh_state = OHCI_RH_HALTED;
+
/* flush the writes */
(void) ohci_readl (ohci, &ohci->regs->control);
at91_stop_clock();
}
- return 0;
+ return ret;
}
static int ohci_hcd_at91_drv_resume(struct platform_device *pdev)
@@ -349,7 +648,7 @@ static int ohci_hcd_at91_drv_resume(struct platform_device *pdev)
if (!clocked)
at91_start_clock();
- ohci_finish_controller_resume(hcd);
+ ohci_resume(hcd, false);
return 0;
}
#else
@@ -357,8 +656,6 @@ static int ohci_hcd_at91_drv_resume(struct platform_device *pdev)
#define ohci_hcd_at91_drv_resume NULL
#endif
-MODULE_ALIAS("platform:at91_ohci");
-
static struct platform_driver ohci_hcd_at91_driver = {
.probe = ohci_hcd_at91_drv_probe,
.remove = ohci_hcd_at91_drv_remove,
@@ -368,5 +665,40 @@ static struct platform_driver ohci_hcd_at91_driver = {
.driver = {
.name = "at91_ohci",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(at91_ohci_dt_ids),
},
};
+
+static int __init ohci_at91_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ohci_init_driver(&ohci_at91_hc_driver, NULL);
+
+ /*
+ * The Atmel HW has some unusual quirks, which require Atmel-specific
+ * workarounds. We override certain hc_driver functions here to
+ * achieve that. We explicitly do not enhance ohci_driver_overrides to
+ * allow this more easily, since this is an unusual case, and we don't
+ * want to encourage others to override these functions by making it
+ * too easy.
+ */
+
+ ohci_at91_hc_driver.hub_status_data = ohci_at91_hub_status_data;
+ ohci_at91_hc_driver.hub_control = ohci_at91_hub_control;
+
+ return platform_driver_register(&ohci_hcd_at91_driver);
+}
+module_init(ohci_at91_init);
+
+static void __exit ohci_at91_cleanup(void)
+{
+ platform_driver_unregister(&ohci_hcd_at91_driver);
+}
+module_exit(ohci_at91_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:at91_ohci");
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
deleted file mode 100644
index 17a6043c1fa..00000000000
--- a/drivers/usb/host/ohci-au1xxx.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * OHCI HCD (Host Controller Driver) for USB.
- *
- * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
- * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
- * (C) Copyright 2002 Hewlett-Packard Company
- *
- * Bus Glue for AMD Alchemy Au1xxx
- *
- * Written by Christopher Hoover <ch@hpl.hp.com>
- * Based on fragments of previous driver by Russell King et al.
- *
- * Modified for LH7A404 from ohci-sa1111.c
- * by Durgesh Pattamatta <pattamattad@sharpsec.com>
- * Modified for AMD Alchemy Au1xxx
- * by Matt Porter <mporter@kernel.crashing.org>
- *
- * This file is licenced under the GPL.
- */
-
-#include <linux/platform_device.h>
-#include <linux/signal.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-#ifndef CONFIG_SOC_AU1200
-
-#define USBH_ENABLE_BE (1<<0)
-#define USBH_ENABLE_C (1<<1)
-#define USBH_ENABLE_E (1<<2)
-#define USBH_ENABLE_CE (1<<3)
-#define USBH_ENABLE_RD (1<<4)
-
-#ifdef __LITTLE_ENDIAN
-#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C)
-#elif __BIG_ENDIAN
-#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \
- USBH_ENABLE_BE)
-#else
-#error not byte order defined
-#endif
-
-#else /* Au1200 */
-
-#define USB_HOST_CONFIG (USB_MSR_BASE + USB_MSR_MCFG)
-#define USB_MCFG_PFEN (1<<31)
-#define USB_MCFG_RDCOMB (1<<30)
-#define USB_MCFG_SSDEN (1<<23)
-#define USB_MCFG_OHCCLKEN (1<<16)
-#ifdef CONFIG_DMA_COHERENT
-#define USB_MCFG_UCAM (1<<7)
-#else
-#define USB_MCFG_UCAM (0)
-#endif
-#define USB_MCFG_OBMEN (1<<1)
-#define USB_MCFG_OMEMEN (1<<0)
-
-#define USBH_ENABLE_CE USB_MCFG_OHCCLKEN
-
-#define USBH_ENABLE_INIT (USB_MCFG_PFEN | USB_MCFG_RDCOMB | \
- USBH_ENABLE_CE | USB_MCFG_SSDEN | \
- USB_MCFG_UCAM | \
- USB_MCFG_OBMEN | USB_MCFG_OMEMEN)
-
-#define USBH_DISABLE (USB_MCFG_OBMEN | USB_MCFG_OMEMEN)
-
-#endif /* Au1200 */
-
-extern int usb_disabled(void);
-
-static void au1xxx_start_ohc(void)
-{
- /* enable host controller */
-#ifndef CONFIG_SOC_AU1200
- au_writel(USBH_ENABLE_CE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_INIT, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- /* wait for reset complete (read register twice; see au1500 errata) */
- while (au_readl(USB_HOST_CONFIG),
- !(au_readl(USB_HOST_CONFIG) & USBH_ENABLE_RD))
- udelay(1000);
-
-#else /* Au1200 */
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_CE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_INIT, USB_HOST_CONFIG);
- au_sync();
- udelay(2000);
-#endif /* Au1200 */
-}
-
-static void au1xxx_stop_ohc(void)
-{
-#ifdef CONFIG_SOC_AU1200
- /* Disable mem */
- au_writel(au_readl(USB_HOST_CONFIG) & ~USBH_DISABLE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-#endif
- /* Disable clock */
- au_writel(au_readl(USB_HOST_CONFIG) & ~USBH_ENABLE_CE, USB_HOST_CONFIG);
- au_sync();
-}
-
-static int __devinit ohci_au1xxx_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- ohci_dbg(ohci, "ohci_au1xxx_start, ohci:%p", ohci);
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run(ohci)) < 0) {
- err ("can't start %s", hcd->self.bus_name);
- ohci_stop(hcd);
- return ret;
- }
-
- return 0;
-}
-
-static const struct hc_driver ohci_au1xxx_hc_driver = {
- .description = hcd_name,
- .product_desc = "Au1xxx OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_au1xxx_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-static int ohci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
-{
- int ret;
- struct usb_hcd *hcd;
-
- if (usb_disabled())
- return -ENODEV;
-
-#if defined(CONFIG_SOC_AU1200) && defined(CONFIG_DMA_COHERENT)
- /* Au1200 AB USB does not support coherent memory */
- if (!(read_c0_prid() & 0xff)) {
- printk(KERN_INFO "%s: this is chip revision AB !!\n",
- pdev->name);
- printk(KERN_INFO "%s: update your board or re-configure "
- "the kernel\n", pdev->name);
- return -ENODEV;
- }
-#endif
-
- if (pdev->resource[1].flags != IORESOURCE_IRQ) {
- pr_debug("resource[1] is not IORESOURCE_IRQ\n");
- return -ENOMEM;
- }
-
- hcd = usb_create_hcd(&ohci_au1xxx_hc_driver, &pdev->dev, "au1xxx");
- if (!hcd)
- return -ENOMEM;
-
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug("request_mem_region failed\n");
- ret = -EBUSY;
- goto err1;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- pr_debug("ioremap failed\n");
- ret = -ENOMEM;
- goto err2;
- }
-
- au1xxx_start_ohc();
- ohci_hcd_init(hcd_to_ohci(hcd));
-
- ret = usb_add_hcd(hcd, pdev->resource[1].start,
- IRQF_DISABLED | IRQF_SHARED);
- if (ret == 0) {
- platform_set_drvdata(pdev, hcd);
- return ret;
- }
-
- au1xxx_stop_ohc();
- iounmap(hcd->regs);
-err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-err1:
- usb_put_hcd(hcd);
- return ret;
-}
-
-static int ohci_hcd_au1xxx_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
- au1xxx_stop_ohc();
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ohci_hcd_au1xxx_drv_suspend(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- unsigned long flags;
- int rc;
-
- rc = 0;
-
- /* Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible, bail out if RH has been resumed. Use
- * the spinlock to properly synchronize with possible pending
- * RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
- */
- spin_lock_irqsave(&ohci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
- rc = -EINVAL;
- goto bail;
- }
- ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
- (void)ohci_readl(ohci, &ohci->regs->intrdisable);
-
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
- au1xxx_stop_ohc();
-bail:
- spin_unlock_irqrestore(&ohci->lock, flags);
-
- return rc;
-}
-
-static int ohci_hcd_au1xxx_drv_resume(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
-
- au1xxx_start_ohc();
-
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- ohci_finish_controller_resume(hcd);
-
- return 0;
-}
-
-static const struct dev_pm_ops au1xxx_ohci_pmops = {
- .suspend = ohci_hcd_au1xxx_drv_suspend,
- .resume = ohci_hcd_au1xxx_drv_resume,
-};
-
-#define AU1XXX_OHCI_PMOPS &au1xxx_ohci_pmops
-
-#else
-#define AU1XXX_OHCI_PMOPS NULL
-#endif
-
-static struct platform_driver ohci_hcd_au1xxx_driver = {
- .probe = ohci_hcd_au1xxx_drv_probe,
- .remove = ohci_hcd_au1xxx_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
- .driver = {
- .name = "au1xxx-ohci",
- .owner = THIS_MODULE,
- .pm = AU1XXX_OHCI_PMOPS,
- },
-};
-
-MODULE_ALIAS("platform:au1xxx-ohci");
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index d22fb4d577b..df06be6b47f 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -17,7 +17,7 @@
#include <linux/clk.h>
#include <mach/da8xx.h>
-#include <mach/usb.h>
+#include <linux/platform_data/usb-davinci.h>
#ifndef CONFIG_ARCH_DAVINCI_DA8XX
#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
@@ -85,7 +85,7 @@ static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
static int ohci_da8xx_init(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev->platform_data;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int result;
u32 rh_a;
@@ -171,7 +171,7 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev->platform_data;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
int temp;
switch (typeReq) {
@@ -292,7 +292,7 @@ static const struct hc_driver ohci_da8xx_hc_driver = {
static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
struct platform_device *pdev)
{
- struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev);
struct usb_hcd *hcd;
struct resource *mem;
int error, irq;
@@ -300,41 +300,28 @@ static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
if (hub == NULL)
return -ENODEV;
- usb11_clk = clk_get(&pdev->dev, "usb11");
+ usb11_clk = devm_clk_get(&pdev->dev, "usb11");
if (IS_ERR(usb11_clk))
return PTR_ERR(usb11_clk);
- usb20_clk = clk_get(&pdev->dev, "usb20");
- if (IS_ERR(usb20_clk)) {
- error = PTR_ERR(usb20_clk);
- goto err0;
- }
+ usb20_clk = devm_clk_get(&pdev->dev, "usb20");
+ if (IS_ERR(usb20_clk))
+ return PTR_ERR(usb20_clk);
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
- if (!hcd) {
- error = -ENOMEM;
- goto err1;
- }
+ if (!hcd)
+ return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- error = -ENODEV;
- goto err2;
- }
+ if (!mem)
+ return -ENODEV;
hcd->rsrc_start = mem->start;
- hcd->rsrc_len = mem->end - mem->start + 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- dev_dbg(&pdev->dev, "request_mem_region failed\n");
- error = -EBUSY;
- goto err2;
- }
+ hcd->rsrc_len = resource_size(mem);
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "ioremap failed\n");
- error = -ENOMEM;
- goto err3;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(hcd->regs)) {
+ error = PTR_ERR(hcd->regs);
+ goto err;
}
ohci_hcd_init(hcd_to_ohci(hcd));
@@ -342,11 +329,13 @@ static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
error = -ENODEV;
- goto err4;
+ goto err;
}
- error = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ error = usb_add_hcd(hcd, irq, 0);
if (error)
- goto err4;
+ goto err;
+
+ device_wakeup_enable(hcd->self.controller);
if (hub->ocic_notify) {
error = hub->ocic_notify(ohci_da8xx_ocic_handler);
@@ -355,16 +344,8 @@ static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
}
usb_remove_hcd(hcd);
-err4:
- iounmap(hcd->regs);
-err3:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-err2:
+err:
usb_put_hcd(hcd);
-err1:
- clk_put(usb20_clk);
-err0:
- clk_put(usb11_clk);
return error;
}
@@ -380,15 +361,11 @@ err0:
static inline void
usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
{
- struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev);
hub->ocic_notify(NULL);
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
- clk_put(usb20_clk);
- clk_put(usb11_clk);
}
static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev)
@@ -401,25 +378,32 @@ static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev)
struct usb_hcd *hcd = platform_get_drvdata(dev);
usb_hcd_da8xx_remove(hcd, dev);
- platform_set_drvdata(dev, NULL);
return 0;
}
#ifdef CONFIG_PM
-static int ohci_da8xx_suspend(struct platform_device *dev, pm_message_t message)
+static int ohci_da8xx_suspend(struct platform_device *pdev,
+ pm_message_t message)
{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(&pdev->dev);
+ int ret;
+
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+
ohci_da8xx_clock(0);
hcd->state = HC_STATE_SUSPENDED;
- dev->dev.power.power_state = PMSG_SUSPEND;
- return 0;
+
+ return ret;
}
static int ohci_da8xx_resume(struct platform_device *dev)
@@ -454,3 +438,5 @@ static struct platform_driver ohci_hcd_da8xx_driver = {
.name = "ohci",
},
};
+
+MODULE_ALIAS("platform:ohci");
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index d7d34492934..45032e933e1 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -9,68 +9,15 @@
/*-------------------------------------------------------------------------*/
-#ifdef DEBUG
-
#define edstring(ed_type) ({ char *temp; \
switch (ed_type) { \
case PIPE_CONTROL: temp = "ctrl"; break; \
case PIPE_BULK: temp = "bulk"; break; \
case PIPE_INTERRUPT: temp = "intr"; break; \
default: temp = "isoc"; break; \
- }; temp;})
+ } temp;})
#define pipestring(pipe) edstring(usb_pipetype(pipe))
-/* debug| print the main components of an URB
- * small: 0) header + data packets 1) just header
- */
-static void __maybe_unused
-urb_print(struct urb * urb, char * str, int small, int status)
-{
- unsigned int pipe= urb->pipe;
-
- if (!urb->dev || !urb->dev->bus) {
- dbg("%s URB: no dev", str);
- return;
- }
-
-#ifndef OHCI_VERBOSE_DEBUG
- if (status != 0)
-#endif
- dbg("%s %p dev=%d ep=%d%s-%s flags=%x len=%d/%d stat=%d",
- str,
- urb,
- usb_pipedevice (pipe),
- usb_pipeendpoint (pipe),
- usb_pipeout (pipe)? "out" : "in",
- pipestring (pipe),
- urb->transfer_flags,
- urb->actual_length,
- urb->transfer_buffer_length,
- status);
-
-#ifdef OHCI_VERBOSE_DEBUG
- if (!small) {
- int i, len;
-
- if (usb_pipecontrol (pipe)) {
- printk (KERN_DEBUG "%s: setup(8):", __FILE__);
- for (i = 0; i < 8 ; i++)
- printk (" %02x", ((__u8 *) urb->setup_packet) [i]);
- printk ("\n");
- }
- if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) {
- printk (KERN_DEBUG "%s: data(%d/%d):", __FILE__,
- urb->actual_length,
- urb->transfer_buffer_length);
- len = usb_pipeout (pipe)?
- urb->transfer_buffer_length: urb->actual_length;
- for (i = 0; i < 16 && i < len; i++)
- printk (" %02x", ((__u8 *) urb->transfer_buffer) [i]);
- printk ("%s stat:%d\n", i < len? "...": "", status);
- }
- }
-#endif
-}
#define ohci_dbg_sw(ohci, next, size, format, arg...) \
do { \
@@ -82,6 +29,14 @@ urb_print(struct urb * urb, char * str, int small, int status)
ohci_dbg(ohci,format, ## arg ); \
} while (0);
+/* Version for use where "next" is the address of a local variable */
+#define ohci_dbg_nosw(ohci, next, size, format, arg...) \
+ do { \
+ unsigned s_len; \
+ s_len = scnprintf(*next, *size, format, ## arg); \
+ *size -= s_len; *next += s_len; \
+ } while (0);
+
static void ohci_dump_intr_mask (
struct ohci_hcd *ohci,
@@ -127,6 +82,19 @@ static char *hcfs2string (int state)
return "?";
}
+static const char *rh_state_string(struct ohci_hcd *ohci)
+{
+ switch (ohci->rh_state) {
+ case OHCI_RH_HALTED:
+ return "halted";
+ case OHCI_RH_SUSPENDED:
+ return "suspended";
+ case OHCI_RH_RUNNING:
+ return "running";
+ }
+ return "?";
+}
+
// dump control and status registers
static void
ohci_dump_status (struct ohci_hcd *controller, char **next, unsigned *size)
@@ -136,9 +104,10 @@ ohci_dump_status (struct ohci_hcd *controller, char **next, unsigned *size)
temp = ohci_readl (controller, &regs->revision) & 0xff;
ohci_dbg_sw (controller, next, size,
- "OHCI %d.%d, %s legacy support registers\n",
+ "OHCI %d.%d, %s legacy support registers, rh state %s\n",
0x03 & (temp >> 4), (temp & 0x0f),
- (temp & 0x0100) ? "with" : "NO");
+ (temp & 0x0100) ? "with" : "NO",
+ rh_state_string(controller));
temp = ohci_readl (controller, &regs->control);
ohci_dbg_sw (controller, next, size,
@@ -385,22 +354,8 @@ ohci_dump_ed (const struct ohci_hcd *ohci, const char *label,
}
}
-#else
-static inline void ohci_dump (struct ohci_hcd *controller, int verbose) {}
-
-#undef OHCI_VERBOSE_DEBUG
-
-#endif /* DEBUG */
-
/*-------------------------------------------------------------------------*/
-#ifdef STUB_DEBUG_FILES
-
-static inline void create_debug_files (struct ohci_hcd *bus) { }
-static inline void remove_debug_files (struct ohci_hcd *bus) { }
-
-#else
-
static int debug_async_open(struct inode *, struct file *);
static int debug_periodic_open(struct inode *, struct file *);
static int debug_registers_open(struct inode *, struct file *);
@@ -639,7 +594,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
/* dump driver info, then registers in spec order */
- ohci_dbg_sw (ohci, &next, &size,
+ ohci_dbg_nosw(ohci, &next, &size,
"bus %s, device %s\n"
"%s\n"
"%s\n",
@@ -658,7 +613,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
/* hcca */
if (ohci->hcca)
- ohci_dbg_sw (ohci, &next, &size,
+ ohci_dbg_nosw(ohci, &next, &size,
"hcca frame 0x%04x\n", ohci_frame_no(ohci));
/* other registers mostly affect frame timings */
@@ -849,7 +804,5 @@ static inline void remove_debug_files (struct ohci_hcd *ohci)
debugfs_remove(ohci->debug_dir);
}
-#endif
-
/*-------------------------------------------------------------------------*/
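
The ohci_dbg_nosw() helper added above relies on scnprintf(), which returns the number of characters actually written, so the caller can advance its buffer pointer unconditionally; plain snprintf(), as used in the max3421 dump_eps() earlier in this series, returns the length the output would have had and therefore needs the "ret >= end - dp" overflow check. A tiny illustration of the scnprintf-based accumulation (names are made up):

#include <linux/kernel.h>

static void fill_incrementally(char *buf, size_t size)
{
	char *dp = buf, *end = buf + size;
	int i;

	for (i = 0; i < 16; i++)
		/* scnprintf() never reports more than the space that was left */
		dp += scnprintf(dp, end - dp, " %d", i);
}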
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
deleted file mode 100644
index 4e681613e7a..00000000000
--- a/drivers/usb/host/ohci-ep93xx.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * OHCI HCD (Host Controller Driver) for USB.
- *
- * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
- * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
- * (C) Copyright 2002 Hewlett-Packard Company
- *
- * Bus Glue for ep93xx.
- *
- * Written by Christopher Hoover <ch@hpl.hp.com>
- * Based on fragments of previous driver by Russell King et al.
- *
- * Modified for LH7A404 from ohci-sa1111.c
- * by Durgesh Pattamatta <pattamattad@sharpsec.com>
- *
- * Modified for pxa27x from ohci-lh7a404.c
- * by Nick Bane <nick@cecomputing.co.uk> 26-8-2004
- *
- * Modified for ep93xx from ohci-pxa27x.c
- * by Lennert Buytenhek <buytenh@wantstofly.org> 28-2-2006
- * Based on an earlier driver by Ray Lehtiniemi
- *
- * This file is licenced under the GPL.
- */
-
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/signal.h>
-#include <linux/platform_device.h>
-
-static struct clk *usb_host_clock;
-
-static void ep93xx_start_hc(struct device *dev)
-{
- clk_enable(usb_host_clock);
-}
-
-static void ep93xx_stop_hc(struct device *dev)
-{
- clk_disable(usb_host_clock);
-}
-
-static int usb_hcd_ep93xx_probe(const struct hc_driver *driver,
- struct platform_device *pdev)
-{
- int retval;
- struct usb_hcd *hcd;
-
- if (pdev->resource[1].flags != IORESOURCE_IRQ) {
- dbg("resource[1] is not IORESOURCE_IRQ");
- return -ENOMEM;
- }
-
- hcd = usb_create_hcd(driver, &pdev->dev, "ep93xx");
- if (hcd == NULL)
- return -ENOMEM;
-
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- usb_put_hcd(hcd);
- retval = -EBUSY;
- goto err1;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (hcd->regs == NULL) {
- dbg("ioremap failed");
- retval = -ENOMEM;
- goto err2;
- }
-
- usb_host_clock = clk_get(&pdev->dev, NULL);
- if (IS_ERR(usb_host_clock)) {
- dbg("clk_get failed");
- retval = PTR_ERR(usb_host_clock);
- goto err3;
- }
-
- ep93xx_start_hc(&pdev->dev);
-
- ohci_hcd_init(hcd_to_ohci(hcd));
-
- retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED);
- if (retval == 0)
- return retval;
-
- ep93xx_stop_hc(&pdev->dev);
-err3:
- iounmap(hcd->regs);
-err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-err1:
- usb_put_hcd(hcd);
-
- return retval;
-}
-
-static void usb_hcd_ep93xx_remove(struct usb_hcd *hcd,
- struct platform_device *pdev)
-{
- usb_remove_hcd(hcd);
- ep93xx_stop_hc(&pdev->dev);
- clk_put(usb_host_clock);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
-}
-
-static int __devinit ohci_ep93xx_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run(ohci)) < 0) {
- err("can't start %s", hcd->self.bus_name);
- ohci_stop(hcd);
- return ret;
- }
-
- return 0;
-}
-
-static struct hc_driver ohci_ep93xx_hc_driver = {
- .description = hcd_name,
- .product_desc = "EP93xx OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
- .start = ohci_ep93xx_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
- .get_frame_number = ohci_get_frame,
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-extern int usb_disabled(void);
-
-static int ohci_hcd_ep93xx_drv_probe(struct platform_device *pdev)
-{
- int ret;
-
- ret = -ENODEV;
- if (!usb_disabled())
- ret = usb_hcd_ep93xx_probe(&ohci_ep93xx_hc_driver, pdev);
-
- return ret;
-}
-
-static int ohci_hcd_ep93xx_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_hcd_ep93xx_remove(hcd, pdev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ohci_hcd_ep93xx_drv_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-
- if (time_before(jiffies, ohci->next_statechange))
- msleep(5);
- ohci->next_statechange = jiffies;
-
- ep93xx_stop_hc(&pdev->dev);
- hcd->state = HC_STATE_SUSPENDED;
-
- return 0;
-}
-
-static int ohci_hcd_ep93xx_drv_resume(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-
- if (time_before(jiffies, ohci->next_statechange))
- msleep(5);
- ohci->next_statechange = jiffies;
-
- ep93xx_start_hc(&pdev->dev);
-
- ohci_finish_controller_resume(hcd);
- return 0;
-}
-#endif
-
-
-static struct platform_driver ohci_hcd_ep93xx_driver = {
- .probe = ohci_hcd_ep93xx_drv_probe,
- .remove = ohci_hcd_ep93xx_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
-#ifdef CONFIG_PM
- .suspend = ohci_hcd_ep93xx_drv_suspend,
- .resume = ohci_hcd_ep93xx_drv_resume,
-#endif
- .driver = {
- .name = "ep93xx-ohci",
- .owner = THIS_MODULE,
- },
-};
-
-MODULE_ALIAS("platform:ep93xx-ohci");
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
new file mode 100644
index 00000000000..060a6a41475
--- /dev/null
+++ b/drivers/usb/host/ohci-exynos.c
@@ -0,0 +1,363 @@
+/*
+ * SAMSUNG EXYNOS USB HOST OHCI Controller
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/samsung_usb_phy.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+
+#include "ohci.h"
+
+#define DRIVER_DESC "OHCI EXYNOS driver"
+
+static const char hcd_name[] = "ohci-exynos";
+static struct hc_driver __read_mostly exynos_ohci_hc_driver;
+
+#define to_exynos_ohci(hcd) (struct exynos_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
+
+#define PHY_NUMBER 3
+
+struct exynos_ohci_hcd {
+ struct clk *clk;
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+ struct phy *phy_g[PHY_NUMBER];
+};
+
+static int exynos_ohci_get_phy(struct device *dev,
+ struct exynos_ohci_hcd *exynos_ohci)
+{
+ struct device_node *child;
+ struct phy *phy;
+ int phy_number;
+ int ret = 0;
+
+ exynos_ohci->phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(exynos_ohci->phy)) {
+ ret = PTR_ERR(exynos_ohci->phy);
+ if (ret != -ENXIO && ret != -ENODEV) {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
+ dev_dbg(dev, "Failed to get usb2 phy\n");
+ } else {
+ exynos_ohci->otg = exynos_ohci->phy->otg;
+ }
+
+ /*
+ * Get the generic PHYs:
+ * Both PHY types are kept while OHCI transitions to the generic PHY
+ * framework, so that devices with DTBs built from the old DTS keep
+ * working. Before support for the old bindings can be removed from
+ * this driver, any such devices must have their DTBs rebuilt from
+ * the new DTS.
+ */
+ for_each_available_child_of_node(dev->of_node, child) {
+ ret = of_property_read_u32(child, "reg", &phy_number);
+ if (ret) {
+ dev_err(dev, "Failed to parse device tree\n");
+ of_node_put(child);
+ return ret;
+ }
+
+ if (phy_number >= PHY_NUMBER) {
+ dev_err(dev, "Invalid number of PHYs\n");
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ phy = devm_of_phy_get(dev, child, 0);
+ of_node_put(child);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ if (ret != -ENOSYS && ret != -ENODEV) {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
+ dev_dbg(dev, "Failed to get usb2 phy\n");
+ }
+ exynos_ohci->phy_g[phy_number] = phy;
+ }
+
+ return ret;
+}
+
+static int exynos_ohci_phy_enable(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+ int i;
+ int ret = 0;
+
+ if (!IS_ERR(exynos_ohci->phy))
+ return usb_phy_init(exynos_ohci->phy);
+
+ for (i = 0; ret == 0 && i < PHY_NUMBER; i++)
+ if (!IS_ERR(exynos_ohci->phy_g[i]))
+ ret = phy_power_on(exynos_ohci->phy_g[i]);
+ if (ret)
+ for (i--; i >= 0; i--)
+ if (!IS_ERR(exynos_ohci->phy_g[i]))
+ phy_power_off(exynos_ohci->phy_g[i]);
+
+ return ret;
+}
+
+static void exynos_ohci_phy_disable(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+ int i;
+
+ if (!IS_ERR(exynos_ohci->phy)) {
+ usb_phy_shutdown(exynos_ohci->phy);
+ return;
+ }
+
+ for (i = 0; i < PHY_NUMBER; i++)
+ if (!IS_ERR(exynos_ohci->phy_g[i]))
+ phy_power_off(exynos_ohci->phy_g[i]);
+}
+
+static int exynos_ohci_probe(struct platform_device *pdev)
+{
+ struct exynos_ohci_hcd *exynos_ohci;
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int irq;
+ int err;
+
+ /*
+ * Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we move to full device tree support this can be removed.
+ */
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ hcd = usb_create_hcd(&exynos_ohci_hc_driver,
+ &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ return -ENOMEM;
+ }
+
+ exynos_ohci = to_exynos_ohci(hcd);
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "samsung,exynos5440-ohci"))
+ goto skip_phy;
+
+ err = exynos_ohci_get_phy(&pdev->dev, exynos_ohci);
+ if (err)
+ goto fail_clk;
+
+skip_phy:
+ exynos_ohci->clk = devm_clk_get(&pdev->dev, "usbhost");
+
+ if (IS_ERR(exynos_ohci->clk)) {
+ dev_err(&pdev->dev, "Failed to get usbhost clock\n");
+ err = PTR_ERR(exynos_ohci->clk);
+ goto fail_clk;
+ }
+
+ err = clk_prepare_enable(exynos_ohci->clk);
+ if (err)
+ goto fail_clk;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto fail_io;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
+ goto fail_io;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENODEV;
+ goto fail_io;
+ }
+
+ if (exynos_ohci->otg)
+ exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
+
+ platform_set_drvdata(pdev, hcd);
+
+ err = exynos_ohci_phy_enable(&pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable USB phy\n");
+ goto fail_io;
+ }
+
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add USB HCD\n");
+ goto fail_add_hcd;
+ }
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
+
+fail_add_hcd:
+ exynos_ohci_phy_disable(&pdev->dev);
+fail_io:
+ clk_disable_unprepare(exynos_ohci->clk);
+fail_clk:
+ usb_put_hcd(hcd);
+ return err;
+}
+
+static int exynos_ohci_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+
+ usb_remove_hcd(hcd);
+
+ if (exynos_ohci->otg)
+ exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
+
+ exynos_ohci_phy_disable(&pdev->dev);
+
+ clk_disable_unprepare(exynos_ohci->clk);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static void exynos_ohci_shutdown(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+#ifdef CONFIG_PM
+static int exynos_ohci_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(dev);
+ int rc = ohci_suspend(hcd, do_wakeup);
+
+ if (rc)
+ return rc;
+
+ if (exynos_ohci->otg)
+ exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
+
+ exynos_ohci_phy_disable(dev);
+
+ clk_disable_unprepare(exynos_ohci->clk);
+
+ return 0;
+}
+
+static int exynos_ohci_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+ int ret;
+
+ clk_prepare_enable(exynos_ohci->clk);
+
+ if (exynos_ohci->otg)
+ exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
+
+ ret = exynos_ohci_phy_enable(dev);
+ if (ret) {
+ dev_err(dev, "Failed to enable USB phy\n");
+ clk_disable_unprepare(exynos_ohci->clk);
+ return ret;
+ }
+
+ ohci_resume(hcd, false);
+
+ return 0;
+}
+#else
+#define exynos_ohci_suspend NULL
+#define exynos_ohci_resume NULL
+#endif
+
+static const struct ohci_driver_overrides exynos_overrides __initconst = {
+ .extra_priv_size = sizeof(struct exynos_ohci_hcd),
+};
+
+static const struct dev_pm_ops exynos_ohci_pm_ops = {
+ .suspend = exynos_ohci_suspend,
+ .resume = exynos_ohci_resume,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id exynos_ohci_match[] = {
+ { .compatible = "samsung,exynos4210-ohci" },
+ { .compatible = "samsung,exynos5440-ohci" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_ohci_match);
+#endif
+
+static struct platform_driver exynos_ohci_driver = {
+ .probe = exynos_ohci_probe,
+ .remove = exynos_ohci_remove,
+ .shutdown = exynos_ohci_shutdown,
+ .driver = {
+ .name = "exynos-ohci",
+ .owner = THIS_MODULE,
+ .pm = &exynos_ohci_pm_ops,
+ .of_match_table = of_match_ptr(exynos_ohci_match),
+ }
+};
+static int __init ohci_exynos_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ohci_init_driver(&exynos_ohci_hc_driver, &exynos_overrides);
+ return platform_driver_register(&exynos_ohci_driver);
+}
+module_init(ohci_exynos_init);
+
+static void __exit ohci_exynos_cleanup(void)
+{
+ platform_driver_unregister(&exynos_ohci_driver);
+}
+module_exit(ohci_exynos_cleanup);
+
+MODULE_ALIAS("platform:exynos-ohci");
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_LICENSE("GPL v2");
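The to_exynos_ohci() accessor and .extra_priv_size above work as a pair: ohci_init_driver() (further down in this patch) adds extra_priv_size to the copied hc_driver's hcd_priv_size, so usb_create_hcd() allocates the glue's state directly behind struct ohci_hcd, reachable through ohci->priv. A hedged sketch of the resulting layout (local names are illustrative):

	struct usb_hcd *hcd = usb_create_hcd(&exynos_ohci_hc_driver,
						dev, dev_name(dev));
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);	/* core OHCI state */
	struct exynos_ohci_hcd *exynos_ohci =
		(struct exynos_ohci_hcd *)ohci->priv;	/* glue data appended after it */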
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 5179acb7aa2..f98d03f3144 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1,5 +1,7 @@
/*
- * OHCI HCD (Host Controller Driver) for USB.
+ * Open Host Controller Interface (OHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
* (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
@@ -40,7 +42,6 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
@@ -50,8 +51,6 @@
/*-------------------------------------------------------------------------*/
-#undef OHCI_VERBOSE_DEBUG /* not always helpful */
-
/* For initializing controller (mask in an HCFS mode too) */
#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
#define OHCI_INTR_INIT \
@@ -75,35 +74,11 @@ static const char hcd_name [] = "ohci_hcd";
#define STATECHANGE_DELAY msecs_to_jiffies(300)
#include "ohci.h"
+#include "pci-quirks.h"
static void ohci_dump (struct ohci_hcd *ohci, int verbose);
-static int ohci_init (struct ohci_hcd *ohci);
static void ohci_stop (struct usb_hcd *hcd);
-#if defined(CONFIG_PM) || defined(CONFIG_PCI)
-static int ohci_restart (struct ohci_hcd *ohci);
-#endif
-
-#ifdef CONFIG_PCI
-static void quirk_amd_pll(int state);
-static void amd_iso_dev_put(void);
-static void sb800_prefetch(struct ohci_hcd *ohci, int on);
-#else
-static inline void quirk_amd_pll(int state)
-{
- return;
-}
-static inline void amd_iso_dev_put(void)
-{
- return;
-}
-static inline void sb800_prefetch(struct ohci_hcd *ohci, int on)
-{
- return;
-}
-#endif
-
-
#include "ohci-hub.c"
#include "ohci-dbg.c"
#include "ohci-mem.c"
@@ -122,13 +97,13 @@ static inline void sb800_prefetch(struct ohci_hcd *ohci, int on)
/* Some boards misreport power switching/overcurrent */
-static int distrust_firmware = 1;
+static bool distrust_firmware = 1;
module_param (distrust_firmware, bool, 0);
MODULE_PARM_DESC (distrust_firmware,
"true to distrust firmware power/overcurrent setup");
/* Some boards leave IR set wrongly, since they fail BIOS/SMM handshakes */
-static int no_handshake = 0;
+static bool no_handshake = 0;
module_param (no_handshake, bool, 0);
MODULE_PARM_DESC (no_handshake, "true (not default) disables BIOS handshake");
@@ -150,10 +125,6 @@ static int ohci_urb_enqueue (
unsigned long flags;
int retval = 0;
-#ifdef OHCI_VERBOSE_DEBUG
- urb_print(urb, "SUB", usb_pipein(pipe), -EINPROGRESS);
-#endif
-
/* every endpoint has a ed, locate and maybe (re)initialize it */
if (! (ed = ed_get (ohci, urb->ep, urb->dev, pipe, urb->interval)))
return -ENOMEM;
@@ -171,7 +142,7 @@ static int ohci_urb_enqueue (
// case PIPE_INTERRUPT:
// case PIPE_BULK:
default:
- /* one TD for every 4096 Bytes (can be upto 8K) */
+ /* one TD for every 4096 Bytes (can be up to 8K) */
size += urb->transfer_buffer_length / 4096;
/* ... and for any remaining bytes ... */
if ((urb->transfer_buffer_length % 4096) != 0)
@@ -216,7 +187,7 @@ static int ohci_urb_enqueue (
retval = -ENODEV;
goto fail;
}
- if (!HC_IS_RUNNING(hcd->state)) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
retval = -ENODEV;
goto fail;
}
@@ -239,13 +210,47 @@ static int ohci_urb_enqueue (
frame &= ~(ed->interval - 1);
frame |= ed->branch;
urb->start_frame = frame;
-
- /* yes, only URB_ISO_ASAP is supported, and
- * urb->start_frame is never used as input.
+ ed->last_iso = frame + ed->interval * (size - 1);
+ }
+ } else if (ed->type == PIPE_ISOCHRONOUS) {
+ u16 next = ohci_frame_no(ohci) + 1;
+ u16 frame = ed->last_iso + ed->interval;
+ u16 length = ed->interval * (size - 1);
+
+ /* Behind the scheduling threshold? */
+ if (unlikely(tick_before(frame, next))) {
+
+ /* URB_ISO_ASAP: Round up to the first available slot */
+ if (urb->transfer_flags & URB_ISO_ASAP) {
+ frame += (next - frame + ed->interval - 1) &
+ -ed->interval;
+
+ /*
+ * Not ASAP: Use the next slot in the stream,
+ * no matter what.
*/
+ } else {
+ /*
+ * Some OHCI hardware doesn't handle late TDs
+ * correctly. After retiring them it proceeds
+ * to the next ED instead of the next TD.
+ * Therefore we have to omit the late TDs
+ * entirely.
+ */
+ urb_priv->td_cnt = DIV_ROUND_UP(
+ (u16) (next - frame),
+ ed->interval);
+ if (urb_priv->td_cnt >= urb_priv->length) {
+ ++urb_priv->td_cnt; /* Mark it */
+ ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
+ urb, frame, length,
+ next);
+ }
+ }
}
- } else if (ed->type == PIPE_ISOCHRONOUS)
- urb->start_frame = ed->last_iso + ed->interval;
+ urb->start_frame = frame;
+ ed->last_iso = frame + length;
+ }
/* fill the TDs and link them to the ed; and
* enable that part of the schedule, if needed
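The URB_ISO_ASAP branch above rounds the stream's next slot up to the first one that is not behind the scheduling threshold; the expression relies on ed->interval being a power of two, as OHCI isochronous intervals are. A standalone sketch of that arithmetic (round_up_to_stream() is a made-up name), including 16-bit frame-counter wrap-around:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* First slot of a stream whose slots are "interval" frames apart,
	 * not earlier than "next", counting from slot "frame".  Assumes
	 * interval is a power of two, mirroring the kernel expression. */
	static uint16_t round_up_to_stream(uint16_t frame, uint16_t next,
					   uint16_t interval)
	{
		frame += (next - frame + interval - 1) & -interval;
		return frame;
	}

	int main(void)
	{
		assert(round_up_to_stream(96, 103, 8) == 104);	/* round up  */
		assert(round_up_to_stream(96, 96, 8) == 96);	/* exact hit */
		assert(round_up_to_stream(0xfff8, 0x0002, 8) == 0x0008); /* wrap */
		printf("iso round-up examples ok\n");
		return 0;
	}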
@@ -273,15 +278,11 @@ static int ohci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unsigned long flags;
int rc;
-#ifdef OHCI_VERBOSE_DEBUG
- urb_print(urb, "UNLINK", 1, status);
-#endif
-
spin_lock_irqsave (&ohci->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc) {
; /* Do nothing */
- } else if (HC_IS_RUNNING(hcd->state)) {
+ } else if (ohci->rh_state == OHCI_RH_RUNNING) {
urb_priv_t *urb_priv;
/* Unless an IRQ completed the unlink while it was being
@@ -328,7 +329,7 @@ ohci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
rescan:
spin_lock_irqsave (&ohci->lock, flags);
- if (!HC_IS_RUNNING (hcd->state)) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
sanitize:
ed->state = ED_IDLE;
if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
@@ -384,6 +385,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
ohci->hc_control &= OHCI_CTRL_RWC;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+ ohci->rh_state = OHCI_RH_HALTED;
}
/* ohci_shutdown forcibly disables IRQs and DMA, helping kexec and
@@ -396,17 +398,14 @@ ohci_shutdown (struct usb_hcd *hcd)
struct ohci_hcd *ohci;
ohci = hcd_to_ohci (hcd);
- ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
- ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
+ ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);
- /* If the SHUTDOWN quirk is set, don't put the controller in RESET */
- ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ?
- OHCI_CTRL_RWC | OHCI_CTRL_HCFS :
- OHCI_CTRL_RWC);
- ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
+ /* Software reset, after which the controller goes into SUSPEND */
+ ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
+ ohci_readl(ohci, &ohci->regs->cmdstatus); /* flush the writes */
+ udelay(10);
- /* flush the writes */
- (void) ohci_readl (ohci, &ohci->regs->control);
+ ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
}
static int check_ed(struct ohci_hcd *ohci, struct ed *ed)
@@ -510,7 +509,7 @@ static int ohci_init (struct ohci_hcd *ohci)
if (distrust_firmware)
ohci->flags |= OHCI_QUIRK_HUB_POWER;
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
ohci->regs = hcd->regs;
/* REVISIT this BIOS handshake is now moved into PCI "quirks", and
@@ -585,7 +584,7 @@ static int ohci_run (struct ohci_hcd *ohci)
int first = ohci->fminterval == 0;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
/* boot firmware should have set this up (5.1.1.3.1) */
if (first) {
@@ -698,7 +697,7 @@ retry:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
- hcd->state = HC_STATE_RUNNING;
+ ohci->rh_state = OHCI_RH_RUNNING;
/* wake on ConnectStatusChange, matching external hubs */
ohci_writel (ohci, RH_HS_DRWE, &ohci->regs->roothub.status);
@@ -735,7 +734,6 @@ retry:
// POTPGT delay is bits 24-31, in 2 ms units.
mdelay ((val >> 23) & 0x1fe);
- hcd->state = HC_STATE_RUNNING;
if (quirk_zfmicro(ohci)) {
/* Create timer to watch for bad queue state on ZF Micro */
@@ -751,6 +749,32 @@ retry:
return 0;
}
+/* ohci_setup routine for generic controller initialization */
+
+int ohci_setup(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ ohci_hcd_init(ohci);
+
+ return ohci_init(ohci);
+}
+EXPORT_SYMBOL_GPL(ohci_setup);
+
+/* ohci_start routine for generic controller start of all OHCI bus glue */
+static int ohci_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ ret = ohci_run(ohci);
+ if (ret < 0) {
+ ohci_err(ohci, "can't start\n");
+ ohci_stop(hcd);
+ }
+ return ret;
+}
+
/*-------------------------------------------------------------------------*/
/* an interrupt happens */
@@ -771,8 +795,9 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
* of dead, unclocked, or unplugged (CardBus...) devices
*/
if (ints == ~(u32)0) {
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
ohci_dbg (ohci, "device removed!\n");
+ usb_hc_died(hcd);
return IRQ_HANDLED;
}
@@ -780,7 +805,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
ints &= ohci_readl(ohci, &regs->intrenable);
/* interrupt for some other device? */
- if (ints == 0)
+ if (ints == 0 || unlikely(ohci->rh_state == OHCI_RH_HALTED))
return IRQ_NOTMINE;
if (ints & OHCI_INTR_UE) {
@@ -795,8 +820,9 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
schedule_work (&ohci->nec_work);
} else {
- disable (ohci);
ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
+ ohci->rh_state = OHCI_RH_HALTED;
+ usb_hc_died(hcd);
}
ohci_dump (ohci, 1);
@@ -804,7 +830,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
}
if (ints & OHCI_INTR_RHSC) {
- ohci_vdbg(ohci, "rhsc\n");
+ ohci_dbg(ohci, "rhsc\n");
ohci->next_statechange = jiffies + STATECHANGE_DELAY;
ohci_writel(ohci, OHCI_INTR_RD | OHCI_INTR_RHSC,
&regs->intrstatus);
@@ -826,7 +852,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
* this might not happen.
*/
else if (ints & OHCI_INTR_RD) {
- ohci_vdbg(ohci, "resume detect\n");
+ ohci_dbg(ohci, "resume detect\n");
ohci_writel(ohci, OHCI_INTR_RD, &regs->intrstatus);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
if (ohci->autostop) {
@@ -879,11 +905,11 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
if ((ints & OHCI_INTR_SF) != 0
&& !ohci->ed_rm_list
&& !ohci->ed_to_check
- && HC_IS_RUNNING(hcd->state))
+ && ohci->rh_state == OHCI_RH_RUNNING)
ohci_writel (ohci, OHCI_INTR_SF, &regs->intrdisable);
spin_unlock (&ohci->lock);
- if (HC_IS_RUNNING(hcd->state)) {
+ if (ohci->rh_state == OHCI_RH_RUNNING) {
ohci_writel (ohci, ints, &regs->intrstatus);
ohci_writel (ohci, OHCI_INTR_MIE, &regs->intrenable);
// flush those writes
@@ -901,17 +927,18 @@ static void ohci_stop (struct usb_hcd *hcd)
ohci_dump (ohci, 1);
- flush_scheduled_work();
+ if (quirk_nec(ohci))
+ flush_work(&ohci->nec_work);
- ohci_usb_reset (ohci);
ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
+ ohci_usb_reset(ohci);
free_irq(hcd->irq, hcd);
- hcd->irq = -1;
+ hcd->irq = 0;
if (quirk_zfmicro(ohci))
del_timer(&ohci->unlink_watchdog);
if (quirk_amdiso(ohci))
- amd_iso_dev_put();
+ usb_amd_dev_put();
remove_debug_files (ohci);
ohci_mem_cleanup (ohci);
@@ -929,14 +956,15 @@ static void ohci_stop (struct usb_hcd *hcd)
#if defined(CONFIG_PM) || defined(CONFIG_PCI)
/* must not be called from interrupt context */
-static int ohci_restart (struct ohci_hcd *ohci)
+int ohci_restart(struct ohci_hcd *ohci)
{
int temp;
int i;
struct urb_priv *priv;
+ ohci_init(ohci);
spin_lock_irq(&ohci->lock);
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
/* Recycle any "live" eds/tds (and urbs). */
if (!list_empty (&ohci->pending))
@@ -988,93 +1016,172 @@ static int ohci_restart (struct ohci_hcd *ohci)
ohci_dbg(ohci, "restart complete\n");
return 0;
}
+EXPORT_SYMBOL_GPL(ohci_restart);
#endif
-/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_PM
-MODULE_AUTHOR (DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE ("GPL");
+int ohci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci (hcd);
+ unsigned long flags;
+ int rc = 0;
-#ifdef CONFIG_PCI
-#include "ohci-pci.c"
-#define PCI_DRIVER ohci_pci_driver
-#endif
+ /* Disable irq emission and mark HW inaccessible. Use
+ * the spinlock to properly synchronize with possible pending
+ * RH suspend or resume activity.
+ */
+ spin_lock_irqsave (&ohci->lock, flags);
+ ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
+ (void)ohci_readl(ohci, &ohci->regs->intrdisable);
-#if defined(CONFIG_ARCH_SA1100) && defined(CONFIG_SA1111)
-#include "ohci-sa1111.c"
-#define SA1111_DRIVER ohci_hcd_sa1111_driver
-#endif
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ spin_unlock_irqrestore (&ohci->lock, flags);
-#if defined(CONFIG_ARCH_S3C2410) || defined(CONFIG_ARCH_S3C64XX)
-#include "ohci-s3c2410.c"
-#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
-#endif
+ synchronize_irq(hcd->irq);
-#ifdef CONFIG_USB_OHCI_HCD_OMAP1
-#include "ohci-omap.c"
-#define OMAP1_PLATFORM_DRIVER ohci_hcd_omap_driver
-#endif
+ if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
+ ohci_resume(hcd, false);
+ rc = -EBUSY;
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ohci_suspend);
-#ifdef CONFIG_USB_OHCI_HCD_OMAP3
-#include "ohci-omap3.c"
-#define OMAP3_PLATFORM_DRIVER ohci_hcd_omap3_driver
-#endif
-#ifdef CONFIG_ARCH_LH7A404
-#include "ohci-lh7a404.c"
-#define PLATFORM_DRIVER ohci_hcd_lh7a404_driver
-#endif
+int ohci_resume(struct usb_hcd *hcd, bool hibernated)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int port;
+ bool need_reinit = false;
-#if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx)
-#include "ohci-pxa27x.c"
-#define PLATFORM_DRIVER ohci_hcd_pxa27x_driver
-#endif
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-#ifdef CONFIG_ARCH_EP93XX
-#include "ohci-ep93xx.c"
-#define PLATFORM_DRIVER ohci_hcd_ep93xx_driver
-#endif
+ /* Make sure resume from hibernation re-enumerates everything */
+ if (hibernated)
+ ohci_usb_reset(ohci);
-#ifdef CONFIG_MIPS_ALCHEMY
-#include "ohci-au1xxx.c"
-#define PLATFORM_DRIVER ohci_hcd_au1xxx_driver
-#endif
+ /* See if the controller is already running or has been reset */
+ ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
+ if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
+ need_reinit = true;
+ } else {
+ switch (ohci->hc_control & OHCI_CTRL_HCFS) {
+ case OHCI_USB_OPER:
+ case OHCI_USB_RESET:
+ need_reinit = true;
+ }
+ }
-#ifdef CONFIG_PNX8550
-#include "ohci-pnx8550.c"
-#define PLATFORM_DRIVER ohci_hcd_pnx8550_driver
-#endif
+ /* If needed, reinitialize and suspend the root hub */
+ if (need_reinit) {
+ spin_lock_irq(&ohci->lock);
+ ohci_rh_resume(ohci);
+ ohci_rh_suspend(ohci, 0);
+ spin_unlock_irq(&ohci->lock);
+ }
-#ifdef CONFIG_USB_OHCI_HCD_PPC_SOC
-#include "ohci-ppc-soc.c"
-#define PLATFORM_DRIVER ohci_hcd_ppc_soc_driver
-#endif
+ /* Normally just turn on port power and enable interrupts */
+ else {
+ ohci_dbg(ohci, "powerup ports\n");
+ for (port = 0; port < ohci->num_ports; port++)
+ ohci_writel(ohci, RH_PS_PPS,
+ &ohci->regs->roothub.portstatus[port]);
+
+ ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrenable);
+ ohci_readl(ohci, &ohci->regs->intrenable);
+ msleep(20);
+ }
-#ifdef CONFIG_ARCH_AT91
-#include "ohci-at91.c"
-#define PLATFORM_DRIVER ohci_hcd_at91_driver
-#endif
+ usb_hcd_resume_root_hub(hcd);
-#ifdef CONFIG_ARCH_PNX4008
-#include "ohci-pnx4008.c"
-#define PLATFORM_DRIVER usb_hcd_pnx4008_driver
-#endif
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ohci_resume);
-#ifdef CONFIG_ARCH_DAVINCI_DA8XX
-#include "ohci-da8xx.c"
-#define PLATFORM_DRIVER ohci_hcd_da8xx_driver
#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_CPU_SUBTYPE_SH7763) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786)
-#include "ohci-sh.c"
-#define PLATFORM_DRIVER ohci_hcd_sh_driver
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Generic structure: This gets copied for platform drivers so that
+ * individual entries can be overridden as needed.
+ */
+
+static const struct hc_driver ohci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "OHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_MEMORY | HCD_USB11,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ohci_setup,
+ .start = ohci_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+void ohci_init_driver(struct hc_driver *drv,
+ const struct ohci_driver_overrides *over)
+{
+ /* Copy the generic table to drv and then apply the overrides */
+ *drv = ohci_hc_driver;
+
+ if (over) {
+ drv->product_desc = over->product_desc;
+ drv->hcd_priv_size += over->extra_priv_size;
+ if (over->reset)
+ drv->reset = over->reset;
+ }
+}
+EXPORT_SYMBOL_GPL(ohci_init_driver);
+
+/*-------------------------------------------------------------------------*/
+
+MODULE_AUTHOR (DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE ("GPL");
+
+#if defined(CONFIG_ARCH_SA1100) && defined(CONFIG_SA1111)
+#include "ohci-sa1111.c"
+#define SA1111_DRIVER ohci_hcd_sa1111_driver
#endif
+#ifdef CONFIG_USB_OHCI_HCD_DAVINCI
+#include "ohci-da8xx.c"
+#define DAVINCI_PLATFORM_DRIVER ohci_hcd_da8xx_driver
+#endif
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF
#include "ohci-ppc-of.c"
@@ -1086,11 +1193,6 @@ MODULE_LICENSE ("GPL");
#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_driver
#endif
-#ifdef CONFIG_USB_OHCI_HCD_SSB
-#include "ohci-ssb.c"
-#define SSB_OHCI_DRIVER ssb_ohci_driver
-#endif
-
#ifdef CONFIG_MFD_SM501
#include "ohci-sm501.c"
#define SM501_OHCI_DRIVER ohci_hcd_sm501_driver
@@ -1111,17 +1213,9 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_octeon_driver
#endif
-#if !defined(PCI_DRIVER) && \
- !defined(PLATFORM_DRIVER) && \
- !defined(OMAP1_PLATFORM_DRIVER) && \
- !defined(OMAP3_PLATFORM_DRIVER) && \
- !defined(OF_PLATFORM_DRIVER) && \
- !defined(SA1111_DRIVER) && \
- !defined(PS3_SYSTEM_BUS_DRIVER) && \
- !defined(SM501_OHCI_DRIVER) && \
- !defined(TMIO_OHCI_DRIVER) && \
- !defined(SSB_OHCI_DRIVER)
-#error "missing bus glue for ohci-hcd"
+#ifdef CONFIG_TILE_USB
+#include "ohci-tilegx.c"
+#define PLATFORM_DRIVER ohci_hcd_tilegx_driver
#endif
static int __init ohci_hcd_mod_init(void)
@@ -1136,13 +1230,11 @@ static int __init ohci_hcd_mod_init(void)
sizeof (struct ed), sizeof (struct td));
set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
-#ifdef DEBUG
ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
if (!ohci_debug_root) {
retval = -ENOENT;
goto error_debug;
}
-#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
retval = ps3_ohci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
@@ -1156,20 +1248,8 @@ static int __init ohci_hcd_mod_init(void)
goto error_platform;
#endif
-#ifdef OMAP1_PLATFORM_DRIVER
- retval = platform_driver_register(&OMAP1_PLATFORM_DRIVER);
- if (retval < 0)
- goto error_omap1_platform;
-#endif
-
-#ifdef OMAP3_PLATFORM_DRIVER
- retval = platform_driver_register(&OMAP3_PLATFORM_DRIVER);
- if (retval < 0)
- goto error_omap3_platform;
-#endif
-
#ifdef OF_PLATFORM_DRIVER
- retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
+ retval = platform_driver_register(&OF_PLATFORM_DRIVER);
if (retval < 0)
goto error_of_platform;
#endif
@@ -1180,18 +1260,6 @@ static int __init ohci_hcd_mod_init(void)
goto error_sa1111;
#endif
-#ifdef PCI_DRIVER
- retval = pci_register_driver(&PCI_DRIVER);
- if (retval < 0)
- goto error_pci;
-#endif
-
-#ifdef SSB_OHCI_DRIVER
- retval = ssb_driver_register(&SSB_OHCI_DRIVER);
- if (retval)
- goto error_ssb;
-#endif
-
#ifdef SM501_OHCI_DRIVER
retval = platform_driver_register(&SM501_OHCI_DRIVER);
if (retval < 0)
@@ -1204,9 +1272,19 @@ static int __init ohci_hcd_mod_init(void)
goto error_tmio;
#endif
+#ifdef DAVINCI_PLATFORM_DRIVER
+ retval = platform_driver_register(&DAVINCI_PLATFORM_DRIVER);
+ if (retval < 0)
+ goto error_davinci;
+#endif
+
return retval;
/* Error path */
+#ifdef DAVINCI_PLATFORM_DRIVER
+ platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER);
+ error_davinci:
+#endif
#ifdef TMIO_OHCI_DRIVER
platform_driver_unregister(&TMIO_OHCI_DRIVER);
error_tmio:
@@ -1215,43 +1293,25 @@ static int __init ohci_hcd_mod_init(void)
platform_driver_unregister(&SM501_OHCI_DRIVER);
error_sm501:
#endif
-#ifdef SSB_OHCI_DRIVER
- ssb_driver_unregister(&SSB_OHCI_DRIVER);
- error_ssb:
-#endif
-#ifdef PCI_DRIVER
- pci_unregister_driver(&PCI_DRIVER);
- error_pci:
-#endif
#ifdef SA1111_DRIVER
sa1111_driver_unregister(&SA1111_DRIVER);
error_sa1111:
#endif
#ifdef OF_PLATFORM_DRIVER
- of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
+ platform_driver_unregister(&OF_PLATFORM_DRIVER);
error_of_platform:
#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
error_platform:
#endif
-#ifdef OMAP1_PLATFORM_DRIVER
- platform_driver_unregister(&OMAP1_PLATFORM_DRIVER);
- error_omap1_platform:
-#endif
-#ifdef OMAP3_PLATFORM_DRIVER
- platform_driver_unregister(&OMAP3_PLATFORM_DRIVER);
- error_omap3_platform:
-#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
error_ps3:
#endif
-#ifdef DEBUG
debugfs_remove(ohci_debug_root);
ohci_debug_root = NULL;
error_debug:
-#endif
clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
return retval;
@@ -1260,36 +1320,28 @@ module_init(ohci_hcd_mod_init);
static void __exit ohci_hcd_mod_exit(void)
{
+#ifdef DAVINCI_PLATFORM_DRIVER
+ platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER);
+#endif
#ifdef TMIO_OHCI_DRIVER
platform_driver_unregister(&TMIO_OHCI_DRIVER);
#endif
#ifdef SM501_OHCI_DRIVER
platform_driver_unregister(&SM501_OHCI_DRIVER);
#endif
-#ifdef SSB_OHCI_DRIVER
- ssb_driver_unregister(&SSB_OHCI_DRIVER);
-#endif
-#ifdef PCI_DRIVER
- pci_unregister_driver(&PCI_DRIVER);
-#endif
#ifdef SA1111_DRIVER
sa1111_driver_unregister(&SA1111_DRIVER);
#endif
#ifdef OF_PLATFORM_DRIVER
- of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
+ platform_driver_unregister(&OF_PLATFORM_DRIVER);
#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
#endif
-#ifdef OMAP3_PLATFORM_DRIVER
- platform_driver_unregister(&OMAP3_PLATFORM_DRIVER);
-#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
-#ifdef DEBUG
debugfs_remove(ohci_debug_root);
-#endif
clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
}
module_exit(ohci_hcd_mod_exit);
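The copy-and-override pattern introduced by ohci_init_driver() above is what the new ohci-exynos.c glue earlier in this patch follows. A hedged sketch of a minimal platform glue using it (all foo_* names are invented for illustration):

	static struct hc_driver __read_mostly foo_ohci_hc_driver;

	static const struct ohci_driver_overrides foo_overrides __initconst = {
		.product_desc	 = "Foo OHCI",
		.extra_priv_size = sizeof(struct foo_ohci_priv),  /* hypothetical */
	};

	static int __init foo_ohci_init(void)
	{
		if (usb_disabled())
			return -ENODEV;

		ohci_init_driver(&foo_ohci_hc_driver, &foo_overrides);
		return platform_driver_register(&foo_ohci_driver); /* hypothetical */
	}
	module_init(foo_ohci_init);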
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index cddcda95b57..b4940de1eba 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -90,6 +90,24 @@ __acquires(ohci->lock)
dl_done_list (ohci);
finish_unlinks (ohci, ohci_frame_no(ohci));
+ /*
+ * Some controllers don't handle "global" suspend properly if
+ * there are unsuspended ports. For these controllers, put all
+ * the enabled ports into suspend before suspending the root hub.
+ */
+ if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) {
+ __hc32 __iomem *portstat = ohci->regs->roothub.portstatus;
+ int i;
+ unsigned temp;
+
+ for (i = 0; i < ohci->num_ports; (++i, ++portstat)) {
+ temp = ohci_readl(ohci, portstat);
+ if ((temp & (RH_PS_PES | RH_PS_PSS)) ==
+ RH_PS_PES)
+ ohci_writel(ohci, RH_PS_PSS, portstat);
+ }
+ }
+
/* maybe resume can wake root hub */
if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) {
ohci->hc_control |= OHCI_CTRL_RWE;
@@ -111,6 +129,7 @@ __acquires(ohci->lock)
if (!autostop) {
ohci->next_statechange = jiffies + msecs_to_jiffies (5);
ohci->autostop = 0;
+ ohci->rh_state = OHCI_RH_SUSPENDED;
}
done:
@@ -140,7 +159,7 @@ __acquires(ohci->lock)
if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
/* this can happen after resuming a swsusp snapshot */
- if (hcd->state == HC_STATE_RESUMING) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
ohci_dbg (ohci, "BIOS/SMM active, control %03x\n",
ohci->hc_control);
status = -EBUSY;
@@ -175,7 +194,6 @@ __acquires(ohci->lock)
if (status == -EBUSY) {
if (!autostopped) {
spin_unlock_irq (&ohci->lock);
- (void) ohci_init (ohci);
status = ohci_restart (ohci);
usb_root_hub_lost_power(hcd->self.root_hub);
@@ -212,10 +230,11 @@ __acquires(ohci->lock)
/* Sometimes PCI D3 suspend trashes frame timings ... */
periodic_reinit (ohci);
- /* the following code is executed with ohci->lock held and
- * irqs disabled if and only if autostopped is true
+ /*
+ * The following code is executed with ohci->lock held and
+ * irqs disabled if and only if autostopped is true. This
+ * will cause sparse to warn about a "context imbalance".
*/
-
skip_resume:
/* interrupts might have been disabled */
ohci_writel (ohci, OHCI_INTR_INIT, &ohci->regs->intrenable);
@@ -274,6 +293,7 @@ skip_resume:
(void) ohci_readl (ohci, &ohci->regs->control);
}
+ ohci->rh_state = OHCI_RH_RUNNING;
return 0;
}
@@ -314,54 +334,6 @@ static int ohci_bus_resume (struct usb_hcd *hcd)
return rc;
}
-/* Carry out the final steps of resuming the controller device */
-static void ohci_finish_controller_resume(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int port;
- bool need_reinit = false;
-
- /* See if the controller is already running or has been reset */
- ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
- if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
- need_reinit = true;
- } else {
- switch (ohci->hc_control & OHCI_CTRL_HCFS) {
- case OHCI_USB_OPER:
- case OHCI_USB_RESET:
- need_reinit = true;
- }
- }
-
- /* If needed, reinitialize and suspend the root hub */
- if (need_reinit) {
- spin_lock_irq(&ohci->lock);
- hcd->state = HC_STATE_RESUMING;
- ohci_rh_resume(ohci);
- hcd->state = HC_STATE_QUIESCING;
- ohci_rh_suspend(ohci, 0);
- hcd->state = HC_STATE_SUSPENDED;
- spin_unlock_irq(&ohci->lock);
- }
-
- /* Normally just turn on port power and enable interrupts */
- else {
- ohci_dbg(ohci, "powerup ports\n");
- for (port = 0; port < ohci->num_ports; port++)
- ohci_writel(ohci, RH_PS_PPS,
- &ohci->regs->roothub.portstatus[port]);
-
- ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrenable);
- ohci_readl(ohci, &ohci->regs->intrenable);
- msleep(20);
- }
-
- /* Does the root hub have a port wakeup pending? */
- if (ohci_readl(ohci, &ohci->regs->intrstatus) &
- (OHCI_INTR_RD | OHCI_INTR_RHSC))
- usb_hcd_resume_root_hub(hcd);
-}
-
/* Carry out polling-, autostop-, and autoresume-related state changes */
static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
int any_connected, int rhsc_status)
@@ -484,8 +456,7 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
/* build "status change" packet (one or two bytes) from HC registers */
-static int
-ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
+int ohci_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int i, changed = 0, length = 1;
@@ -550,6 +521,7 @@ done:
return changed ? length : 0;
}
+EXPORT_SYMBOL_GPL(ohci_hub_status_data);
/*-------------------------------------------------------------------------*/
@@ -578,17 +550,18 @@ ohci_hub_descriptor (
temp |= 0x0010;
else if (rh & RH_A_OCPM) /* per-port overcurrent reporting? */
temp |= 0x0008;
- desc->wHubCharacteristics = (__force __u16)cpu_to_hc16(ohci, temp);
+ desc->wHubCharacteristics = cpu_to_le16(temp);
- /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
rh = roothub_b (ohci);
- memset(desc->bitmap, 0xff, sizeof(desc->bitmap));
- desc->bitmap [0] = rh & RH_B_DR;
+ memset(desc->u.hs.DeviceRemovable, 0xff,
+ sizeof(desc->u.hs.DeviceRemovable));
+ desc->u.hs.DeviceRemovable[0] = rh & RH_B_DR;
if (ohci->num_ports > 7) {
- desc->bitmap [1] = (rh & RH_B_DR) >> 8;
- desc->bitmap [2] = 0xff;
+ desc->u.hs.DeviceRemovable[1] = (rh & RH_B_DR) >> 8;
+ desc->u.hs.DeviceRemovable[2] = 0xff;
} else
- desc->bitmap [1] = 0xff;
+ desc->u.hs.DeviceRemovable[1] = 0xff;
}
/*-------------------------------------------------------------------------*/
@@ -625,14 +598,8 @@ static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port)
/* See usb 7.1.7.5: root hubs must issue at least 50 msec reset signaling,
* not necessarily continuous ... to guard against resume signaling.
- * The short timeout is safe for non-root hubs, and is backward-compatible
- * with earlier Linux hosts.
*/
-#ifdef CONFIG_USB_SUSPEND
#define PORT_RESET_MSEC 50
-#else
-#define PORT_RESET_MSEC 10
-#endif
/* this timer value might be vendor-specific ... */
#define PORT_RESET_HW_MSEC 10
@@ -697,7 +664,7 @@ static inline int root_port_reset (struct ohci_hcd *ohci, unsigned port)
return 0;
}
-static int ohci_hub_control (
+int ohci_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
@@ -776,10 +743,8 @@ static int ohci_hub_control (
temp = roothub_portstatus (ohci, wIndex);
put_unaligned_le32(temp, buf);
-#ifndef OHCI_VERBOSE_DEBUG
- if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
-#endif
- dbg_port (ohci, "GetStatus", wIndex, temp);
+ if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
+ dbg_port(ohci, "GetStatus", wIndex, temp);
break;
case SetHubFeature:
switch (wValue) {
@@ -825,4 +790,4 @@ error:
}
return retval;
}
-
+EXPORT_SYMBOL_GPL(ohci_hub_control);
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
index 931d588c3fb..c2c221a332e 100644
--- a/drivers/usb/host/ohci-jz4740.c
+++ b/drivers/usb/host/ohci-jz4740.c
@@ -82,14 +82,14 @@ static int ohci_jz4740_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd);
- int ret;
+ int ret = 0;
switch (typeReq) {
- case SetHubFeature:
+ case SetPortFeature:
if (wValue == USB_PORT_FEAT_POWER)
ret = ohci_jz4740_set_vbus_power(jz4740_ohci, true);
break;
- case ClearHubFeature:
+ case ClearPortFeature:
if (wValue == USB_PORT_FEAT_POWER)
ret = ohci_jz4740_set_vbus_power(jz4740_ohci, false);
break;
@@ -145,7 +145,7 @@ static const struct hc_driver ohci_jz4740_hc_driver = {
};
-static __devinit int jz4740_ohci_probe(struct platform_device *pdev)
+static int jz4740_ohci_probe(struct platform_device *pdev)
{
int ret;
struct usb_hcd *hcd;
@@ -174,31 +174,23 @@ static __devinit int jz4740_ohci_probe(struct platform_device *pdev)
jz4740_ohci = hcd_to_jz4740_hcd(hcd);
- res = request_mem_region(res->start, resource_size(res), hcd_name);
- if (!res) {
- dev_err(&pdev->dev, "Failed to request mem region.\n");
- ret = -EBUSY;
- goto err_free;
- }
-
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- hcd->regs = ioremap(res->start, resource_size(res));
- if (!hcd->regs) {
- dev_err(&pdev->dev, "Failed to ioremap registers.\n");
- ret = -EBUSY;
- goto err_release_mem;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto err_free;
}
- jz4740_ohci->clk = clk_get(&pdev->dev, "uhc");
+ jz4740_ohci->clk = devm_clk_get(&pdev->dev, "uhc");
if (IS_ERR(jz4740_ohci->clk)) {
ret = PTR_ERR(jz4740_ohci->clk);
dev_err(&pdev->dev, "Failed to get clock: %d\n", ret);
- goto err_iounmap;
+ goto err_free;
}
- jz4740_ohci->vbus = regulator_get(&pdev->dev, "vbus");
+ jz4740_ohci->vbus = devm_regulator_get(&pdev->dev, "vbus");
if (IS_ERR(jz4740_ohci->vbus))
jz4740_ohci->vbus = NULL;
@@ -217,47 +209,32 @@ static __devinit int jz4740_ohci_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to add hcd: %d\n", ret);
goto err_disable;
}
+ device_wakeup_enable(hcd->self.controller);
return 0;
err_disable:
- platform_set_drvdata(pdev, NULL);
- if (jz4740_ohci->vbus) {
+ if (jz4740_ohci->vbus)
regulator_disable(jz4740_ohci->vbus);
- regulator_put(jz4740_ohci->vbus);
- }
clk_disable(jz4740_ohci->clk);
- clk_put(jz4740_ohci->clk);
-err_iounmap:
- iounmap(hcd->regs);
-err_release_mem:
- release_mem_region(res->start, resource_size(res));
err_free:
usb_put_hcd(hcd);
return ret;
}
-static __devexit int jz4740_ohci_remove(struct platform_device *pdev)
+static int jz4740_ohci_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd);
usb_remove_hcd(hcd);
- platform_set_drvdata(pdev, NULL);
-
- if (jz4740_ohci->vbus) {
+ if (jz4740_ohci->vbus)
regulator_disable(jz4740_ohci->vbus);
- regulator_put(jz4740_ohci->vbus);
- }
clk_disable(jz4740_ohci->clk);
- clk_put(jz4740_ohci->clk);
-
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
@@ -266,7 +243,7 @@ static __devexit int jz4740_ohci_remove(struct platform_device *pdev)
static struct platform_driver ohci_hcd_jz4740_driver = {
.probe = jz4740_ohci_probe,
- .remove = __devexit_p(jz4740_ohci_remove),
+ .remove = jz4740_ohci_remove,
.driver = {
.name = "jz4740-ohci",
.owner = THIS_MODULE,
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
deleted file mode 100644
index 18d39f0463e..00000000000
--- a/drivers/usb/host/ohci-lh7a404.c
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * OHCI HCD (Host Controller Driver) for USB.
- *
- * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
- * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
- * (C) Copyright 2002 Hewlett-Packard Company
- *
- * Bus Glue for Sharp LH7A404
- *
- * Written by Christopher Hoover <ch@hpl.hp.com>
- * Based on fragments of previous driver by Russell King et al.
- *
- * Modified for LH7A404 from ohci-sa1111.c
- * by Durgesh Pattamatta <pattamattad@sharpsec.com>
- *
- * This file is licenced under the GPL.
- */
-
-#include <linux/platform_device.h>
-#include <linux/signal.h>
-
-#include <mach/hardware.h>
-
-
-extern int usb_disabled(void);
-
-/*-------------------------------------------------------------------------*/
-
-static void lh7a404_start_hc(struct platform_device *dev)
-{
- printk(KERN_DEBUG "%s: starting LH7A404 OHCI USB Controller\n",
- __FILE__);
-
- /*
- * Now, carefully enable the USB clock, and take
- * the USB host controller out of reset.
- */
- CSC_PWRCNT |= CSC_PWRCNT_USBH_EN; /* Enable clock */
- udelay(1000);
- USBH_CMDSTATUS = OHCI_HCR;
-
- printk(KERN_DEBUG "%s: Clock to USB host has been enabled \n", __FILE__);
-}
-
-static void lh7a404_stop_hc(struct platform_device *dev)
-{
- printk(KERN_DEBUG "%s: stopping LH7A404 OHCI USB Controller\n",
- __FILE__);
-
- CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */
-}
-
-
-/*-------------------------------------------------------------------------*/
-
-/* configure so an HC device and id are always provided */
-/* always called with process context; sleeping is OK */
-
-
-/**
- * usb_hcd_lh7a404_probe - initialize LH7A404-based HCDs
- * Context: !in_interrupt()
- *
- * Allocates basic resources for this USB host controller, and
- * then invokes the start() method for the HCD associated with it
- * through the hotplug entry's driver_data.
- *
- */
-int usb_hcd_lh7a404_probe (const struct hc_driver *driver,
- struct platform_device *dev)
-{
- int retval;
- struct usb_hcd *hcd;
-
- if (dev->resource[1].flags != IORESOURCE_IRQ) {
- pr_debug("resource[1] is not IORESOURCE_IRQ");
- return -ENOMEM;
- }
-
- hcd = usb_create_hcd(driver, &dev->dev, "lh7a404");
- if (!hcd)
- return -ENOMEM;
- hcd->rsrc_start = dev->resource[0].start;
- hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug("request_mem_region failed");
- retval = -EBUSY;
- goto err1;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- pr_debug("ioremap failed");
- retval = -ENOMEM;
- goto err2;
- }
-
- lh7a404_start_hc(dev);
- ohci_hcd_init(hcd_to_ohci(hcd));
-
- retval = usb_add_hcd(hcd, dev->resource[1].start, IRQF_DISABLED);
- if (retval == 0)
- return retval;
-
- lh7a404_stop_hc(dev);
- iounmap(hcd->regs);
- err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- err1:
- usb_put_hcd(hcd);
- return retval;
-}
-
-
-/* may be called without controller electrically present */
-/* may be called with controller, bus, and devices active */
-
-/**
- * usb_hcd_lh7a404_remove - shutdown processing for LH7A404-based HCDs
- * @dev: USB Host Controller being removed
- * Context: !in_interrupt()
- *
- * Reverses the effect of usb_hcd_lh7a404_probe(), first invoking
- * the HCD's stop() method. It is always called from a thread
- * context, normally "rmmod", "apmd", or something similar.
- *
- */
-void usb_hcd_lh7a404_remove (struct usb_hcd *hcd, struct platform_device *dev)
-{
- usb_remove_hcd(hcd);
- lh7a404_stop_hc(dev);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int __devinit
-ohci_lh7a404_start (struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
-
- ohci_dbg (ohci, "ohci_lh7a404_start, ohci:%p", ohci);
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run (ohci)) < 0) {
- err ("can't start %s", hcd->self.bus_name);
- ohci_stop (hcd);
- return ret;
- }
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ohci_lh7a404_hc_driver = {
- .description = hcd_name,
- .product_desc = "LH7A404 OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_lh7a404_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/*-------------------------------------------------------------------------*/
-
-static int ohci_hcd_lh7a404_drv_probe(struct platform_device *pdev)
-{
- int ret;
-
- pr_debug ("In ohci_hcd_lh7a404_drv_probe");
-
- if (usb_disabled())
- return -ENODEV;
-
- ret = usb_hcd_lh7a404_probe(&ohci_lh7a404_hc_driver, pdev);
- return ret;
-}
-
-static int ohci_hcd_lh7a404_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_hcd_lh7a404_remove(hcd, pdev);
- return 0;
-}
- /*TBD*/
-/*static int ohci_hcd_lh7a404_drv_suspend(struct platform_device *dev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
-
- return 0;
-}
-static int ohci_hcd_lh7a404_drv_resume(struct platform_device *dev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
-
-
- return 0;
-}
-*/
-
-static struct platform_driver ohci_hcd_lh7a404_driver = {
- .probe = ohci_hcd_lh7a404_drv_probe,
- .remove = ohci_hcd_lh7a404_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
- /*.suspend = ohci_hcd_lh7a404_drv_suspend, */
- /*.resume = ohci_hcd_lh7a404_drv_resume, */
- .driver = {
- .name = "lh7a404-ohci",
- .owner = THIS_MODULE,
- },
-};
-
-MODULE_ALIAS("platform:lh7a404-ohci");
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
new file mode 100644
index 00000000000..ba180ed0f81
--- /dev/null
+++ b/drivers/usb/host/ohci-nxp.c
@@ -0,0 +1,351 @@
+/*
+ * driver for NXP USB Host devices
+ *
+ * Currently supported OHCI host devices:
+ * - NXP LPC32xx
+ *
+ * Authors: Dmitry Chigirev <source@mvista.com>
+ * Vitaly Wool <vitalywool@gmail.com>
+ *
+ * register initialization is based on code examples provided by Philips
+ * Copyright (c) 2005 Koninklijke Philips Electronics N.V.
+ *
+ * NOTE: This driver does not have suspend/resume functionality
+ * This driver is intended for engineering development purposes only
+ *
+ * 2005-2006 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/usb/isp1301.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/io.h>
+
+#include <mach/platform.h>
+#include <mach/irqs.h>
+
+#define USB_CONFIG_BASE 0x31020000
+#define PWRMAN_BASE 0x40004000
+
+#define USB_CTRL IO_ADDRESS(PWRMAN_BASE + 0x64)
+
+/* USB_CTRL bit defines */
+#define USB_SLAVE_HCLK_EN (1 << 24)
+#define USB_DEV_NEED_CLK_EN (1 << 22)
+#define USB_HOST_NEED_CLK_EN (1 << 21)
+#define PAD_CONTROL_LAST_DRIVEN (1 << 19)
+
+#define USB_OTG_STAT_CONTROL IO_ADDRESS(USB_CONFIG_BASE + 0x110)
+
+/* USB_OTG_STAT_CONTROL bit defines */
+#define TRANSPARENT_I2C_EN (1 << 7)
+#define HOST_EN (1 << 0)
+
+/* On LPC32xx, those are undefined */
+#ifndef start_int_set_falling_edge
+#define start_int_set_falling_edge(irq)
+#define start_int_set_rising_edge(irq)
+#define start_int_ack(irq)
+#define start_int_mask(irq)
+#define start_int_umask(irq)
+#endif
+
+#define DRIVER_DESC "OHCI NXP driver"
+
+static const char hcd_name[] = "ohci-nxp";
+static struct hc_driver __read_mostly ohci_nxp_hc_driver;
+
+static struct i2c_client *isp1301_i2c_client;
+
+extern int usb_disabled(void);
+
+static struct clk *usb_pll_clk;
+static struct clk *usb_dev_clk;
+static struct clk *usb_otg_clk;
+
+static void isp1301_configure_lpc32xx(void)
+{
+ /* LPC32XX only supports DAT_SE0 USB mode */
+ /* This sequence is important */
+
+ /* Disable transparent UART mode first */
+ i2c_smbus_write_byte_data(isp1301_i2c_client,
+ (ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
+ MC1_UART_EN);
+ i2c_smbus_write_byte_data(isp1301_i2c_client,
+ (ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
+ ~MC1_SPEED_REG);
+ i2c_smbus_write_byte_data(isp1301_i2c_client,
+ ISP1301_I2C_MODE_CONTROL_1, MC1_SPEED_REG);
+ i2c_smbus_write_byte_data(isp1301_i2c_client,
+ (ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR),
+ ~0);
+ i2c_smbus_write_byte_data(isp1301_i2c_client,
+ ISP1301_I2C_MODE_CONTROL_2,
+ (MC2_BI_DI | MC2_PSW_EN | MC2_SPD_SUSP_CTRL));
+ i2c_smbus_write_byte_data(isp1301_i2c_client,
+ (ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
+ i2c_smbus_write_byte_data(isp1301_i2c_client,
+ ISP1301_I2C_MODE_CONTROL_1, MC1_DAT_SE0);
+ i2c_smbus_write_byte_data(isp1301_i2c_client,
+ ISP1301_I2C_OTG_CONTROL_1,
+ (OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));
+ i2c_smbus_write_byte_data(isp1301_i2c_client,
+ (ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
+ (OTG1_DM_PULLUP | OTG1_DP_PULLUP));
+ i2c_smbus_write_byte_data(isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
+ i2c_smbus_write_byte_data(isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR,
+ ~0);
+ i2c_smbus_write_byte_data(isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
+
+ /* Enable usb_need_clk clock after transceiver is initialized */
+ __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL);
+
+ printk(KERN_INFO "ISP1301 Vendor ID : 0x%04x\n",
+ i2c_smbus_read_word_data(isp1301_i2c_client, 0x00));
+ printk(KERN_INFO "ISP1301 Product ID : 0x%04x\n",
+ i2c_smbus_read_word_data(isp1301_i2c_client, 0x02));
+ printk(KERN_INFO "ISP1301 Version ID : 0x%04x\n",
+ i2c_smbus_read_word_data(isp1301_i2c_client, 0x14));
+}
+
+static void isp1301_configure(void)
+{
+ isp1301_configure_lpc32xx();
+}
+
+static inline void isp1301_vbus_on(void)
+{
+ i2c_smbus_write_byte_data(isp1301_i2c_client, ISP1301_I2C_OTG_CONTROL_1,
+ OTG1_VBUS_DRV);
+}
+
+static inline void isp1301_vbus_off(void)
+{
+ i2c_smbus_write_byte_data(isp1301_i2c_client,
+ ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
+ OTG1_VBUS_DRV);
+}
+
+static void ohci_nxp_start_hc(void)
+{
+ unsigned long tmp = __raw_readl(USB_OTG_STAT_CONTROL) | HOST_EN;
+ __raw_writel(tmp, USB_OTG_STAT_CONTROL);
+ isp1301_vbus_on();
+}
+
+static void ohci_nxp_stop_hc(void)
+{
+ unsigned long tmp;
+ isp1301_vbus_off();
+ tmp = __raw_readl(USB_OTG_STAT_CONTROL) & ~HOST_EN;
+ __raw_writel(tmp, USB_OTG_STAT_CONTROL);
+}
+
+static int ohci_hcd_nxp_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = NULL;
+ const struct hc_driver *driver = &ohci_nxp_hc_driver;
+ struct resource *res;
+ int ret = 0, irq;
+ struct device_node *isp1301_node;
+
+ if (pdev->dev.of_node) {
+ isp1301_node = of_parse_phandle(pdev->dev.of_node,
+ "transceiver", 0);
+ } else {
+ isp1301_node = NULL;
+ }
+
+ isp1301_i2c_client = isp1301_get_client(isp1301_node);
+ if (!isp1301_i2c_client) {
+ return -EPROBE_DEFER;
+ }
+
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail_disable;
+
+ dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name);
+ if (usb_disabled()) {
+ dev_err(&pdev->dev, "USB is disabled\n");
+ ret = -ENODEV;
+ goto fail_disable;
+ }
+
+ /* Enable AHB slave USB clock, needed for further USB clock control */
+ __raw_writel(USB_SLAVE_HCLK_EN | PAD_CONTROL_LAST_DRIVEN, USB_CTRL);
+
+ /* Enable USB PLL */
+ usb_pll_clk = devm_clk_get(&pdev->dev, "ck_pll5");
+ if (IS_ERR(usb_pll_clk)) {
+ dev_err(&pdev->dev, "failed to acquire USB PLL\n");
+ ret = PTR_ERR(usb_pll_clk);
+ goto fail_disable;
+ }
+
+ ret = clk_enable(usb_pll_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to start USB PLL\n");
+ goto fail_disable;
+ }
+
+ ret = clk_set_rate(usb_pll_clk, 48000);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to set USB clock rate\n");
+ goto fail_rate;
+ }
+
+ /* Enable USB device clock */
+ usb_dev_clk = devm_clk_get(&pdev->dev, "ck_usbd");
+ if (IS_ERR(usb_dev_clk)) {
+ dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
+ ret = PTR_ERR(usb_dev_clk);
+ goto fail_rate;
+ }
+
+ ret = clk_enable(usb_dev_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
+ goto fail_rate;
+ }
+
+ /* Enable USB otg clocks */
+ usb_otg_clk = devm_clk_get(&pdev->dev, "ck_usb_otg");
+ if (IS_ERR(usb_otg_clk)) {
+ dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
+ ret = PTR_ERR(usb_otg_clk);
+ goto fail_otg;
+ }
+
+ __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL);
+
+ ret = clk_enable(usb_otg_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
+ goto fail_otg;
+ }
+
+ isp1301_configure();
+
+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Failed to allocate HC buffer\n");
+ ret = -ENOMEM;
+ goto fail_hcd;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto fail_resource;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -ENXIO;
+ goto fail_resource;
+ }
+
+ ohci_nxp_start_hc();
+ platform_set_drvdata(pdev, hcd);
+
+ dev_info(&pdev->dev, "at 0x%p, irq %d\n", hcd->regs, hcd->irq);
+ ret = usb_add_hcd(hcd, irq, 0);
+ if (ret == 0) {
+ device_wakeup_enable(hcd->self.controller);
+ return ret;
+ }
+
+ ohci_nxp_stop_hc();
+fail_resource:
+ usb_put_hcd(hcd);
+fail_hcd:
+ clk_disable(usb_otg_clk);
+fail_otg:
+ clk_disable(usb_dev_clk);
+fail_rate:
+ clk_disable(usb_pll_clk);
+fail_disable:
+ isp1301_i2c_client = NULL;
+ return ret;
+}
+
+static int ohci_hcd_nxp_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_remove_hcd(hcd);
+ ohci_nxp_stop_hc();
+ usb_put_hcd(hcd);
+ clk_disable(usb_pll_clk);
+ clk_disable(usb_dev_clk);
+ i2c_unregister_device(isp1301_i2c_client);
+ isp1301_i2c_client = NULL;
+
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:usb-ohci");
+
+#ifdef CONFIG_OF
+static const struct of_device_id ohci_hcd_nxp_match[] = {
+ { .compatible = "nxp,ohci-nxp" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ohci_hcd_nxp_match);
+#endif
+
+static struct platform_driver ohci_hcd_nxp_driver = {
+ .driver = {
+ .name = "usb-ohci",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ohci_hcd_nxp_match),
+ },
+ .probe = ohci_hcd_nxp_probe,
+ .remove = ohci_hcd_nxp_remove,
+};
+
+static int __init ohci_nxp_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ohci_init_driver(&ohci_nxp_hc_driver, NULL);
+ return platform_driver_register(&ohci_hcd_nxp_driver);
+}
+module_init(ohci_nxp_init);
+
+static void __exit ohci_nxp_cleanup(void)
+{
+ platform_driver_unregister(&ohci_hcd_nxp_driver);
+}
+module_exit(ohci_nxp_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ohci-octeon.c b/drivers/usb/host/ohci-octeon.c
index e4ddfaf8870..15af8954085 100644
--- a/drivers/usb/host/ohci-octeon.c
+++ b/drivers/usb/host/ohci-octeon.c
@@ -42,7 +42,7 @@ static void ohci_octeon_hw_stop(void)
octeon2_usb_clocks_stop();
}
-static int __devinit ohci_octeon_start(struct usb_hcd *hcd)
+static int ohci_octeon_start(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int ret;
@@ -127,30 +127,23 @@ static int ohci_octeon_drv_probe(struct platform_device *pdev)
}
/* Ohci is a 32-bit device. */
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
hcd = usb_create_hcd(&ohci_octeon_hc_driver, &pdev->dev, "octeon");
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = res_mem->start;
- hcd->rsrc_len = res_mem->end - res_mem->start + 1;
+ hcd->rsrc_len = resource_size(res_mem);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
- OCTEON_OHCI_HCD_NAME)) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- ret = -EBUSY;
+ reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(reg_base)) {
+ ret = PTR_ERR(reg_base);
goto err1;
}
- reg_base = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!reg_base) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err2;
- }
-
ohci_octeon_hw_start();
hcd->regs = reg_base;
@@ -164,22 +157,21 @@ static int ohci_octeon_drv_probe(struct platform_device *pdev)
ohci_hcd_init(ohci);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
- goto err3;
+ goto err2;
}
+ device_wakeup_enable(hcd->self.controller);
+
platform_set_drvdata(pdev, hcd);
return 0;
-err3:
+err2:
ohci_octeon_hw_stop();
- iounmap(hcd->regs);
-err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
usb_put_hcd(hcd);
return ret;
@@ -192,12 +184,8 @@ static int ohci_octeon_drv_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
ohci_octeon_hw_stop();
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 5645f70b921..c923cafcaca 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -14,20 +14,30 @@
* This file is licenced under the GPL.
*/
-#include <linux/signal.h> /* IRQF_DISABLED */
-#include <linux/jiffies.h>
-#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb/otg.h>
+#include <linux/platform_device.h>
+#include <linux/signal.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
-#include <mach/hardware.h>
#include <asm/io.h>
#include <asm/mach-types.h>
-#include <plat/mux.h>
+#include <mach/mux.h>
+
+#include <mach/hardware.h>
#include <mach/irqs.h>
-#include <plat/fpga.h>
-#include <plat/usb.h>
+#include <mach/usb.h>
/* OMAP-1510 OHCI has its own MMU for DMA */
@@ -41,10 +51,7 @@
#define OMAP1510_LB_MMU_RAM_H 0xfffec234
#define OMAP1510_LB_MMU_RAM_L 0xfffec238
-
-#ifndef CONFIG_ARCH_OMAP
-#error "This file is OMAP bus glue. CONFIG_OMAP must be defined."
-#endif
+#define DRIVER_DESC "OHCI OMAP driver"
#ifdef CONFIG_TPS65010
#include <linux/i2c/tps65010.h>
@@ -67,8 +74,9 @@ extern int ocpi_enable(void);
static struct clk *usb_host_ck;
static struct clk *usb_dc_ck;
-static int host_enabled;
-static int host_initialized;
+
+static const char hcd_name[] = "ohci-omap";
+static struct hc_driver __read_mostly ohci_omap_hc_driver;
static void omap_ohci_clock_power(int on)
{
@@ -91,14 +99,14 @@ static int omap_ohci_transceiver_power(int on)
{
if (on) {
if (machine_is_omap_innovator() && cpu_is_omap1510())
- fpga_write(fpga_read(INNOVATOR_FPGA_CAM_USB_CONTROL)
+ __raw_writeb(__raw_readb(INNOVATOR_FPGA_CAM_USB_CONTROL)
| ((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
INNOVATOR_FPGA_CAM_USB_CONTROL);
else if (machine_is_omap_osk())
tps65010_set_gpio_out_value(GPIO1, LOW);
} else {
if (machine_is_omap_innovator() && cpu_is_omap1510())
- fpga_write(fpga_read(INNOVATOR_FPGA_CAM_USB_CONTROL)
+ __raw_writeb(__raw_readb(INNOVATOR_FPGA_CAM_USB_CONTROL)
& ~((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
INNOVATOR_FPGA_CAM_USB_CONTROL);
else if (machine_is_omap_osk())
@@ -167,14 +175,15 @@ static int omap_1510_local_bus_init(void)
static void start_hnp(struct ohci_hcd *ohci)
{
- const unsigned port = ohci_to_hcd(ohci)->self.otg_port - 1;
+ struct usb_hcd *hcd = ohci_to_hcd(ohci);
+ const unsigned port = hcd->self.otg_port - 1;
unsigned long flags;
u32 l;
- otg_start_hnp(ohci->transceiver);
+ otg_start_hnp(hcd->phy->otg);
local_irq_save(flags);
- ohci->transceiver->state = OTG_STATE_A_SUSPEND;
+ hcd->phy->state = OTG_STATE_A_SUSPEND;
writel (RH_PS_PSS, &ohci->regs->roothub.portstatus [port]);
l = omap_readl(OTG_CTRL);
l &= ~OTG_A_BUSREQ;
@@ -186,43 +195,43 @@ static void start_hnp(struct ohci_hcd *ohci)
/*-------------------------------------------------------------------------*/
-static int ohci_omap_init(struct usb_hcd *hcd)
+static int ohci_omap_reset(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- struct omap_usb_config *config = hcd->self.controller->platform_data;
+ struct omap_usb_config *config = dev_get_platdata(hcd->self.controller);
int need_transceiver = (config->otg != 0);
int ret;
dev_dbg(hcd->self.controller, "starting USB Controller\n");
if (config->otg) {
- ohci_to_hcd(ohci)->self.otg_port = config->otg;
+ hcd->self.otg_port = config->otg;
/* default/minimum OTG power budget: 8 mA */
- ohci_to_hcd(ohci)->power_budget = 8;
+ hcd->power_budget = 8;
}
/* boards can use OTG transceivers in non-OTG modes */
need_transceiver = need_transceiver
|| machine_is_omap_h2() || machine_is_omap_h3();
- if (cpu_is_omap16xx())
- ocpi_enable();
+ /* XXX OMAP16xx only */
+ if (config->ocpi_enable)
+ config->ocpi_enable();
#ifdef CONFIG_USB_OTG
if (need_transceiver) {
- ohci->transceiver = otg_get_transceiver();
- if (ohci->transceiver) {
- int status = otg_set_host(ohci->transceiver,
+ hcd->phy = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (!IS_ERR_OR_NULL(hcd->phy)) {
+ int status = otg_set_host(hcd->phy->otg,
&ohci_to_hcd(ohci)->self);
- dev_dbg(hcd->self.controller, "init %s transceiver, status %d\n",
- ohci->transceiver->label, status);
+ dev_dbg(hcd->self.controller, "init %s phy, status %d\n",
+ hcd->phy->label, status);
if (status) {
- if (ohci->transceiver)
- put_device(ohci->transceiver->dev);
+ usb_put_phy(hcd->phy);
return status;
}
} else {
- dev_err(hcd->self.controller, "can't find transceiver\n");
+ dev_err(hcd->self.controller, "can't find phy\n");
return -ENODEV;
}
ohci->start_hnp = start_hnp;
@@ -236,9 +245,15 @@ static int ohci_omap_init(struct usb_hcd *hcd)
omap_1510_local_bus_init();
}
- if ((ret = ohci_init(ohci)) < 0)
+ ret = ohci_setup(hcd);
+ if (ret < 0)
return ret;
+ if (config->otg || config->rwc) {
+ ohci->hc_control = OHCI_CTRL_RWC;
+ writel(OHCI_CTRL_RWC, &ohci->regs->control);
+ }
+
/* board-specific power switching and overcurrent support */
if (machine_is_omap_osk() || machine_is_omap_innovator()) {
u32 rh = roothub_a (ohci);
@@ -279,14 +294,6 @@ static int ohci_omap_init(struct usb_hcd *hcd)
return 0;
}
-static void ohci_omap_stop(struct usb_hcd *hcd)
-{
- dev_dbg(hcd->self.controller, "stopping USB Controller\n");
- ohci_stop(hcd);
- omap_ohci_clock_power(0);
-}
-
-
/*-------------------------------------------------------------------------*/
/**
@@ -302,17 +309,16 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
{
int retval, irq;
struct usb_hcd *hcd = 0;
- struct ohci_hcd *ohci;
if (pdev->num_resources != 2) {
- printk(KERN_ERR "hcd probe: invalid num_resources: %i\n",
+ dev_err(&pdev->dev, "invalid num_resources: %i\n",
pdev->num_resources);
return -ENODEV;
}
if (pdev->resource[0].flags != IORESOURCE_MEM
|| pdev->resource[1].flags != IORESOURCE_IRQ) {
- printk(KERN_ERR "hcd probe: invalid resource type\n");
+ dev_err(&pdev->dev, "invalid resource type\n");
return -ENODEV;
}
@@ -352,26 +358,16 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
goto err2;
}
- ohci = hcd_to_ohci(hcd);
- ohci_hcd_init(ohci);
-
- host_initialized = 0;
- host_enabled = 1;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
retval = -ENXIO;
goto err3;
}
- retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ retval = usb_add_hcd(hcd, irq, 0);
if (retval)
goto err3;
- host_initialized = 1;
-
- if (!host_enabled)
- omap_ohci_clock_power(0);
-
+ device_wakeup_enable(hcd->self.controller);
return 0;
err3:
iounmap(hcd->regs);
@@ -400,12 +396,12 @@ err0:
static inline void
usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
-
+ dev_dbg(hcd->self.controller, "stopping USB Controller\n");
usb_remove_hcd(hcd);
- if (ohci->transceiver) {
- (void) otg_set_host(ohci->transceiver, 0);
- put_device(ohci->transceiver->dev);
+ omap_ohci_clock_power(0);
+ if (!IS_ERR_OR_NULL(hcd->phy)) {
+ (void) otg_set_host(hcd->phy->otg, 0);
+ usb_put_phy(hcd->phy);
}
if (machine_is_omap_osk())
gpio_free(9);
@@ -418,76 +414,6 @@ usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
/*-------------------------------------------------------------------------*/
-static int
-ohci_omap_start (struct usb_hcd *hcd)
-{
- struct omap_usb_config *config;
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
-
- if (!host_enabled)
- return 0;
- config = hcd->self.controller->platform_data;
- if (config->otg || config->rwc) {
- ohci->hc_control = OHCI_CTRL_RWC;
- writel(OHCI_CTRL_RWC, &ohci->regs->control);
- }
-
- if ((ret = ohci_run (ohci)) < 0) {
- dev_err(hcd->self.controller, "can't start\n");
- ohci_stop (hcd);
- return ret;
- }
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ohci_omap_hc_driver = {
- .description = hcd_name,
- .product_desc = "OMAP OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .reset = ohci_omap_init,
- .start = ohci_omap_start,
- .stop = ohci_omap_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/*-------------------------------------------------------------------------*/
-
static int ohci_hcd_omap_drv_probe(struct platform_device *dev)
{
return usb_hcd_omap_probe(&ohci_omap_hc_driver, dev);
@@ -498,7 +424,6 @@ static int ohci_hcd_omap_drv_remove(struct platform_device *dev)
struct usb_hcd *hcd = platform_get_drvdata(dev);
usb_hcd_omap_remove(hcd, dev);
- platform_set_drvdata(dev, NULL);
return 0;
}
@@ -507,17 +432,23 @@ static int ohci_hcd_omap_drv_remove(struct platform_device *dev)
#ifdef CONFIG_PM
-static int ohci_omap_suspend(struct platform_device *dev, pm_message_t message)
+static int ohci_omap_suspend(struct platform_device *pdev, pm_message_t message)
{
- struct ohci_hcd *ohci = hcd_to_ohci(platform_get_drvdata(dev));
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(&pdev->dev);
+ int ret;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+
omap_ohci_clock_power(0);
- ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
- return 0;
+ return ret;
}
static int ohci_omap_resume(struct platform_device *dev)
@@ -530,7 +461,7 @@ static int ohci_omap_resume(struct platform_device *dev)
ohci->next_statechange = jiffies;
omap_ohci_clock_power(1);
- ohci_finish_controller_resume(hcd);
+ ohci_resume(hcd, false);
return 0;
}
@@ -555,4 +486,29 @@ static struct platform_driver ohci_hcd_omap_driver = {
},
};
+static const struct ohci_driver_overrides omap_overrides __initconst = {
+ .product_desc = "OMAP OHCI",
+ .reset = ohci_omap_reset
+};
+
+static int __init ohci_omap_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ohci_init_driver(&ohci_omap_hc_driver, &omap_overrides);
+ return platform_driver_register(&ohci_hcd_omap_driver);
+}
+module_init(ohci_omap_init);
+
+static void __exit ohci_omap_cleanup(void)
+{
+ platform_driver_unregister(&ohci_hcd_omap_driver);
+}
+module_exit(ohci_omap_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS("platform:ohci");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 2cc8a504b18..ec15aebe878 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -7,6 +7,7 @@
* Copyright (C) 2007-2010 Texas Instruments, Inc.
* Author: Vikram Pandita <vikram.pandita@ti.com>
* Author: Anand Gadiyar <gadiyar@ti.com>
+ * Author: Keshava Munegowda <keshava_mgowda@ti.com>
*
* Based on ehci-omap.c and some other ohci glue layers
*
@@ -24,547 +25,26 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
- * TODO (last updated Mar 10th, 2010):
+ * TODO (last updated Feb 27, 2011):
* - add kernel-doc
- * - Factor out code common to EHCI to a separate file
- * - Make EHCI and OHCI coexist together
- * - needs newer silicon versions to actually work
- * - the last one to be loaded currently steps on the other's toes
- * - Add hooks for configuring transceivers, etc. at init/exit
- * - Add aggressive clock-management code
*/
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/usb/otg.h>
#include <linux/platform_device.h>
-#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
-#include <plat/usb.h>
+#include "ohci.h"
-/*
- * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
- * Use ohci_omap_readl()/ohci_omap_writel() functions
- */
-
-/* TLL Register Set */
-#define OMAP_USBTLL_REVISION (0x00)
-#define OMAP_USBTLL_SYSCONFIG (0x10)
-#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8)
-#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3)
-#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2)
-#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1)
-#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0)
-
-#define OMAP_USBTLL_SYSSTATUS (0x14)
-#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0)
-
-#define OMAP_USBTLL_IRQSTATUS (0x18)
-#define OMAP_USBTLL_IRQENABLE (0x1C)
-
-#define OMAP_TLL_SHARED_CONF (0x30)
-#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6)
-#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5)
-#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2)
-#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1)
-#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0)
-
-#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num)
-#define OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT 24
-#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11)
-#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10)
-#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9)
-#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8)
-#define OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS (1 << 1)
-#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0)
-
-#define OMAP_TLL_CHANNEL_COUNT 3
-
-/* UHH Register Set */
-#define OMAP_UHH_REVISION (0x00)
-#define OMAP_UHH_SYSCONFIG (0x10)
-#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12)
-#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8)
-#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3)
-#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2)
-#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1)
-#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0)
-
-#define OMAP_UHH_SYSSTATUS (0x14)
-#define OMAP_UHH_SYSSTATUS_UHHRESETDONE (1 << 0)
-#define OMAP_UHH_SYSSTATUS_OHCIRESETDONE (1 << 1)
-#define OMAP_UHH_SYSSTATUS_EHCIRESETDONE (1 << 2)
-#define OMAP_UHH_HOSTCONFIG (0x40)
-#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0)
-#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0)
-#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11)
-#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12)
-#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2)
-#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3)
-#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4)
-#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5)
-#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8)
-#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9)
-#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10)
-
-#define OMAP_UHH_DEBUG_CSR (0x44)
-
-/*-------------------------------------------------------------------------*/
-
-static inline void ohci_omap_writel(void __iomem *base, u32 reg, u32 val)
-{
- __raw_writel(val, base + reg);
-}
-
-static inline u32 ohci_omap_readl(void __iomem *base, u32 reg)
-{
- return __raw_readl(base + reg);
-}
-
-static inline void ohci_omap_writeb(void __iomem *base, u8 reg, u8 val)
-{
- __raw_writeb(val, base + reg);
-}
-
-static inline u8 ohci_omap_readb(void __iomem *base, u8 reg)
-{
- return __raw_readb(base + reg);
-}
-
-/*-------------------------------------------------------------------------*/
-
-struct ohci_hcd_omap3 {
- struct ohci_hcd *ohci;
- struct device *dev;
-
- struct clk *usbhost_ick;
- struct clk *usbhost2_120m_fck;
- struct clk *usbhost1_48m_fck;
- struct clk *usbtll_fck;
- struct clk *usbtll_ick;
-
- /* port_mode: TLL/PHY, 2/3/4/6-PIN, DP-DM/DAT-SE0 */
- enum ohci_omap3_port_mode port_mode[OMAP3_HS_USB_PORTS];
- void __iomem *uhh_base;
- void __iomem *tll_base;
- void __iomem *ohci_base;
-
- unsigned es2_compatibility:1;
-};
-
-/*-------------------------------------------------------------------------*/
-
-static void ohci_omap3_clock_power(struct ohci_hcd_omap3 *omap, int on)
-{
- if (on) {
- clk_enable(omap->usbtll_ick);
- clk_enable(omap->usbtll_fck);
- clk_enable(omap->usbhost_ick);
- clk_enable(omap->usbhost1_48m_fck);
- clk_enable(omap->usbhost2_120m_fck);
- } else {
- clk_disable(omap->usbhost2_120m_fck);
- clk_disable(omap->usbhost1_48m_fck);
- clk_disable(omap->usbhost_ick);
- clk_disable(omap->usbtll_fck);
- clk_disable(omap->usbtll_ick);
- }
-}
-
-static int ohci_omap3_init(struct usb_hcd *hcd)
-{
- dev_dbg(hcd->self.controller, "starting OHCI controller\n");
-
- return ohci_init(hcd_to_ohci(hcd));
-}
-
-
-/*-------------------------------------------------------------------------*/
-
-static int ohci_omap3_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- /*
- * RemoteWakeupConnected has to be set explicitly before
- * calling ohci_run. The reset value of RWC is 0.
- */
- ohci->hc_control = OHCI_CTRL_RWC;
- writel(OHCI_CTRL_RWC, &ohci->regs->control);
-
- ret = ohci_run(ohci);
-
- if (ret < 0) {
- dev_err(hcd->self.controller, "can't start\n");
- ohci_stop(hcd);
- }
-
- return ret;
-}
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * convert the port-mode enum to a value we can use in the FSLSMODE
- * field of USBTLL_CHANNEL_CONF
- */
-static unsigned ohci_omap3_fslsmode(enum ohci_omap3_port_mode mode)
-{
- switch (mode) {
- case OMAP_OHCI_PORT_MODE_UNUSED:
- case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
- return 0x0;
-
- case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
- return 0x1;
-
- case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
- return 0x2;
-
- case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
- return 0x3;
-
- case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
- return 0x4;
-
- case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
- return 0x5;
-
- case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
- return 0x6;
-
- case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
- return 0x7;
-
- case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
- return 0xA;
-
- case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
- return 0xB;
- default:
- pr_warning("Invalid port mode, using default\n");
- return 0x0;
- }
-}
-
-static void ohci_omap3_tll_config(struct ohci_hcd_omap3 *omap)
-{
- u32 reg;
- int i;
-
- /* Program TLL SHARED CONF */
- reg = ohci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF);
- reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
- reg &= ~OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN;
- reg |= OMAP_TLL_SHARED_CONF_USB_DIVRATION;
- reg |= OMAP_TLL_SHARED_CONF_FCLK_IS_ON;
- ohci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
-
- /* Program each TLL channel */
- /*
- * REVISIT: Only the 3-pin and 4-pin PHY modes have
- * actually been tested.
- */
- for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
-
- /* Enable only those channels that are actually used */
- if (omap->port_mode[i] == OMAP_OHCI_PORT_MODE_UNUSED)
- continue;
-
- reg = ohci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
- reg |= ohci_omap3_fslsmode(omap->port_mode[i])
- << OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT;
- reg |= OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS;
- reg |= OMAP_TLL_CHANNEL_CONF_CHANEN;
- ohci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
- }
-}
-
-/* omap3_start_ohci
- * - Start the TI USBHOST controller
- */
-static int omap3_start_ohci(struct ohci_hcd_omap3 *omap, struct usb_hcd *hcd)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
- u32 reg = 0;
- int ret = 0;
-
- dev_dbg(omap->dev, "starting TI OHCI USB Controller\n");
-
- /* Get all the clock handles we need */
- omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick");
- if (IS_ERR(omap->usbhost_ick)) {
- dev_err(omap->dev, "could not get usbhost_ick\n");
- ret = PTR_ERR(omap->usbhost_ick);
- goto err_host_ick;
- }
-
- omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck");
- if (IS_ERR(omap->usbhost2_120m_fck)) {
- dev_err(omap->dev, "could not get usbhost_120m_fck\n");
- ret = PTR_ERR(omap->usbhost2_120m_fck);
- goto err_host_120m_fck;
- }
-
- omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck");
- if (IS_ERR(omap->usbhost1_48m_fck)) {
- dev_err(omap->dev, "could not get usbhost_48m_fck\n");
- ret = PTR_ERR(omap->usbhost1_48m_fck);
- goto err_host_48m_fck;
- }
-
- omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck");
- if (IS_ERR(omap->usbtll_fck)) {
- dev_err(omap->dev, "could not get usbtll_fck\n");
- ret = PTR_ERR(omap->usbtll_fck);
- goto err_tll_fck;
- }
-
- omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick");
- if (IS_ERR(omap->usbtll_ick)) {
- dev_err(omap->dev, "could not get usbtll_ick\n");
- ret = PTR_ERR(omap->usbtll_ick);
- goto err_tll_ick;
- }
-
- /* Now enable all the clocks in the correct order */
- ohci_omap3_clock_power(omap, 1);
-
- /* perform TLL soft reset, and wait until reset is complete */
- ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_SOFTRESET);
-
- /* Wait for TLL reset to complete */
- while (!(ohci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
- cpu_relax();
-
- if (time_after(jiffies, timeout)) {
- dev_dbg(omap->dev, "operation timed out\n");
- ret = -EINVAL;
- goto err_sys_status;
- }
- }
-
- dev_dbg(omap->dev, "TLL reset done\n");
-
- /* (1<<3) = no idle mode only for initial debugging */
- ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
- OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
- OMAP_USBTLL_SYSCONFIG_CACTIVITY);
-
-
- /* Put UHH in NoIdle/NoStandby mode */
- reg = ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
- reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
- | OMAP_UHH_SYSCONFIG_SIDLEMODE
- | OMAP_UHH_SYSCONFIG_CACTIVITY
- | OMAP_UHH_SYSCONFIG_MIDLEMODE);
- reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
- reg &= ~OMAP_UHH_SYSCONFIG_SOFTRESET;
-
- ohci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
-
- reg = ohci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
-
- /* setup ULPI bypass and burst configurations */
- reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
- | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
- | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
- reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
-
- /*
- * REVISIT: Pi_CONNECT_STATUS controls MStandby
- * assertion and Swakeup generation - let us not
- * worry about this for now. OMAP HWMOD framework
- * might take care of this later. If not, we can
- * update these registers when adding aggressive
- * clock management code.
- *
- * For now, turn off all the Pi_CONNECT_STATUS bits
- *
- if (omap->port_mode[0] == OMAP_OHCI_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
- if (omap->port_mode[1] == OMAP_OHCI_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
- if (omap->port_mode[2] == OMAP_OHCI_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
- */
- reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
- reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
- reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
-
- if (omap->es2_compatibility) {
- /*
- * All OHCI modes need to go through the TLL,
- * unlike in the EHCI case. So use UTMI mode
- * for all ports for OHCI, on ES2.x silicon
- */
- dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
- } else {
- dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
- if (omap->port_mode[0] == OMAP_OHCI_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
-
- if (omap->port_mode[1] == OMAP_OHCI_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
-
- if (omap->port_mode[2] == OMAP_OHCI_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
-
- }
- ohci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
- dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
-
- ohci_omap3_tll_config(omap);
-
- return 0;
-
-err_sys_status:
- ohci_omap3_clock_power(omap, 0);
- clk_put(omap->usbtll_ick);
-
-err_tll_ick:
- clk_put(omap->usbtll_fck);
-
-err_tll_fck:
- clk_put(omap->usbhost1_48m_fck);
-
-err_host_48m_fck:
- clk_put(omap->usbhost2_120m_fck);
-
-err_host_120m_fck:
- clk_put(omap->usbhost_ick);
-
-err_host_ick:
- return ret;
-}
-
-static void omap3_stop_ohci(struct ohci_hcd_omap3 *omap, struct usb_hcd *hcd)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
-
- dev_dbg(omap->dev, "stopping TI EHCI USB Controller\n");
-
- /* Reset USBHOST for insmod/rmmod to work */
- ohci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
- OMAP_UHH_SYSCONFIG_SOFTRESET);
- while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & OMAP_UHH_SYSSTATUS_UHHRESETDONE)) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & OMAP_UHH_SYSSTATUS_OHCIRESETDONE)) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & OMAP_UHH_SYSSTATUS_EHCIRESETDONE)) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
-
- while (!(ohci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
+#define DRIVER_DESC "OHCI OMAP3 driver"
- ohci_omap3_clock_power(omap, 0);
-
- if (omap->usbtll_fck != NULL) {
- clk_put(omap->usbtll_fck);
- omap->usbtll_fck = NULL;
- }
-
- if (omap->usbhost_ick != NULL) {
- clk_put(omap->usbhost_ick);
- omap->usbhost_ick = NULL;
- }
-
- if (omap->usbhost1_48m_fck != NULL) {
- clk_put(omap->usbhost1_48m_fck);
- omap->usbhost1_48m_fck = NULL;
- }
-
- if (omap->usbhost2_120m_fck != NULL) {
- clk_put(omap->usbhost2_120m_fck);
- omap->usbhost2_120m_fck = NULL;
- }
-
- if (omap->usbtll_ick != NULL) {
- clk_put(omap->usbtll_ick);
- omap->usbtll_ick = NULL;
- }
-
- dev_dbg(omap->dev, "Clock to USB host has been disabled\n");
-}
-
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ohci_omap3_hc_driver = {
- .description = hcd_name,
- .product_desc = "OMAP3 OHCI Host Controller",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .reset = ohci_omap3_init,
- .start = ohci_omap3_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/*-------------------------------------------------------------------------*/
+static const char hcd_name[] = "ohci-omap3";
+static struct hc_driver __read_mostly ohci_omap3_hc_driver;
/*
* configure so an HC device and id are always provided
@@ -578,109 +58,89 @@ static const struct hc_driver ohci_omap3_hc_driver = {
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*/
-static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
+static int ohci_hcd_omap3_probe(struct platform_device *pdev)
{
- struct ohci_hcd_omap_platform_data *pdata = pdev->dev.platform_data;
- struct ohci_hcd_omap3 *omap;
- struct resource *res;
- struct usb_hcd *hcd;
- int ret = -ENODEV;
- int irq;
+ struct device *dev = &pdev->dev;
+ struct ohci_hcd *ohci;
+ struct usb_hcd *hcd = NULL;
+ void __iomem *regs = NULL;
+ struct resource *res;
+ int ret;
+ int irq;
if (usb_disabled())
- goto err_disabled;
+ return -ENODEV;
- if (!pdata) {
- dev_dbg(&pdev->dev, "missing platform_data\n");
- goto err_pdata;
+ if (!dev->parent) {
+ dev_err(dev, "Missing parent device\n");
+ return -ENODEV;
}
irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "OHCI irq failed\n");
+ return -ENODEV;
+ }
- omap = kzalloc(sizeof(*omap), GFP_KERNEL);
- if (!omap) {
- ret = -ENOMEM;
- goto err_disabled;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "UHH OHCI get resource failed\n");
+ return -ENOMEM;
}
- hcd = usb_create_hcd(&ohci_omap3_hc_driver, &pdev->dev,
- dev_name(&pdev->dev));
- if (!hcd) {
- ret = -ENOMEM;
- goto err_create_hcd;
+ regs = ioremap(res->start, resource_size(res));
+ if (!regs) {
+ dev_err(dev, "UHH OHCI ioremap failed\n");
+ return -ENOMEM;
}
- platform_set_drvdata(pdev, omap);
- omap->dev = &pdev->dev;
- omap->port_mode[0] = pdata->port_mode[0];
- omap->port_mode[1] = pdata->port_mode[1];
- omap->port_mode[2] = pdata->port_mode[2];
- omap->es2_compatibility = pdata->es2_compatibility;
- omap->ohci = hcd_to_ohci(hcd);
+ /*
+ * Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_io;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ret = -ENODEV;
+ hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
+ dev_name(dev));
+ if (!hcd) {
+ dev_err(dev, "usb_create_hcd failed\n");
+ goto err_io;
+ }
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
+ hcd->regs = regs;
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "OHCI ioremap failed\n");
- ret = -ENOMEM;
- goto err_ioremap;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- omap->uhh_base = ioremap(res->start, resource_size(res));
- if (!omap->uhh_base) {
- dev_err(&pdev->dev, "UHH ioremap failed\n");
- ret = -ENOMEM;
- goto err_uhh_ioremap;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- omap->tll_base = ioremap(res->start, resource_size(res));
- if (!omap->tll_base) {
- dev_err(&pdev->dev, "TLL ioremap failed\n");
- ret = -ENOMEM;
- goto err_tll_ioremap;
- }
-
- ret = omap3_start_ohci(omap, hcd);
- if (ret) {
- dev_dbg(&pdev->dev, "failed to start ehci\n");
- goto err_start;
- }
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
- ohci_hcd_init(omap->ohci);
+ ohci = hcd_to_ohci(hcd);
+ /*
+ * RemoteWakeupConnected has to be set explicitly before
+ * calling ohci_run. The reset value of RWC is 0.
+ */
+ ohci->hc_control = OHCI_CTRL_RWC;
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, irq, 0);
if (ret) {
- dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+ dev_dbg(dev, "failed to add hcd with err %d\n", ret);
goto err_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
return 0;
err_add_hcd:
- omap3_stop_ohci(omap, hcd);
-
-err_start:
- iounmap(omap->tll_base);
-
-err_tll_ioremap:
- iounmap(omap->uhh_base);
-
-err_uhh_ioremap:
- iounmap(hcd->regs);
-
-err_ioremap:
+ pm_runtime_put_sync(dev);
usb_put_hcd(hcd);
-err_create_hcd:
- kfree(omap);
-err_pdata:
-err_disabled:
+err_io:
+ iounmap(regs);
+
return ret;
}
@@ -697,39 +157,55 @@ err_disabled:
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*/
-static int __devexit ohci_hcd_omap3_remove(struct platform_device *pdev)
+static int ohci_hcd_omap3_remove(struct platform_device *pdev)
{
- struct ohci_hcd_omap3 *omap = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ohci_to_hcd(omap->ohci);
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
- usb_remove_hcd(hcd);
- omap3_stop_ohci(omap, hcd);
iounmap(hcd->regs);
- iounmap(omap->tll_base);
- iounmap(omap->uhh_base);
+ usb_remove_hcd(hcd);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
usb_put_hcd(hcd);
- kfree(omap);
-
return 0;
}
-static void ohci_hcd_omap3_shutdown(struct platform_device *pdev)
-{
- struct ohci_hcd_omap3 *omap = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ohci_to_hcd(omap->ohci);
+static const struct of_device_id omap_ohci_dt_ids[] = {
+ { .compatible = "ti,ohci-omap3" },
+ { }
+};
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-}
+MODULE_DEVICE_TABLE(of, omap_ohci_dt_ids);
static struct platform_driver ohci_hcd_omap3_driver = {
.probe = ohci_hcd_omap3_probe,
- .remove = __devexit_p(ohci_hcd_omap3_remove),
- .shutdown = ohci_hcd_omap3_shutdown,
+ .remove = ohci_hcd_omap3_remove,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ohci-omap3",
+ .of_match_table = omap_ohci_dt_ids,
},
};
+static int __init ohci_omap3_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ohci_init_driver(&ohci_omap3_hc_driver, NULL);
+ return platform_driver_register(&ohci_hcd_omap3_driver);
+}
+module_init(ohci_omap3_init);
+
+static void __exit ohci_omap3_cleanup(void)
+{
+ platform_driver_unregister(&ohci_hcd_omap3_driver);
+}
+module_exit(ohci_omap3_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS("platform:ohci-omap3");
MODULE_AUTHOR("Anand Gadiyar <gadiyar@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 36ee9a666e9..bb150967572 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -14,31 +14,20 @@
* This file is licenced under the GPL.
*/
-#ifndef CONFIG_PCI
-#error "This file is PCI bus glue. CONFIG_PCI must be defined."
-#endif
-
-#include <linux/pci.h>
#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include "ohci.h"
+#include "pci-quirks.h"
-/* constants used to work around PM-related transfer
- * glitches in some AMD 700 series southbridges
- */
-#define AB_REG_BAR 0xf0
-#define AB_INDX(addr) ((addr) + 0x00)
-#define AB_DATA(addr) ((addr) + 0x04)
-#define AX_INDXC 0X30
-#define AX_DATAC 0x34
+#define DRIVER_DESC "OHCI PCI platform driver"
-#define NB_PCIE_INDX_ADDR 0xe0
-#define NB_PCIE_INDX_DATA 0xe4
-#define PCIE_P_CNTL 0x10040
-#define BIF_NB 0x10002
+static const char hcd_name[] = "ohci-pci";
-static struct pci_dev *amd_smbus_dev;
-static struct pci_dev *amd_hb_dev;
-static int amd_ohci_iso_count;
/*-------------------------------------------------------------------------*/
@@ -141,13 +130,6 @@ static void ohci_quirk_nec_worker(struct work_struct *work)
struct ohci_hcd *ohci = container_of(work, struct ohci_hcd, nec_work);
int status;
- status = ohci_init(ohci);
- if (status != 0) {
- ohci_err(ohci, "Restarting NEC controller failed in %s, %d\n",
- "ohci_init", status);
- return;
- }
-
status = ohci_restart(ohci);
if (status != 0)
ohci_err(ohci, "Restarting NEC controller failed in %s, %d\n",
@@ -168,134 +150,20 @@ static int ohci_quirk_nec(struct usb_hcd *hcd)
static int ohci_quirk_amd700(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- u8 rev = 0;
-
- if (!amd_smbus_dev)
- amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
- PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
- if (!amd_smbus_dev)
- return 0;
- pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+ if (usb_amd_find_chipset_info())
+ ohci->flags |= OHCI_QUIRK_AMD_PLL;
/* SB800 needs pre-fetch fix */
- if ((rev >= 0x40) && (rev <= 0x4f)) {
+ if (usb_amd_prefetch_quirk()) {
ohci->flags |= OHCI_QUIRK_AMD_PREFETCH;
ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
}
- if ((rev > 0x3b) || (rev < 0x30)) {
- pci_dev_put(amd_smbus_dev);
- amd_smbus_dev = NULL;
- return 0;
- }
-
- amd_ohci_iso_count++;
-
- if (!amd_hb_dev)
- amd_hb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9600, NULL);
-
- ohci->flags |= OHCI_QUIRK_AMD_ISO;
- ohci_dbg(ohci, "enabled AMD ISO transfers quirk\n");
-
+ ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND;
return 0;
}
-/* nVidia controllers continue to drive Reset signalling on the bus
- * even after system shutdown, wasting power. This flag tells the
- * shutdown routine to leave the controller OPERATIONAL instead of RESET.
- */
-static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-
- ohci->flags |= OHCI_QUIRK_SHUTDOWN;
- ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
-
- return 0;
-}
-
-/*
- * The hardware normally enables the A-link power management feature, which
- * lets the system lower the power consumption in idle states.
- *
- * Assume the system is configured to have USB 1.1 ISO transfers going
- * to or from a USB device. Without this quirk, that stream may stutter
- * or have breaks occasionally. For transfers going to speakers, this
- * makes a very audible mess...
- *
- * That audio playback corruption is due to the audio stream getting
- * interrupted occasionally when the link goes in lower power state
- * This USB quirk prevents the link going into that lower power state
- * during audio playback or other ISO operations.
- */
-static void quirk_amd_pll(int on)
-{
- u32 addr;
- u32 val;
- u32 bit = (on > 0) ? 1 : 0;
-
- pci_read_config_dword(amd_smbus_dev, AB_REG_BAR, &addr);
-
- /* BIT names/meanings are NDA-protected, sorry ... */
-
- outl(AX_INDXC, AB_INDX(addr));
- outl(0x40, AB_DATA(addr));
- outl(AX_DATAC, AB_INDX(addr));
- val = inl(AB_DATA(addr));
- val &= ~((1 << 3) | (1 << 4) | (1 << 9));
- val |= (bit << 3) | ((!bit) << 4) | ((!bit) << 9);
- outl(val, AB_DATA(addr));
-
- if (amd_hb_dev) {
- addr = PCIE_P_CNTL;
- pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
-
- pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
- val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
- val |= bit | (bit << 3) | (bit << 12);
- val |= ((!bit) << 4) | ((!bit) << 9);
- pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
-
- addr = BIF_NB;
- pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
-
- pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
- val &= ~(1 << 8);
- val |= bit << 8;
- pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
- }
-}
-
-static void amd_iso_dev_put(void)
-{
- amd_ohci_iso_count--;
- if (amd_ohci_iso_count == 0) {
- if (amd_smbus_dev) {
- pci_dev_put(amd_smbus_dev);
- amd_smbus_dev = NULL;
- }
- if (amd_hb_dev) {
- pci_dev_put(amd_hb_dev);
- amd_hb_dev = NULL;
- }
- }
-
-}
-
-static void sb800_prefetch(struct ohci_hcd *ohci, int on)
-{
- struct pci_dev *pdev;
- u16 misc;
-
- pdev = to_pci_dev(ohci_to_hcd(ohci)->self.controller);
- pci_read_config_word(pdev, 0x50, &misc);
- if (on == 0)
- pci_write_config_word(pdev, 0x50, misc & 0xfcff);
- else
- pci_write_config_word(pdev, 0x50, misc | 0x0300);
-}
-
/* List of quirks for OHCI */
static const struct pci_device_id ohci_pci_quirks[] = {
{
@@ -346,10 +214,6 @@ static const struct pci_device_id ohci_pci_quirks[] = {
PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
.driver_data = (unsigned long)ohci_quirk_amd700,
},
- {
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
- .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown,
- },
/* FIXME for some of the early AMD 760 southbridges, OHCI
* won't work at all. blacklist them.
@@ -361,10 +225,10 @@ static const struct pci_device_id ohci_pci_quirks[] = {
static int ohci_pci_reset (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int ret = 0;
if (hcd->self.controller) {
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
const struct pci_device_id *quirk_id;
quirk_id = pci_match_id(ohci_pci_quirks, pdev);
@@ -374,144 +238,33 @@ static int ohci_pci_reset (struct usb_hcd *hcd)
ret = quirk(hcd);
}
}
- if (ret == 0) {
- ohci_hcd_init (ohci);
- return ohci_init (ohci);
- }
- return ret;
-}
-
-
-static int __devinit ohci_pci_start (struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
-#ifdef CONFIG_PM /* avoid warnings about unused pdev */
- if (hcd->self.controller) {
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
-
- /* RWC may not be set for add-in PCI cards, since boot
- * firmware probably ignored them. This transfers PCI
- * PM wakeup capabilities.
- */
- if (device_can_wakeup(&pdev->dev))
- ohci->hc_control |= OHCI_CTRL_RWC;
- }
-#endif /* CONFIG_PM */
-
- ret = ohci_run (ohci);
- if (ret < 0) {
- ohci_err (ohci, "can't start\n");
- ohci_stop (hcd);
- }
+ if (ret == 0)
+ ret = ohci_setup(hcd);
+ /*
+ * After ohci setup RWC may not be set for add-in PCI cards.
+ * This transfers PCI PM wakeup capabilities.
+ */
+ if (device_can_wakeup(&pdev->dev))
+ ohci->hc_control |= OHCI_CTRL_RWC;
return ret;
}
-#ifdef CONFIG_PM
+static struct hc_driver __read_mostly ohci_pci_hc_driver;
-static int ohci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
-{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- unsigned long flags;
- int rc = 0;
-
- /* Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible, bail out if RH has been resumed. Use
- * the spinlock to properly synchronize with possible pending
- * RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
- */
- spin_lock_irqsave (&ohci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
- rc = -EINVAL;
- goto bail;
- }
- ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
- (void)ohci_readl(ohci, &ohci->regs->intrdisable);
-
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- bail:
- spin_unlock_irqrestore (&ohci->lock, flags);
-
- return rc;
-}
-
-
-static int ohci_pci_resume(struct usb_hcd *hcd, bool hibernated)
-{
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
- /* Make sure resume from hibernation re-enumerates everything */
- if (hibernated)
- ohci_usb_reset(hcd_to_ohci(hcd));
-
- ohci_finish_controller_resume(hcd);
- return 0;
-}
-
-#endif /* CONFIG_PM */
-
-
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ohci_pci_hc_driver = {
- .description = hcd_name,
- .product_desc = "OHCI Host Controller",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_MEMORY | HCD_USB11,
-
- /*
- * basic lifecycle operations
- */
+static const struct ohci_driver_overrides pci_overrides __initconst = {
+ .product_desc = "OHCI PCI host controller",
.reset = ohci_pci_reset,
- .start = ohci_pci_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
-#ifdef CONFIG_PM
- .pci_suspend = ohci_pci_suspend,
- .pci_resume = ohci_pci_resume,
-#endif
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
};
-/*-------------------------------------------------------------------------*/
-
-
static const struct pci_device_id pci_ids [] = { {
/* handle any USB OHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_OHCI, ~0),
.driver_data = (unsigned long) &ohci_pci_hc_driver,
+ }, {
+ /* The device in the ConneXT I/O hub has no class reg */
+ PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_OHCI),
+ .driver_data = (unsigned long) &ohci_pci_hc_driver,
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
@@ -525,9 +278,38 @@ static struct pci_driver ohci_pci_driver = {
.remove = usb_hcd_pci_remove,
.shutdown = usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
.driver = {
.pm = &usb_hcd_pci_pm_ops
},
#endif
};
+
+static int __init ohci_pci_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
+
+#ifdef CONFIG_PM
+ /* Entries for the PCI suspend/resume callbacks are special */
+ ohci_pci_hc_driver.pci_suspend = ohci_suspend;
+ ohci_pci_hc_driver.pci_resume = ohci_resume;
+#endif
+
+ return pci_register_driver(&ohci_pci_driver);
+}
+module_init(ohci_pci_init);
+
+static void __exit ohci_pci_cleanup(void)
+{
+ pci_unregister_driver(&ohci_pci_driver);
+}
+module_exit(ohci_pci_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: ehci_pci");
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
new file mode 100644
index 00000000000..4369299064c
--- /dev/null
+++ b/drivers/usb/host/ohci-platform.c
@@ -0,0 +1,395 @@
+/*
+ * Generic platform ohci driver
+ *
+ * Copyright 2007 Michael Buesch <m@bues.ch>
+ * Copyright 2011-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Derived from the OHCI-SSB driver
+ * Derived from the OHCI-PCI driver
+ * Copyright 1999 Roman Weissgaerber
+ * Copyright 2000-2002 David Brownell
+ * Copyright 1999 Linus Torvalds
+ * Copyright 1999 Gregory P. Smith
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/hrtimer.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/usb/ohci_pdriver.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
+#define DRIVER_DESC "OHCI generic platform driver"
+#define OHCI_MAX_CLKS 3
+#define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv)
+
+struct ohci_platform_priv {
+ struct clk *clks[OHCI_MAX_CLKS];
+ struct reset_control *rst;
+ struct phy *phy;
+};
+
+static const char hcd_name[] = "ohci-platform";
+
+static int ohci_platform_reset(struct usb_hcd *hcd)
+{
+ struct platform_device *pdev = to_platform_device(hcd->self.controller);
+ struct usb_ohci_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ if (pdata->no_big_frame_no)
+ ohci->flags |= OHCI_QUIRK_FRAME_NO;
+ if (pdata->num_ports)
+ ohci->num_ports = pdata->num_ports;
+
+ return ohci_setup(hcd);
+}
+
+static int ohci_platform_power_on(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+ int clk, ret;
+
+ for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++) {
+ ret = clk_prepare_enable(priv->clks[clk]);
+ if (ret)
+ goto err_disable_clks;
+ }
+
+ if (priv->phy) {
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto err_disable_clks;
+
+ ret = phy_power_on(priv->phy);
+ if (ret)
+ goto err_exit_phy;
+ }
+
+ return 0;
+
+err_exit_phy:
+ phy_exit(priv->phy);
+err_disable_clks:
+ while (--clk >= 0)
+ clk_disable_unprepare(priv->clks[clk]);
+
+ return ret;
+}
+
+static void ohci_platform_power_off(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+ int clk;
+
+ if (priv->phy) {
+ phy_power_off(priv->phy);
+ phy_exit(priv->phy);
+ }
+
+ for (clk = OHCI_MAX_CLKS - 1; clk >= 0; clk--)
+ if (priv->clks[clk])
+ clk_disable_unprepare(priv->clks[clk]);
+}
+
+static struct hc_driver __read_mostly ohci_platform_hc_driver;
+
+static const struct ohci_driver_overrides platform_overrides __initconst = {
+ .product_desc = "Generic Platform OHCI controller",
+ .reset = ohci_platform_reset,
+ .extra_priv_size = sizeof(struct ohci_platform_priv),
+};
+
+static struct usb_ohci_pdata ohci_platform_defaults = {
+ .power_on = ohci_platform_power_on,
+ .power_suspend = ohci_platform_power_off,
+ .power_off = ohci_platform_power_off,
+};
+
+static int ohci_platform_probe(struct platform_device *dev)
+{
+ struct usb_hcd *hcd;
+ struct resource *res_mem;
+ struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
+ struct ohci_platform_priv *priv;
+ struct ohci_hcd *ohci;
+ int err, irq, clk = 0;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /*
+ * Use reasonable defaults so platforms don't have to provide these
+ * with DT probing on ARM.
+ */
+ if (!pdata)
+ pdata = &ohci_platform_defaults;
+
+ err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ dev_err(&dev->dev, "no irq provided");
+ return irq;
+ }
+
+ res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res_mem) {
+ dev_err(&dev->dev, "no memory resource provided");
+ return -ENXIO;
+ }
+
+ hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev,
+ dev_name(&dev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ platform_set_drvdata(dev, hcd);
+ dev->dev.platform_data = pdata;
+ priv = hcd_to_ohci_priv(hcd);
+ ohci = hcd_to_ohci(hcd);
+
+ if (pdata == &ohci_platform_defaults && dev->dev.of_node) {
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-regs"))
+ ohci->flags |= OHCI_QUIRK_BE_MMIO;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-desc"))
+ ohci->flags |= OHCI_QUIRK_BE_DESC;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian"))
+ ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
+
+ priv->phy = devm_phy_get(&dev->dev, "usb");
+ if (IS_ERR(priv->phy)) {
+ err = PTR_ERR(priv->phy);
+ if (err == -EPROBE_DEFER)
+ goto err_put_hcd;
+ priv->phy = NULL;
+ }
+
+ for (clk = 0; clk < OHCI_MAX_CLKS; clk++) {
+ priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
+ if (IS_ERR(priv->clks[clk])) {
+ err = PTR_ERR(priv->clks[clk]);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ priv->clks[clk] = NULL;
+ break;
+ }
+ }
+
+ }
+
+ priv->rst = devm_reset_control_get_optional(&dev->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ err = PTR_ERR(priv->rst);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ priv->rst = NULL;
+ } else {
+ err = reset_control_deassert(priv->rst);
+ if (err)
+ goto err_put_clks;
+ }
+
+ if (pdata->big_endian_desc)
+ ohci->flags |= OHCI_QUIRK_BE_DESC;
+ if (pdata->big_endian_mmio)
+ ohci->flags |= OHCI_QUIRK_BE_MMIO;
+
+#ifndef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
+ if (ohci->flags & OHCI_QUIRK_BE_MMIO) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_OHCI_BIG_ENDIAN_MMIO not set\n");
+ err = -EINVAL;
+ goto err_reset;
+ }
+#endif
+#ifndef CONFIG_USB_OHCI_BIG_ENDIAN_DESC
+ if (ohci->flags & OHCI_QUIRK_BE_DESC) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_OHCI_BIG_ENDIAN_DESC not set\n");
+ err = -EINVAL;
+ goto err_reset;
+ }
+#endif
+
+ if (pdata->power_on) {
+ err = pdata->power_on(dev);
+ if (err < 0)
+ goto err_reset;
+ }
+
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = resource_size(res_mem);
+
+ hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
+ goto err_power;
+ }
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err)
+ goto err_power;
+
+ device_wakeup_enable(hcd->self.controller);
+
+ platform_set_drvdata(dev, hcd);
+
+ return err;
+
+err_power:
+ if (pdata->power_off)
+ pdata->power_off(dev);
+err_reset:
+ if (priv->rst)
+ reset_control_assert(priv->rst);
+err_put_clks:
+ while (--clk >= 0)
+ clk_put(priv->clks[clk]);
+err_put_hcd:
+ if (pdata == &ohci_platform_defaults)
+ dev->dev.platform_data = NULL;
+
+ usb_put_hcd(hcd);
+
+ return err;
+}
+
+static int ohci_platform_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
+ struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+ int clk;
+
+ usb_remove_hcd(hcd);
+
+ if (pdata->power_off)
+ pdata->power_off(dev);
+
+ if (priv->rst)
+ reset_control_assert(priv->rst);
+
+ for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++)
+ clk_put(priv->clks[clk]);
+
+ usb_put_hcd(hcd);
+
+ if (pdata == &ohci_platform_defaults)
+ dev->dev.platform_data = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int ohci_platform_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct usb_ohci_pdata *pdata = dev->platform_data;
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ bool do_wakeup = device_may_wakeup(dev);
+ int ret;
+
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+
+ if (pdata->power_suspend)
+ pdata->power_suspend(pdev);
+
+ return ret;
+}
+
+static int ohci_platform_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+
+ if (pdata->power_on) {
+ int err = pdata->power_on(pdev);
+ if (err < 0)
+ return err;
+ }
+
+ ohci_resume(hcd, false);
+ return 0;
+}
+
+#else /* !CONFIG_PM */
+#define ohci_platform_suspend NULL
+#define ohci_platform_resume NULL
+#endif /* CONFIG_PM */
+
+static const struct of_device_id ohci_platform_ids[] = {
+ { .compatible = "generic-ohci", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ohci_platform_ids);
+
+static const struct platform_device_id ohci_platform_table[] = {
+ { "ohci-platform", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, ohci_platform_table);
+
+static const struct dev_pm_ops ohci_platform_pm_ops = {
+ .suspend = ohci_platform_suspend,
+ .resume = ohci_platform_resume,
+};
+
+static struct platform_driver ohci_platform_driver = {
+ .id_table = ohci_platform_table,
+ .probe = ohci_platform_probe,
+ .remove = ohci_platform_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ohci-platform",
+ .pm = &ohci_platform_pm_ops,
+ .of_match_table = ohci_platform_ids,
+ }
+};
+
+static int __init ohci_platform_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ohci_init_driver(&ohci_platform_hc_driver, &platform_overrides);
+ return platform_driver_register(&ohci_platform_driver);
+}
+module_init(ohci_platform_init);
+
+static void __exit ohci_platform_cleanup(void)
+{
+ platform_driver_unregister(&ohci_platform_driver);
+}
+module_exit(ohci_platform_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_AUTHOR("Alan Stern");
+MODULE_LICENSE("GPL");
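For boards not probed via device tree, the generic driver above is configured entirely through struct usb_ohci_pdata from <linux/usb/ohci_pdriver.h>. The following sketch is illustrative and not part of this patch: it shows one plausible way board code could register a matching "ohci-platform" device with board-specific power hooks and big-endian quirks. The my_board_* names, base address and IRQ are invented; only the pdata fields and the "ohci-platform" device name come from the driver above.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/usb/ohci_pdriver.h>

/* Illustrative values -- not taken from this patch. */
#define MY_BOARD_OHCI_BASE	0x10005000
#define MY_BOARD_OHCI_IRQ	42

static int my_board_ohci_power_on(struct platform_device *pdev)
{
	/* enable board-specific clocks/regulators for the controller */
	return 0;
}

static void my_board_ohci_power_off(struct platform_device *pdev)
{
	/* undo whatever power_on() did */
}

static struct usb_ohci_pdata my_board_ohci_pdata = {
	.big_endian_desc = 1,	/* probe maps this to OHCI_QUIRK_BE_DESC */
	.big_endian_mmio = 1,	/* probe maps this to OHCI_QUIRK_BE_MMIO */
	.power_on	 = my_board_ohci_power_on,
	.power_off	 = my_board_ohci_power_off,
	.power_suspend	 = my_board_ohci_power_off,
};

static struct resource my_board_ohci_res[] = {
	DEFINE_RES_MEM(MY_BOARD_OHCI_BASE, 0x1000),
	DEFINE_RES_IRQ(MY_BOARD_OHCI_IRQ),
};

static int __init my_board_ohci_register(void)
{
	struct platform_device *pdev;

	/* Name matches ohci_platform_table above, so ohci_platform_probe() binds. */
	pdev = platform_device_register_resndata(NULL, "ohci-platform", -1,
			my_board_ohci_res, ARRAY_SIZE(my_board_ohci_res),
			&my_board_ohci_pdata, sizeof(my_board_ohci_pdata));
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
device_initcall(my_board_ohci_register);

Because the probe rejects the big-endian quirks when CONFIG_USB_OHCI_BIG_ENDIAN_MMIO or CONFIG_USB_OHCI_BIG_ENDIAN_DESC are not set, a board like this would also need those options enabled.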
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
deleted file mode 100644
index 653d6a60edb..00000000000
--- a/drivers/usb/host/ohci-pnx4008.c
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * drivers/usb/host/ohci-pnx4008.c
- *
- * driver for Philips PNX4008 USB Host
- *
- * Authors: Dmitry Chigirev <source@mvista.com>
- * Vitaly Wool <vitalywool@gmail.com>
- *
- * register initialization is based on code examples provided by Philips
- * Copyright (c) 2005 Koninklijke Philips Electronics N.V.
- *
- * NOTE: This driver does not have suspend/resume functionality
- * This driver is intended for engineering development purposes only
- *
- * 2005-2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-
-#include <mach/hardware.h>
-#include <asm/io.h>
-
-#include <mach/platform.h>
-#include <mach/irqs.h>
-#include <mach/gpio.h>
-
-#define USB_CTRL IO_ADDRESS(PNX4008_PWRMAN_BASE + 0x64)
-
-/* USB_CTRL bit defines */
-#define USB_SLAVE_HCLK_EN (1 << 24)
-#define USB_HOST_NEED_CLK_EN (1 << 21)
-
-#define USB_OTG_CLK_CTRL IO_ADDRESS(PNX4008_USB_CONFIG_BASE + 0xFF4)
-#define USB_OTG_CLK_STAT IO_ADDRESS(PNX4008_USB_CONFIG_BASE + 0xFF8)
-
-/* USB_OTG_CLK_CTRL bit defines */
-#define AHB_M_CLOCK_ON (1 << 4)
-#define OTG_CLOCK_ON (1 << 3)
-#define I2C_CLOCK_ON (1 << 2)
-#define DEV_CLOCK_ON (1 << 1)
-#define HOST_CLOCK_ON (1 << 0)
-
-#define USB_OTG_STAT_CONTROL IO_ADDRESS(PNX4008_USB_CONFIG_BASE + 0x110)
-
-/* USB_OTG_STAT_CONTROL bit defines */
-#define TRANSPARENT_I2C_EN (1 << 7)
-#define HOST_EN (1 << 0)
-
-/* ISP1301 USB transceiver I2C registers */
-#define ISP1301_MODE_CONTROL_1 0x04 /* u8 read, set, +1 clear */
-
-#define MC1_SPEED_REG (1 << 0)
-#define MC1_SUSPEND_REG (1 << 1)
-#define MC1_DAT_SE0 (1 << 2)
-#define MC1_TRANSPARENT (1 << 3)
-#define MC1_BDIS_ACON_EN (1 << 4)
-#define MC1_OE_INT_EN (1 << 5)
-#define MC1_UART_EN (1 << 6)
-#define MC1_MASK 0x7f
-
-#define ISP1301_MODE_CONTROL_2 0x12 /* u8 read, set, +1 clear */
-
-#define MC2_GLOBAL_PWR_DN (1 << 0)
-#define MC2_SPD_SUSP_CTRL (1 << 1)
-#define MC2_BI_DI (1 << 2)
-#define MC2_TRANSP_BDIR0 (1 << 3)
-#define MC2_TRANSP_BDIR1 (1 << 4)
-#define MC2_AUDIO_EN (1 << 5)
-#define MC2_PSW_EN (1 << 6)
-#define MC2_EN2V7 (1 << 7)
-
-#define ISP1301_OTG_CONTROL_1 0x06 /* u8 read, set, +1 clear */
-# define OTG1_DP_PULLUP (1 << 0)
-# define OTG1_DM_PULLUP (1 << 1)
-# define OTG1_DP_PULLDOWN (1 << 2)
-# define OTG1_DM_PULLDOWN (1 << 3)
-# define OTG1_ID_PULLDOWN (1 << 4)
-# define OTG1_VBUS_DRV (1 << 5)
-# define OTG1_VBUS_DISCHRG (1 << 6)
-# define OTG1_VBUS_CHRG (1 << 7)
-#define ISP1301_OTG_STATUS 0x10 /* u8 readonly */
-# define OTG_B_SESS_END (1 << 6)
-# define OTG_B_SESS_VLD (1 << 7)
-
-#define ISP1301_I2C_ADDR 0x2C
-
-#define ISP1301_I2C_MODE_CONTROL_1 0x4
-#define ISP1301_I2C_MODE_CONTROL_2 0x12
-#define ISP1301_I2C_OTG_CONTROL_1 0x6
-#define ISP1301_I2C_OTG_CONTROL_2 0x10
-#define ISP1301_I2C_INTERRUPT_SOURCE 0x8
-#define ISP1301_I2C_INTERRUPT_LATCH 0xA
-#define ISP1301_I2C_INTERRUPT_FALLING 0xC
-#define ISP1301_I2C_INTERRUPT_RISING 0xE
-#define ISP1301_I2C_REG_CLEAR_ADDR 1
-
-static struct i2c_driver isp1301_driver;
-static struct i2c_client *isp1301_i2c_client;
-
-extern int usb_disabled(void);
-extern int ocpi_enable(void);
-
-static struct clk *usb_clk;
-
-static const unsigned short normal_i2c[] =
- { ISP1301_I2C_ADDR, ISP1301_I2C_ADDR + 1, I2C_CLIENT_END };
-
-static int isp1301_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- return 0;
-}
-
-static int isp1301_remove(struct i2c_client *client)
-{
- return 0;
-}
-
-static const struct i2c_device_id isp1301_id[] = {
- { "isp1301_pnx", 0 },
- { }
-};
-
-static struct i2c_driver isp1301_driver = {
- .driver = {
- .name = "isp1301_pnx",
- },
- .probe = isp1301_probe,
- .remove = isp1301_remove,
- .id_table = isp1301_id,
-};
-
-static void i2c_write(u8 buf, u8 subaddr)
-{
- char tmpbuf[2];
-
- tmpbuf[0] = subaddr; /*register number */
- tmpbuf[1] = buf; /*register data */
- i2c_master_send(isp1301_i2c_client, &tmpbuf[0], 2);
-}
-
-static void isp1301_configure(void)
-{
- /* PNX4008 only supports DAT_SE0 USB mode */
- /* PNX4008 R2A requires setting the MAX603 to output 3.6V */
- /* Power up external charge-pump */
-
- i2c_write(MC1_DAT_SE0 | MC1_SPEED_REG, ISP1301_I2C_MODE_CONTROL_1);
- i2c_write(~(MC1_DAT_SE0 | MC1_SPEED_REG),
- ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR);
- i2c_write(MC2_BI_DI | MC2_PSW_EN | MC2_SPD_SUSP_CTRL,
- ISP1301_I2C_MODE_CONTROL_2);
- i2c_write(~(MC2_BI_DI | MC2_PSW_EN | MC2_SPD_SUSP_CTRL),
- ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR);
- i2c_write(OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN,
- ISP1301_I2C_OTG_CONTROL_1);
- i2c_write(~(OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN),
- ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR);
- i2c_write(0xFF,
- ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR);
- i2c_write(0xFF,
- ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR);
- i2c_write(0xFF,
- ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR);
-
-}
-
-static inline void isp1301_vbus_on(void)
-{
- i2c_write(OTG1_VBUS_DRV, ISP1301_I2C_OTG_CONTROL_1);
-}
-
-static inline void isp1301_vbus_off(void)
-{
- i2c_write(OTG1_VBUS_DRV,
- ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR);
-}
-
-static void pnx4008_start_hc(void)
-{
- unsigned long tmp = __raw_readl(USB_OTG_STAT_CONTROL) | HOST_EN;
- __raw_writel(tmp, USB_OTG_STAT_CONTROL);
- isp1301_vbus_on();
-}
-
-static void pnx4008_stop_hc(void)
-{
- unsigned long tmp;
- isp1301_vbus_off();
- tmp = __raw_readl(USB_OTG_STAT_CONTROL) & ~HOST_EN;
- __raw_writel(tmp, USB_OTG_STAT_CONTROL);
-}
-
-static int __devinit ohci_pnx4008_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run(ohci)) < 0) {
- dev_err(hcd->self.controller, "can't start\n");
- ohci_stop(hcd);
- return ret;
- }
- return 0;
-}
-
-static const struct hc_driver ohci_pnx4008_hc_driver = {
- .description = hcd_name,
- .product_desc = "pnx4008 OHCI",
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- .hcd_priv_size = sizeof(struct ohci_hcd),
- /*
- * basic lifecycle operations
- */
- .start = ohci_pnx4008_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-#define USB_CLOCK_MASK (AHB_M_CLOCK_ON | OTG_CLOCK_ON | HOST_CLOCK_ON | I2C_CLOCK_ON)
-
-static void pnx4008_set_usb_bits(void)
-{
- start_int_set_falling_edge(SE_USB_OTG_ATX_INT_N);
- start_int_ack(SE_USB_OTG_ATX_INT_N);
- start_int_umask(SE_USB_OTG_ATX_INT_N);
-
- start_int_set_rising_edge(SE_USB_OTG_TIMER_INT);
- start_int_ack(SE_USB_OTG_TIMER_INT);
- start_int_umask(SE_USB_OTG_TIMER_INT);
-
- start_int_set_rising_edge(SE_USB_I2C_INT);
- start_int_ack(SE_USB_I2C_INT);
- start_int_umask(SE_USB_I2C_INT);
-
- start_int_set_rising_edge(SE_USB_INT);
- start_int_ack(SE_USB_INT);
- start_int_umask(SE_USB_INT);
-
- start_int_set_rising_edge(SE_USB_NEED_CLK_INT);
- start_int_ack(SE_USB_NEED_CLK_INT);
- start_int_umask(SE_USB_NEED_CLK_INT);
-
- start_int_set_rising_edge(SE_USB_AHB_NEED_CLK_INT);
- start_int_ack(SE_USB_AHB_NEED_CLK_INT);
- start_int_umask(SE_USB_AHB_NEED_CLK_INT);
-}
-
-static void pnx4008_unset_usb_bits(void)
-{
- start_int_mask(SE_USB_OTG_ATX_INT_N);
- start_int_mask(SE_USB_OTG_TIMER_INT);
- start_int_mask(SE_USB_I2C_INT);
- start_int_mask(SE_USB_INT);
- start_int_mask(SE_USB_NEED_CLK_INT);
- start_int_mask(SE_USB_AHB_NEED_CLK_INT);
-}
-
-static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = 0;
- struct ohci_hcd *ohci;
- const struct hc_driver *driver = &ohci_pnx4008_hc_driver;
- struct i2c_adapter *i2c_adap;
- struct i2c_board_info i2c_info;
-
- int ret = 0, irq;
-
- dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (pnx4008)\n", hcd_name);
- if (usb_disabled()) {
- err("USB is disabled");
- ret = -ENODEV;
- goto out;
- }
-
- if (pdev->num_resources != 2
- || pdev->resource[0].flags != IORESOURCE_MEM
- || pdev->resource[1].flags != IORESOURCE_IRQ) {
- err("Invalid resource configuration");
- ret = -ENODEV;
- goto out;
- }
-
- /* Enable AHB slave USB clock, needed for further USB clock control */
- __raw_writel(USB_SLAVE_HCLK_EN | (1 << 19), USB_CTRL);
-
- ret = i2c_add_driver(&isp1301_driver);
- if (ret < 0) {
- err("failed to add ISP1301 driver");
- goto out;
- }
- i2c_adap = i2c_get_adapter(2);
- memset(&i2c_info, 0, sizeof(struct i2c_board_info));
- strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);
- isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
- normal_i2c, NULL);
- i2c_put_adapter(i2c_adap);
- if (!isp1301_i2c_client) {
- err("failed to connect I2C to ISP1301 USB Transceiver");
- ret = -ENODEV;
- goto out_i2c_driver;
- }
-
- isp1301_configure();
-
- /* Enable USB PLL */
- usb_clk = clk_get(&pdev->dev, "ck_pll5");
- if (IS_ERR(usb_clk)) {
- err("failed to acquire USB PLL");
- ret = PTR_ERR(usb_clk);
- goto out1;
- }
-
- ret = clk_enable(usb_clk);
- if (ret < 0) {
- err("failed to start USB PLL");
- goto out2;
- }
-
- ret = clk_set_rate(usb_clk, 48000);
- if (ret < 0) {
- err("failed to set USB clock rate");
- goto out3;
- }
-
- __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL);
-
- /* Set to enable all needed USB clocks */
- __raw_writel(USB_CLOCK_MASK, USB_OTG_CLK_CTRL);
-
- while ((__raw_readl(USB_OTG_CLK_STAT) & USB_CLOCK_MASK) !=
- USB_CLOCK_MASK) ;
-
- hcd = usb_create_hcd (driver, &pdev->dev, dev_name(&pdev->dev));
- if (!hcd) {
- err("Failed to allocate HC buffer");
- ret = -ENOMEM;
- goto out3;
- }
-
- /* Set all USB bits in the Start Enable register */
- pnx4008_set_usb_bits();
-
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- dev_dbg(&pdev->dev, "request_mem_region failed\n");
- ret = -ENOMEM;
- goto out4;
- }
- hcd->regs = (void __iomem *)pdev->resource[0].start;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = -ENXIO;
- goto out4;
- }
-
- pnx4008_start_hc();
- platform_set_drvdata(pdev, hcd);
- ohci = hcd_to_ohci(hcd);
- ohci_hcd_init(ohci);
-
- dev_info(&pdev->dev, "at 0x%p, irq %d\n", hcd->regs, hcd->irq);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
- if (ret == 0)
- return ret;
-
- pnx4008_stop_hc();
-out4:
- pnx4008_unset_usb_bits();
- usb_put_hcd(hcd);
-out3:
- clk_disable(usb_clk);
-out2:
- clk_put(usb_clk);
-out1:
- i2c_unregister_device(isp1301_i2c_client);
- isp1301_i2c_client = NULL;
-out_i2c_driver:
- i2c_del_driver(&isp1301_driver);
-out:
- return ret;
-}
-
-static int usb_hcd_pnx4008_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
- pnx4008_stop_hc();
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
- pnx4008_unset_usb_bits();
- clk_disable(usb_clk);
- clk_put(usb_clk);
- i2c_unregister_device(isp1301_i2c_client);
- isp1301_i2c_client = NULL;
- i2c_del_driver(&isp1301_driver);
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:usb-ohci");
-
-static struct platform_driver usb_hcd_pnx4008_driver = {
- .driver = {
- .name = "usb-ohci",
- .owner = THIS_MODULE,
- },
- .probe = usb_hcd_pnx4008_probe,
- .remove = usb_hcd_pnx4008_remove,
-};
-
diff --git a/drivers/usb/host/ohci-pnx8550.c b/drivers/usb/host/ohci-pnx8550.c
deleted file mode 100644
index 28467e288a9..00000000000
--- a/drivers/usb/host/ohci-pnx8550.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * OHCI HCD (Host Controller Driver) for USB.
- *
- * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
- * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
- * (C) Copyright 2002 Hewlett-Packard Company
- * (C) Copyright 2005 Embedded Alley Solutions, Inc.
- *
- * Bus Glue for PNX8550
- *
- * Written by Christopher Hoover <ch@hpl.hp.com>
- * Based on fragments of previous driver by Russell King et al.
- *
- * Modified for LH7A404 from ohci-sa1111.c
- * by Durgesh Pattamatta <pattamattad@sharpsec.com>
- *
- * Modified for PNX8550 from ohci-sa1111.c and sa-omap.c
- * by Vitaly Wool <vitalywool@gmail.com>
- *
- * This file is licenced under the GPL.
- */
-
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <asm/mach-pnx8550/usb.h>
-#include <asm/mach-pnx8550/int.h>
-#include <asm/mach-pnx8550/pci.h>
-
-#ifndef CONFIG_PNX8550
-#error "This file is PNX8550 bus glue. CONFIG_PNX8550 must be defined."
-#endif
-
-extern int usb_disabled(void);
-
-/*-------------------------------------------------------------------------*/
-
-static void pnx8550_start_hc(struct platform_device *dev)
-{
- /*
- * Set register CLK48CTL to enable and 48MHz
- */
- outl(0x00000003, PCI_BASE | 0x0004770c);
-
- /*
- * Set register CLK12CTL to enable and 12MHz
- */
- outl(0x00000003, PCI_BASE | 0x00047710);
-
- udelay(100);
-}
-
-static void pnx8550_stop_hc(struct platform_device *dev)
-{
- udelay(10);
-}
-
-
-/*-------------------------------------------------------------------------*/
-
-/* configure so an HC device and id are always provided */
-/* always called with process context; sleeping is OK */
-
-
-/**
- * usb_hcd_pnx8550_probe - initialize pnx8550-based HCDs
- * Context: !in_interrupt()
- *
- * Allocates basic resources for this USB host controller, and
- * then invokes the start() method for the HCD associated with it
- * through the hotplug entry's driver_data.
- *
- */
-int usb_hcd_pnx8550_probe (const struct hc_driver *driver,
- struct platform_device *dev)
-{
- int retval;
- struct usb_hcd *hcd;
-
- if (dev->resource[0].flags != IORESOURCE_MEM ||
- dev->resource[1].flags != IORESOURCE_IRQ) {
- dev_err (&dev->dev,"invalid resource type\n");
- return -ENOMEM;
- }
-
- hcd = usb_create_hcd (driver, &dev->dev, "pnx8550");
- if (!hcd)
- return -ENOMEM;
- hcd->rsrc_start = dev->resource[0].start;
- hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- dev_err(&dev->dev, "request_mem_region [0x%08llx, 0x%08llx] "
- "failed\n", hcd->rsrc_start, hcd->rsrc_len);
- retval = -EBUSY;
- goto err1;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&dev->dev, "ioremap [[0x%08llx, 0x%08llx] failed\n",
- hcd->rsrc_start, hcd->rsrc_len);
- retval = -ENOMEM;
- goto err2;
- }
-
- pnx8550_start_hc(dev);
-
- ohci_hcd_init(hcd_to_ohci(hcd));
-
- retval = usb_add_hcd(hcd, dev->resource[1].start, IRQF_DISABLED);
- if (retval == 0)
- return retval;
-
- pnx8550_stop_hc(dev);
- iounmap(hcd->regs);
- err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- err1:
- usb_put_hcd(hcd);
- return retval;
-}
-
-
-/* may be called without controller electrically present */
-/* may be called with controller, bus, and devices active */
-
-/**
- * usb_hcd_pnx8550_remove - shutdown processing for pnx8550-based HCDs
- * @dev: USB Host Controller being removed
- * Context: !in_interrupt()
- *
- * Reverses the effect of usb_hcd_pnx8550_probe(), first invoking
- * the HCD's stop() method. It is always called from a thread
- * context, normally "rmmod", "apmd", or something similar.
- *
- */
-void usb_hcd_pnx8550_remove (struct usb_hcd *hcd, struct platform_device *dev)
-{
- usb_remove_hcd(hcd);
- pnx8550_stop_hc(dev);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int __devinit
-ohci_pnx8550_start (struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
-
- ohci_dbg (ohci, "ohci_pnx8550_start, ohci:%p", ohci);
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run (ohci)) < 0) {
- err ("can't start %s", hcd->self.bus_name);
- ohci_stop (hcd);
- return ret;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ohci_pnx8550_hc_driver = {
- .description = hcd_name,
- .product_desc = "PNX8550 OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_pnx8550_start,
- .stop = ohci_stop,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/*-------------------------------------------------------------------------*/
-
-static int ohci_hcd_pnx8550_drv_probe(struct platform_device *pdev)
-{
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
- ret = usb_hcd_pnx8550_probe(&ohci_pnx8550_hc_driver, pdev);
- return ret;
-}
-
-static int ohci_hcd_pnx8550_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_hcd_pnx8550_remove(hcd, pdev);
- return 0;
-}
-
-MODULE_ALIAS("platform:pnx8550-ohci");
-
-static struct platform_driver ohci_hcd_pnx8550_driver = {
- .driver = {
- .name = "pnx8550-ohci",
- .owner = THIS_MODULE,
- },
- .probe = ohci_hcd_pnx8550_drv_probe,
- .remove = ohci_hcd_pnx8550_drv_remove,
-};
-
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index b2c2dbf0876..965e3e9e688 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -14,12 +14,14 @@
*/
#include <linux/signal.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/prom.h>
-static int __devinit
+static int
ohci_ppc_of_start(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
@@ -29,7 +31,8 @@ ohci_ppc_of_start(struct usb_hcd *hcd)
return ret;
if ((ret = ohci_run(ohci)) < 0) {
- err("can't start %s", ohci_to_hcd(ohci)->self.bus_name);
+ dev_err(hcd->self.controller, "can't start %s\n",
+ hcd->self.bus_name);
ohci_stop(hcd);
return ret;
}
@@ -80,8 +83,7 @@ static const struct hc_driver ohci_ppc_of_hc_driver = {
};
-static int __devinit
-ohci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *match)
+static int ohci_hcd_ppc_of_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
@@ -111,26 +113,20 @@ ohci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat
return -ENOMEM;
hcd->rsrc_start = res.start;
- hcd->rsrc_len = res.end - res.start + 1;
+ hcd->rsrc_len = resource_size(&res);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
- rv = -EBUSY;
+ hcd->regs = devm_ioremap_resource(&op->dev, &res);
+ if (IS_ERR(hcd->regs)) {
+ rv = PTR_ERR(hcd->regs);
goto err_rmr;
}
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
+ dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+ __FILE__);
rv = -EBUSY;
- goto err_irq;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
- rv = -ENOMEM;
- goto err_ioremap;
+ goto err_rmr;
}
ohci = hcd_to_ohci(hcd);
@@ -144,9 +140,11 @@ ohci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat
ohci_hcd_init(ohci);
- rv = usb_add_hcd(hcd, irq, IRQF_DISABLED);
- if (rv == 0)
+ rv = usb_add_hcd(hcd, irq, 0);
+ if (rv == 0) {
+ device_wakeup_enable(hcd->self.controller);
return 0;
+ }
/* by now, 440epx is known to show usb_23 erratum */
np = of_find_compatible_node(NULL, NULL, "ibm,usb-ehci-440epx");
@@ -172,11 +170,7 @@ ohci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat
pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
}
- iounmap(hcd->regs);
-err_ioremap:
irq_dispose_mapping(irq);
-err_irq:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err_rmr:
usb_put_hcd(hcd);
@@ -185,33 +179,19 @@ err_rmr:
static int ohci_hcd_ppc_of_remove(struct platform_device *op)
{
- struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
- dev_set_drvdata(&op->dev, NULL);
+ struct usb_hcd *hcd = platform_get_drvdata(op);
dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n");
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
irq_dispose_mapping(hcd->irq);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
return 0;
}
-static int ohci_hcd_ppc_of_shutdown(struct platform_device *op)
-{
- struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-
- return 0;
-}
-
-
static const struct of_device_id ohci_hcd_ppc_of_match[] = {
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE
{
@@ -239,14 +219,14 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match);
#if !defined(CONFIG_USB_OHCI_HCD_PPC_OF_BE) && \
!defined(CONFIG_USB_OHCI_HCD_PPC_OF_LE)
-#error "No endianess selected for ppc-of-ohci"
+#error "No endianness selected for ppc-of-ohci"
#endif
-static struct of_platform_driver ohci_hcd_ppc_of_driver = {
+static struct platform_driver ohci_hcd_ppc_of_driver = {
.probe = ohci_hcd_ppc_of_probe,
.remove = ohci_hcd_ppc_of_remove,
- .shutdown = ohci_hcd_ppc_of_shutdown,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ppc-of-ohci",
.owner = THIS_MODULE,
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
deleted file mode 100644
index 89e670e38c1..00000000000
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * OHCI HCD (Host Controller Driver) for USB.
- *
- * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
- * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
- * (C) Copyright 2002 Hewlett-Packard Company
- * (C) Copyright 2003-2005 MontaVista Software Inc.
- *
- * Bus Glue for PPC On-Chip OHCI driver
- * Tested on Freescale MPC5200 and IBM STB04xxx
- *
- * Modified by Dale Farnsworth <dale@farnsworth.org> from ohci-sa1111.c
- *
- * This file is licenced under the GPL.
- */
-
-#include <linux/platform_device.h>
-#include <linux/signal.h>
-
-/* configure so an HC device and id are always provided */
-/* always called with process context; sleeping is OK */
-
-/**
- * usb_hcd_ppc_soc_probe - initialize On-Chip HCDs
- * Context: !in_interrupt()
- *
- * Allocates basic resources for this USB host controller.
- *
- * Store this function in the HCD's struct pci_driver as probe().
- */
-static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
- struct platform_device *pdev)
-{
- int retval;
- struct usb_hcd *hcd;
- struct ohci_hcd *ohci;
- struct resource *res;
- int irq;
-
- pr_debug("initializing PPC-SOC USB Controller\n");
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- pr_debug("%s: no irq\n", __FILE__);
- return -ENODEV;
- }
- irq = res->start;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- pr_debug("%s: no reg addr\n", __FILE__);
- return -ENODEV;
- }
-
- hcd = usb_create_hcd(driver, &pdev->dev, "PPC-SOC USB");
- if (!hcd)
- return -ENOMEM;
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug("%s: request_mem_region failed\n", __FILE__);
- retval = -EBUSY;
- goto err1;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- pr_debug("%s: ioremap failed\n", __FILE__);
- retval = -ENOMEM;
- goto err2;
- }
-
- ohci = hcd_to_ohci(hcd);
- ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
-
-#ifdef CONFIG_PPC_MPC52xx
- /* MPC52xx doesn't need frame_no shift */
- ohci->flags |= OHCI_QUIRK_FRAME_NO;
-#endif
- ohci_hcd_init(ohci);
-
- retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
- if (retval == 0)
- return retval;
-
- pr_debug("Removing PPC-SOC USB Controller\n");
-
- iounmap(hcd->regs);
- err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- err1:
- usb_put_hcd(hcd);
- return retval;
-}
-
-
-/* may be called without controller electrically present */
-/* may be called with controller, bus, and devices active */
-
-/**
- * usb_hcd_ppc_soc_remove - shutdown processing for On-Chip HCDs
- * @pdev: USB Host Controller being removed
- * Context: !in_interrupt()
- *
- * Reverses the effect of usb_hcd_ppc_soc_probe().
- * It is always called from a thread
- * context, normally "rmmod", "apmd", or something similar.
- *
- */
-static void usb_hcd_ppc_soc_remove(struct usb_hcd *hcd,
- struct platform_device *pdev)
-{
- usb_remove_hcd(hcd);
-
- pr_debug("stopping PPC-SOC USB Controller\n");
-
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
-}
-
-static int __devinit
-ohci_ppc_soc_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run(ohci)) < 0) {
- err("can't start %s", ohci_to_hcd(ohci)->self.bus_name);
- ohci_stop(hcd);
- return ret;
- }
-
- return 0;
-}
-
-static const struct hc_driver ohci_ppc_soc_hc_driver = {
- .description = hcd_name,
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_ppc_soc_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-static int ohci_hcd_ppc_soc_drv_probe(struct platform_device *pdev)
-{
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
- ret = usb_hcd_ppc_soc_probe(&ohci_ppc_soc_hc_driver, pdev);
- return ret;
-}
-
-static int ohci_hcd_ppc_soc_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_hcd_ppc_soc_remove(hcd, pdev);
- return 0;
-}
-
-static struct platform_driver ohci_hcd_ppc_soc_driver = {
- .probe = ohci_hcd_ppc_soc_drv_probe,
- .remove = ohci_hcd_ppc_soc_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
-#ifdef CONFIG_PM
- /*.suspend = ohci_hcd_ppc_soc_drv_suspend,*/
- /*.resume = ohci_hcd_ppc_soc_drv_resume,*/
-#endif
- .driver = {
- .name = "ppc-soc-ohci",
- .owner = THIS_MODULE,
- },
-};
-
-MODULE_ALIAS("platform:ppc-soc-ohci");
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index 700950455f4..71d8bc4c27f 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -30,7 +30,7 @@ static int ps3_ohci_hc_reset(struct usb_hcd *hcd)
return ohci_init(ohci);
}
-static int __devinit ps3_ohci_hc_start(struct usb_hcd *hcd)
+static int ps3_ohci_hc_start(struct usb_hcd *hcd)
{
int result;
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
@@ -45,7 +45,8 @@ static int __devinit ps3_ohci_hc_start(struct usb_hcd *hcd)
result = ohci_run(ohci);
if (result < 0) {
- err("can't start %s", hcd->self.bus_name);
+ dev_err(hcd->self.controller, "can't start %s\n",
+ hcd->self.bus_name);
ohci_stop(hcd);
}
@@ -75,7 +76,7 @@ static const struct hc_driver ps3_ohci_hc_driver = {
#endif
};
-static int __devinit ps3_ohci_probe(struct ps3_system_bus_device *dev)
+static int ps3_ohci_probe(struct ps3_system_bus_device *dev)
{
int result;
struct usb_hcd *hcd;
@@ -164,7 +165,7 @@ static int __devinit ps3_ohci_probe(struct ps3_system_bus_device *dev)
ps3_system_bus_set_drvdata(dev, hcd);
- result = usb_add_hcd(hcd, virq, IRQF_DISABLED);
+ result = usb_add_hcd(hcd, virq, 0);
if (result) {
dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
@@ -172,6 +173,7 @@ static int __devinit ps3_ohci_probe(struct ps3_system_bus_device *dev)
goto fail_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
return result;
fail_add_hcd:
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index afef7b0a419..e68f3d02cd1 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -19,12 +19,28 @@
* This file is licenced under the GPL.
*/
+#include <linux/clk.h>
#include <linux/device.h>
-#include <linux/signal.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_data/usb-ohci-pxa27x.h>
+#include <linux/platform_data/usb-pxa3xx-ulpi.h>
#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <mach/ohci.h>
-#include <mach/pxa3xx-u2d.h>
+#include <linux/regulator/consumer.h>
+#include <linux/signal.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+
+#include <mach/hardware.h>
+
+#include "ohci.h"
+
+#define DRIVER_DESC "OHCI PXA27x/PXA3x driver"
/*
* UHC: USB Host Controller (OHCI-like) register definitions
@@ -98,16 +114,18 @@
#define PXA_UHC_MAX_PORTNUM 3
-struct pxa27x_ohci {
- /* must be 1st member here for hcd_to_ohci() to work */
- struct ohci_hcd ohci;
+static const char hcd_name[] = "ohci-pxa27x";
+
+static struct hc_driver __read_mostly ohci_pxa27x_hc_driver;
- struct device *dev;
+struct pxa27x_ohci {
struct clk *clk;
void __iomem *mmio_base;
+ struct regulator *vbus[3];
+ bool vbus_enabled[3];
};
-#define to_pxa27x_ohci(hcd) (struct pxa27x_ohci *)hcd_to_ohci(hcd)
+#define to_pxa27x_ohci(hcd) (struct pxa27x_ohci *)(hcd_to_ohci(hcd)->priv)
/*
PMM_NPS_MODE -- PMM Non-power switching mode
@@ -119,10 +137,10 @@ struct pxa27x_ohci {
PMM_PERPORT_MODE -- PMM per port switching mode
Ports are powered individually.
*/
-static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *ohci, int mode)
+static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *pxa_ohci, int mode)
{
- uint32_t uhcrhda = __raw_readl(ohci->mmio_base + UHCRHDA);
- uint32_t uhcrhdb = __raw_readl(ohci->mmio_base + UHCRHDB);
+ uint32_t uhcrhda = __raw_readl(pxa_ohci->mmio_base + UHCRHDA);
+ uint32_t uhcrhdb = __raw_readl(pxa_ohci->mmio_base + UHCRHDB);
switch (mode) {
case PMM_NPS_MODE:
@@ -146,20 +164,64 @@ static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *ohci, int mode)
uhcrhda |= RH_A_NPS;
}
- __raw_writel(uhcrhda, ohci->mmio_base + UHCRHDA);
- __raw_writel(uhcrhdb, ohci->mmio_base + UHCRHDB);
+ __raw_writel(uhcrhda, pxa_ohci->mmio_base + UHCRHDA);
+ __raw_writel(uhcrhdb, pxa_ohci->mmio_base + UHCRHDB);
return 0;
}
-extern int usb_disabled(void);
+static int pxa27x_ohci_set_vbus_power(struct pxa27x_ohci *pxa_ohci,
+ unsigned int port, bool enable)
+{
+ struct regulator *vbus = pxa_ohci->vbus[port];
+ int ret = 0;
+ if (IS_ERR_OR_NULL(vbus))
+ return 0;
+
+ if (enable && !pxa_ohci->vbus_enabled[port])
+ ret = regulator_enable(vbus);
+ else if (!enable && pxa_ohci->vbus_enabled[port])
+ ret = regulator_disable(vbus);
+
+ if (ret < 0)
+ return ret;
+
+ pxa_ohci->vbus_enabled[port] = enable;
+
+ return 0;
+}
+
+static int pxa27x_ohci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
+ int ret;
+
+ switch (typeReq) {
+ case SetPortFeature:
+ case ClearPortFeature:
+ if (!wIndex || wIndex > 3)
+ return -EPIPE;
+
+ if (wValue != USB_PORT_FEAT_POWER)
+ break;
+
+ ret = pxa27x_ohci_set_vbus_power(pxa_ohci, wIndex - 1,
+ typeReq == SetPortFeature);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+}
/*-------------------------------------------------------------------------*/
-static inline void pxa27x_setup_hc(struct pxa27x_ohci *ohci,
+static inline void pxa27x_setup_hc(struct pxa27x_ohci *pxa_ohci,
struct pxaohci_platform_data *inf)
{
- uint32_t uhchr = __raw_readl(ohci->mmio_base + UHCHR);
- uint32_t uhcrhda = __raw_readl(ohci->mmio_base + UHCRHDA);
+ uint32_t uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR);
+ uint32_t uhcrhda = __raw_readl(pxa_ohci->mmio_base + UHCRHDA);
if (inf->flags & ENABLE_PORT1)
uhchr &= ~UHCHR_SSEP1;
@@ -191,17 +253,17 @@ static inline void pxa27x_setup_hc(struct pxa27x_ohci *ohci,
uhcrhda |= UHCRHDA_POTPGT(inf->power_on_delay / 2);
}
- __raw_writel(uhchr, ohci->mmio_base + UHCHR);
- __raw_writel(uhcrhda, ohci->mmio_base + UHCRHDA);
+ __raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
+ __raw_writel(uhcrhda, pxa_ohci->mmio_base + UHCRHDA);
}
-static inline void pxa27x_reset_hc(struct pxa27x_ohci *ohci)
+static inline void pxa27x_reset_hc(struct pxa27x_ohci *pxa_ohci)
{
- uint32_t uhchr = __raw_readl(ohci->mmio_base + UHCHR);
+ uint32_t uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR);
- __raw_writel(uhchr | UHCHR_FHR, ohci->mmio_base + UHCHR);
+ __raw_writel(uhchr | UHCHR_FHR, pxa_ohci->mmio_base + UHCHR);
udelay(11);
- __raw_writel(uhchr & ~UHCHR_FHR, ohci->mmio_base + UHCHR);
+ __raw_writel(uhchr & ~UHCHR_FHR, pxa_ohci->mmio_base + UHCHR);
}
#ifdef CONFIG_PXA27x
@@ -210,25 +272,26 @@ extern void pxa27x_clear_otgph(void);
#define pxa27x_clear_otgph() do {} while (0)
#endif
-static int pxa27x_start_hc(struct pxa27x_ohci *ohci, struct device *dev)
+static int pxa27x_start_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
{
int retval = 0;
struct pxaohci_platform_data *inf;
uint32_t uhchr;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
- inf = dev->platform_data;
+ inf = dev_get_platdata(dev);
- clk_enable(ohci->clk);
+ clk_prepare_enable(pxa_ohci->clk);
- pxa27x_reset_hc(ohci);
+ pxa27x_reset_hc(pxa_ohci);
- uhchr = __raw_readl(ohci->mmio_base + UHCHR) | UHCHR_FSBIR;
- __raw_writel(uhchr, ohci->mmio_base + UHCHR);
+ uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR) | UHCHR_FSBIR;
+ __raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
- while (__raw_readl(ohci->mmio_base + UHCHR) & UHCHR_FSBIR)
+ while (__raw_readl(pxa_ohci->mmio_base + UHCHR) & UHCHR_FSBIR)
cpu_relax();
- pxa27x_setup_hc(ohci, inf);
+ pxa27x_setup_hc(pxa_ohci, inf);
if (inf->init)
retval = inf->init(dev);
@@ -237,40 +300,102 @@ static int pxa27x_start_hc(struct pxa27x_ohci *ohci, struct device *dev)
return retval;
if (cpu_is_pxa3xx())
- pxa3xx_u2d_start_hc(&ohci_to_hcd(&ohci->ohci)->self);
+ pxa3xx_u2d_start_hc(&hcd->self);
- uhchr = __raw_readl(ohci->mmio_base + UHCHR) & ~UHCHR_SSE;
- __raw_writel(uhchr, ohci->mmio_base + UHCHR);
- __raw_writel(UHCHIE_UPRIE | UHCHIE_RWIE, ohci->mmio_base + UHCHIE);
+ uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR) & ~UHCHR_SSE;
+ __raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
+ __raw_writel(UHCHIE_UPRIE | UHCHIE_RWIE, pxa_ohci->mmio_base + UHCHIE);
/* Clear any OTG Pin Hold */
pxa27x_clear_otgph();
return 0;
}
-static void pxa27x_stop_hc(struct pxa27x_ohci *ohci, struct device *dev)
+static void pxa27x_stop_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
{
struct pxaohci_platform_data *inf;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
uint32_t uhccoms;
- inf = dev->platform_data;
+ inf = dev_get_platdata(dev);
if (cpu_is_pxa3xx())
- pxa3xx_u2d_stop_hc(&ohci_to_hcd(&ohci->ohci)->self);
+ pxa3xx_u2d_stop_hc(&hcd->self);
if (inf->exit)
inf->exit(dev);
- pxa27x_reset_hc(ohci);
+ pxa27x_reset_hc(pxa_ohci);
/* Host Controller Reset */
- uhccoms = __raw_readl(ohci->mmio_base + UHCCOMS) | 0x01;
- __raw_writel(uhccoms, ohci->mmio_base + UHCCOMS);
+ uhccoms = __raw_readl(pxa_ohci->mmio_base + UHCCOMS) | 0x01;
+ __raw_writel(uhccoms, pxa_ohci->mmio_base + UHCCOMS);
udelay(10);
- clk_disable(ohci->clk);
+ clk_disable_unprepare(pxa_ohci->clk);
}
+#ifdef CONFIG_OF
+static const struct of_device_id pxa_ohci_dt_ids[] = {
+ { .compatible = "marvell,pxa-ohci" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, pxa_ohci_dt_ids);
+
+static int ohci_pxa_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct pxaohci_platform_data *pdata;
+ u32 tmp;
+ int ret;
+
+ if (!np)
+ return 0;
+
+ /* Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ if (of_get_property(np, "marvell,enable-port1", NULL))
+ pdata->flags |= ENABLE_PORT1;
+ if (of_get_property(np, "marvell,enable-port2", NULL))
+ pdata->flags |= ENABLE_PORT2;
+ if (of_get_property(np, "marvell,enable-port3", NULL))
+ pdata->flags |= ENABLE_PORT3;
+ if (of_get_property(np, "marvell,port-sense-low", NULL))
+ pdata->flags |= POWER_SENSE_LOW;
+ if (of_get_property(np, "marvell,power-control-low", NULL))
+ pdata->flags |= POWER_CONTROL_LOW;
+ if (of_get_property(np, "marvell,no-oc-protection", NULL))
+ pdata->flags |= NO_OC_PROTECTION;
+ if (of_get_property(np, "marvell,oc-mode-perport", NULL))
+ pdata->flags |= OC_MODE_PERPORT;
+ if (!of_property_read_u32(np, "marvell,power-on-delay", &tmp))
+ pdata->power_on_delay = tmp;
+ if (!of_property_read_u32(np, "marvell,port-mode", &tmp))
+ pdata->port_mode = tmp;
+ if (!of_property_read_u32(np, "marvell,power-budget", &tmp))
+ pdata->power_budget = tmp;
+
+ pdev->dev.platform_data = pdata;
+
+ return 0;
+}
+#else
+static int ohci_pxa_of_init(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
/*-------------------------------------------------------------------------*/
@@ -292,11 +417,17 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
int retval, irq;
struct usb_hcd *hcd;
struct pxaohci_platform_data *inf;
- struct pxa27x_ohci *ohci;
+ struct pxa27x_ohci *pxa_ohci;
+ struct ohci_hcd *ohci;
struct resource *r;
struct clk *usb_clk;
+ unsigned int i;
+
+ retval = ohci_pxa_of_init(pdev);
+ if (retval)
+ return retval;
- inf = pdev->dev.platform_data;
+ inf = dev_get_platdata(&pdev->dev);
if (!inf)
return -ENODEV;
@@ -307,7 +438,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
return -ENXIO;
}
- usb_clk = clk_get(&pdev->dev, NULL);
+ usb_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usb_clk))
return PTR_ERR(usb_clk);
@@ -319,56 +450,58 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
if (!r) {
pr_err("no resource of IORESOURCE_MEM");
retval = -ENXIO;
- goto err1;
+ goto err;
}
hcd->rsrc_start = r->start;
hcd->rsrc_len = resource_size(r);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug("request_mem_region failed");
- retval = -EBUSY;
- goto err1;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- pr_debug("ioremap failed");
- retval = -ENOMEM;
- goto err2;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
+ goto err;
}
/* initialize "struct pxa27x_ohci" */
- ohci = (struct pxa27x_ohci *)hcd_to_ohci(hcd);
- ohci->dev = &pdev->dev;
- ohci->clk = usb_clk;
- ohci->mmio_base = (void __iomem *)hcd->regs;
+ pxa_ohci = to_pxa27x_ohci(hcd);
+ pxa_ohci->clk = usb_clk;
+ pxa_ohci->mmio_base = (void __iomem *)hcd->regs;
+
+ for (i = 0; i < 3; ++i) {
+ char name[6];
+
+ if (!(inf->flags & (ENABLE_PORT1 << i)))
+ continue;
+
+ sprintf(name, "vbus%u", i + 1);
+ pxa_ohci->vbus[i] = devm_regulator_get(&pdev->dev, name);
+ }
- if ((retval = pxa27x_start_hc(ohci, &pdev->dev)) < 0) {
+ retval = pxa27x_start_hc(pxa_ohci, &pdev->dev);
+ if (retval < 0) {
pr_debug("pxa27x_start_hc failed");
- goto err3;
+ goto err;
}
/* Select Power Management Mode */
- pxa27x_ohci_select_pmm(ohci, inf->port_mode);
+ pxa27x_ohci_select_pmm(pxa_ohci, inf->port_mode);
if (inf->power_budget)
hcd->power_budget = inf->power_budget;
- ohci_hcd_init(hcd_to_ohci(hcd));
+ /* The value of NDP in roothub_a is incorrect on this hardware */
+ ohci = hcd_to_ohci(hcd);
+ ohci->num_ports = 3;
- retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
- if (retval == 0)
+ retval = usb_add_hcd(hcd, irq, 0);
+ if (retval == 0) {
+ device_wakeup_enable(hcd->self.controller);
return retval;
+ }
- pxa27x_stop_hc(ohci, &pdev->dev);
- err3:
- iounmap(hcd->regs);
- err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- err1:
+ pxa27x_stop_hc(pxa_ohci, &pdev->dev);
+ err:
usb_put_hcd(hcd);
- clk_put(usb_clk);
return retval;
}
@@ -388,87 +521,20 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
*/
void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev)
{
- struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
+ struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
+ unsigned int i;
usb_remove_hcd(hcd);
- pxa27x_stop_hc(ohci, &pdev->dev);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
- clk_put(ohci->clk);
-}
-
-/*-------------------------------------------------------------------------*/
+ pxa27x_stop_hc(pxa_ohci, &pdev->dev);
-static int __devinit
-ohci_pxa27x_start (struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
-
- ohci_dbg (ohci, "ohci_pxa27x_start, ohci:%p", ohci);
-
- /* The value of NDP in roothub_a is incorrect on this hardware */
- ohci->num_ports = 3;
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
+ for (i = 0; i < 3; ++i)
+ pxa27x_ohci_set_vbus_power(pxa_ohci, i, false);
- if ((ret = ohci_run (ohci)) < 0) {
- err ("can't start %s", hcd->self.bus_name);
- ohci_stop (hcd);
- return ret;
- }
-
- return 0;
+ usb_put_hcd(hcd);
}
/*-------------------------------------------------------------------------*/
-static const struct hc_driver ohci_pxa27x_hc_driver = {
- .description = hcd_name,
- .product_desc = "PXA27x OHCI",
- .hcd_priv_size = sizeof(struct pxa27x_ohci),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_pxa27x_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/*-------------------------------------------------------------------------*/
-
static int ohci_hcd_pxa27x_drv_probe(struct platform_device *pdev)
{
pr_debug ("In ohci_hcd_pxa27x_drv_probe");
@@ -484,7 +550,6 @@ static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev)
struct usb_hcd *hcd = platform_get_drvdata(pdev);
usb_hcd_pxa27x_remove(hcd, pdev);
- platform_set_drvdata(pdev, NULL);
return 0;
}
@@ -492,36 +557,44 @@ static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev)
static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
+ struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(dev);
+ int ret;
- if (time_before(jiffies, ohci->ohci.next_statechange))
+
+ if (time_before(jiffies, ohci->next_statechange))
msleep(5);
- ohci->ohci.next_statechange = jiffies;
+ ohci->next_statechange = jiffies;
- pxa27x_stop_hc(ohci, dev);
- hcd->state = HC_STATE_SUSPENDED;
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
- return 0;
+ pxa27x_stop_hc(pxa_ohci, dev);
+ return ret;
}
static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
- struct pxaohci_platform_data *inf = dev->platform_data;
+ struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
+ struct pxaohci_platform_data *inf = dev_get_platdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int status;
- if (time_before(jiffies, ohci->ohci.next_statechange))
+ if (time_before(jiffies, ohci->next_statechange))
msleep(5);
- ohci->ohci.next_statechange = jiffies;
+ ohci->next_statechange = jiffies;
- if ((status = pxa27x_start_hc(ohci, dev)) < 0)
+ status = pxa27x_start_hc(pxa_ohci, dev);
+ if (status < 0)
return status;
/* Select Power Management Mode */
- pxa27x_ohci_select_pmm(ohci, inf->port_mode);
+ pxa27x_ohci_select_pmm(pxa_ohci, inf->port_mode);
- ohci_finish_controller_resume(hcd);
+ ohci_resume(hcd, false);
return 0;
}
@@ -531,9 +604,6 @@ static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
};
#endif
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:pxa27x-ohci");
-
static struct platform_driver ohci_hcd_pxa27x_driver = {
.probe = ohci_hcd_pxa27x_drv_probe,
.remove = ohci_hcd_pxa27x_drv_remove,
@@ -541,9 +611,37 @@ static struct platform_driver ohci_hcd_pxa27x_driver = {
.driver = {
.name = "pxa27x-ohci",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pxa_ohci_dt_ids),
#ifdef CONFIG_PM
.pm = &ohci_hcd_pxa27x_pm_ops,
#endif
},
};
+static const struct ohci_driver_overrides pxa27x_overrides __initconst = {
+ .extra_priv_size = sizeof(struct pxa27x_ohci),
+};
+
+static int __init ohci_pxa27x_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ohci_init_driver(&ohci_pxa27x_hc_driver, &pxa27x_overrides);
+ ohci_pxa27x_hc_driver.hub_control = pxa27x_ohci_hub_control;
+
+ return platform_driver_register(&ohci_hcd_pxa27x_driver);
+}
+module_init(ohci_pxa27x_init);
+
+static void __exit ohci_pxa27x_cleanup(void)
+{
+ platform_driver_unregister(&ohci_hcd_pxa27x_driver);
+}
+module_exit(ohci_pxa27x_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa27x-ohci");
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 83094d067e0..d4253e31942 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -41,9 +41,15 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
__releases(ohci->lock)
__acquires(ohci->lock)
{
+ struct device *dev = ohci_to_hcd(ohci)->self.controller;
+ struct usb_host_endpoint *ep = urb->ep;
+ struct urb_priv *urb_priv;
+
// ASSERT (urb->hcpriv != 0);
+ restart:
urb_free_priv (ohci, urb->hcpriv);
+ urb->hcpriv = NULL;
if (likely(status == -EINPROGRESS))
status = 0;
@@ -52,9 +58,9 @@ __acquires(ohci->lock)
ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
if (quirk_amdiso(ohci))
- quirk_amd_pll(1);
+ usb_amd_quirk_pll_enable();
if (quirk_amdprefetch(ohci))
- sb800_prefetch(ohci, 0);
+ sb800_prefetch(dev, 0);
}
break;
case PIPE_INTERRUPT:
@@ -62,10 +68,6 @@ __acquires(ohci->lock)
break;
}
-#ifdef OHCI_VERBOSE_DEBUG
- urb_print(urb, "RET", usb_pipeout (urb->pipe), status);
-#endif
-
/* urb->complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
spin_unlock (&ohci->lock);
@@ -78,6 +80,21 @@ __acquires(ohci->lock)
ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
}
+
+ /*
+ * An isochronous URB that is submitted too late won't have any TDs
+ * (marked by the fact that the td_cnt value is larger than the
+ * actual number of TDs). If the next URB on this endpoint is like
+ * that, give it back now.
+ */
+ if (!list_empty(&ep->urb_list)) {
+ urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+ urb_priv = urb->hcpriv;
+ if (urb_priv->td_cnt > urb_priv->length) {
+ status = 0;
+ goto restart;
+ }
+ }
}
@@ -126,7 +143,7 @@ static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
unsigned i;
- ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
+ ohci_dbg(ohci, "link %sed %p branch %d [%dus.], interval %d\n",
(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
@@ -273,7 +290,7 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
}
ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;
- ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
+ ohci_dbg(ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
}
@@ -428,7 +445,7 @@ static struct ed *ed_get (
ed->type = usb_pipetype(pipe);
info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
- info |= le16_to_cpu(ep->desc.wMaxPacketSize) << 16;
+ info |= usb_endpoint_maxp(&ep->desc) << 16;
if (udev->speed == USB_SPEED_LOW)
info |= ED_LOWSPEED;
/* only control transfers store pids in tds */
@@ -444,7 +461,7 @@ static struct ed *ed_get (
ed->load = usb_calc_bus_time (
udev->speed, !is_out,
ed->type == PIPE_ISOCHRONOUS,
- le16_to_cpu(ep->desc.wMaxPacketSize))
+ usb_endpoint_maxp(&ep->desc))
/ 1000;
}
}
@@ -544,7 +561,6 @@ td_fill (struct ohci_hcd *ohci, u32 info,
td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
(data & 0x0FFF) | 0xE000);
- td->ed->last_iso = info & 0xffff;
} else {
td->hwCBP = cpu_to_hc32 (ohci, data);
}
@@ -579,6 +595,7 @@ static void td_submit_urb (
struct urb *urb
) {
struct urb_priv *urb_priv = urb->hcpriv;
+ struct device *dev = ohci_to_hcd(ohci)->self.controller;
dma_addr_t data;
int data_len = urb->transfer_buffer_length;
int cnt = 0;
@@ -596,7 +613,6 @@ static void td_submit_urb (
urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
}
- urb_priv->td_cnt = 0;
list_add (&urb_priv->pending, &ohci->pending);
if (data_len)
@@ -672,7 +688,8 @@ static void td_submit_urb (
* we could often reduce the number of TDs here.
*/
case PIPE_ISOCHRONOUS:
- for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
+ for (cnt = urb_priv->td_cnt; cnt < urb->number_of_packets;
+ cnt++) {
int frame = urb->start_frame;
// FIXME scheduling should handle frame counter
@@ -686,9 +703,9 @@ static void td_submit_urb (
}
if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
if (quirk_amdiso(ohci))
- quirk_amd_pll(0);
+ usb_amd_quirk_pll_disable();
if (quirk_amdprefetch(ohci))
- sb800_prefetch(ohci, 1);
+ sb800_prefetch(dev, 1);
}
periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
@@ -744,7 +761,7 @@ static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
urb->iso_frame_desc [td->index].status = cc_to_error [cc];
if (cc != TD_CC_NOERROR)
- ohci_vdbg (ohci,
+ ohci_dbg(ohci,
"urb %p iso td %p (%d) len %d cc %d\n",
urb, td, 1 + td->index, dlen, cc);
@@ -776,7 +793,7 @@ static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
}
if (cc != TD_CC_NOERROR && cc < 0x0E)
- ohci_vdbg (ohci,
+ ohci_dbg(ohci,
"urb %p td %p (%d) cc %d, len=%d/%d\n",
urb, td, 1 + td->index, cc,
urb->actual_length,
@@ -912,7 +929,7 @@ rescan_all:
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps and EDs with partially retired TDs
*/
- if (likely (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) {
+ if (likely(ohci->rh_state == OHCI_RH_RUNNING)) {
if (tick_before (tick, ed->tick)) {
skip_ed:
last = &ed->ed_next;
@@ -993,7 +1010,7 @@ rescan_this:
urb_priv->td_cnt++;
/* if URB is done, clean up */
- if (urb_priv->td_cnt == urb_priv->length) {
+ if (urb_priv->td_cnt >= urb_priv->length) {
modified = completed = 1;
finish_urb(ohci, urb, 0);
}
@@ -1012,7 +1029,7 @@ rescan_this:
/* but if there's work queued, reschedule */
if (!list_empty (&ed->td_list)) {
- if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))
+ if (ohci->rh_state == OHCI_RH_RUNNING)
ed_schedule (ohci, ed);
}
@@ -1021,9 +1038,7 @@ rescan_this:
}
/* maybe reenable control and bulk lists */
- if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state)
- && ohci_to_hcd(ohci)->state != HC_STATE_QUIESCING
- && !ohci->ed_rm_list) {
+ if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
u32 command = 0, control = 0;
if (ohci->ed_controltail) {
@@ -1085,7 +1100,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
urb_priv->td_cnt++;
/* If all this urb's TDs are done, call complete() */
- if (urb_priv->td_cnt == urb_priv->length)
+ if (urb_priv->td_cnt >= urb_priv->length)
finish_urb(ohci, urb, status);
/* clean schedule: unlink EDs that are no longer busy */
@@ -1130,6 +1145,25 @@ dl_done_list (struct ohci_hcd *ohci)
while (td) {
struct td *td_next = td->next_dl_td;
+ struct ed *ed = td->ed;
+
+ /*
+ * Some OHCI controllers (NVIDIA for sure, maybe others)
+ * occasionally forget to add TDs to the done queue. Since
+ * TDs for a given endpoint are always processed in order,
+ * if we find a TD on the donelist then all of its
+ * predecessors must be finished as well.
+ */
+ for (;;) {
+ struct td *td2;
+
+ td2 = list_first_entry(&ed->td_list, struct td,
+ td_list);
+ if (td2 == td)
+ break;
+ takeback_td(ohci, td2);
+ }
+
takeback_td(ohci, td);
td = td_next;
}
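
The dl_done_list() change above works around controllers that occasionally omit TDs from the done queue: because TDs for a given endpoint complete in order, finding one TD on the done list implies that all of its predecessors on that endpoint are finished too. A minimal sketch of the catch-up idiom, using illustrative names (my_ed, my_td, retire_td) that are not part of this patch:

	#include <linux/list.h>

	struct my_td {
		struct list_head td_list;	/* link on the endpoint queue */
	};

	struct my_ed {
		struct list_head td_list;	/* TDs queued for this endpoint */
	};

	/* stand-in for takeback_td(): complete the TD and unlink it */
	static void retire_td(struct my_ed *ed, struct my_td *td)
	{
		list_del(&td->td_list);	/* real code would also give back the URB */
	}

	/* 'found' must be queued on ed->td_list */
	static void take_back_in_order(struct my_ed *ed, struct my_td *found)
	{
		struct my_td *td;

		/* retire everything queued ahead of the TD actually reported */
		while ((td = list_first_entry(&ed->td_list, struct my_td,
				td_list)) != found)
			retire_td(ed, td);

		retire_td(ed, found);
	}
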
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index a68af2dd55c..3d753a9d314 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -19,14 +19,27 @@
* This file is licenced under the GPL.
*/
-#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <plat/usb-control.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/usb-ohci-s3c2410.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
#define valid_port(idx) ((idx) == 1 || (idx) == 2)
/* clock device associated with the hcd */
+
+#define DRIVER_DESC "OHCI S3C2410 driver"
+
+static const char hcd_name[] = "ohci-s3c2410";
+
static struct clk *clk;
static struct clk *usb_clk;
@@ -38,33 +51,32 @@ static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc);
static struct s3c2410_hcd_info *to_s3c2410_info(struct usb_hcd *hcd)
{
- return hcd->self.controller->platform_data;
+ return dev_get_platdata(hcd->self.controller);
}
static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd)
{
- struct s3c2410_hcd_info *info = dev->dev.platform_data;
+ struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
dev_dbg(&dev->dev, "s3c2410_start_hc:\n");
- clk_enable(usb_clk);
+ clk_prepare_enable(usb_clk);
mdelay(2); /* let the bus clock stabilise */
- clk_enable(clk);
+ clk_prepare_enable(clk);
if (info != NULL) {
info->hcd = hcd;
info->report_oc = s3c2410_hcd_oc;
- if (info->enable_oc != NULL) {
+ if (info->enable_oc != NULL)
(info->enable_oc)(info, 1);
- }
}
}
static void s3c2410_stop_hc(struct platform_device *dev)
{
- struct s3c2410_hcd_info *info = dev->dev.platform_data;
+ struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
dev_dbg(&dev->dev, "s3c2410_stop_hc:\n");
@@ -72,13 +84,12 @@ static void s3c2410_stop_hc(struct platform_device *dev)
info->report_oc = NULL;
info->hcd = NULL;
- if (info->enable_oc != NULL) {
+ if (info->enable_oc != NULL)
(info->enable_oc)(info, 0);
- }
}
- clk_disable(clk);
- clk_disable(usb_clk);
+ clk_disable_unprepare(clk);
+ clk_disable_unprepare(usb_clk);
}
/* ohci_s3c2410_hub_status_data
@@ -88,14 +99,14 @@ static void s3c2410_stop_hc(struct platform_device *dev)
*/
static int
-ohci_s3c2410_hub_status_data (struct usb_hcd *hcd, char *buf)
+ohci_s3c2410_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct s3c2410_hcd_info *info = to_s3c2410_info(hcd);
struct s3c2410_hcd_port *port;
int orig;
int portno;
- orig = ohci_hub_status_data (hcd, buf);
+ orig = ohci_hub_status_data(hcd, buf);
if (info == NULL)
return orig;
@@ -145,7 +156,7 @@ static void s3c2410_usb_set_power(struct s3c2410_hcd_info *info,
* request.
*/
-static int ohci_s3c2410_hub_control (
+static int ohci_s3c2410_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
@@ -199,9 +210,8 @@ static int ohci_s3c2410_hub_control (
dev_dbg(hcd->self.controller,
"ClearPortFeature: OVER_CURRENT\n");
- if (valid_port(wIndex)) {
+ if (valid_port(wIndex))
info->port[wIndex-1].oc_status = 0;
- }
goto out;
@@ -242,8 +252,11 @@ static int ohci_s3c2410_hub_control (
desc->wHubCharacteristics |= cpu_to_le16(0x0001);
if (info->enable_oc) {
- desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_OCPM);
- desc->wHubCharacteristics |= cpu_to_le16(0x0008|0x0001);
+ desc->wHubCharacteristics &= ~cpu_to_le16(
+ HUB_CHAR_OCPM);
+ desc->wHubCharacteristics |= cpu_to_le16(
+ 0x0008 |
+ 0x0001);
}
dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n",
@@ -257,13 +270,11 @@ static int ohci_s3c2410_hub_control (
dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex);
if (valid_port(wIndex)) {
- if (info->port[wIndex-1].oc_changed) {
+ if (info->port[wIndex-1].oc_changed)
*data |= cpu_to_le32(RH_PS_OCIC);
- }
- if (info->port[wIndex-1].oc_status) {
+ if (info->port[wIndex-1].oc_status)
*data |= cpu_to_le32(RH_PS_POCI);
- }
}
}
@@ -321,12 +332,10 @@ static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc)
*/
static void
-usb_hcd_s3c2410_remove (struct usb_hcd *hcd, struct platform_device *dev)
+usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev)
{
usb_remove_hcd(hcd);
s3c2410_stop_hc(dev);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
}
@@ -339,69 +348,54 @@ usb_hcd_s3c2410_remove (struct usb_hcd *hcd, struct platform_device *dev)
* through the hotplug entry's driver_data.
*
*/
-static int usb_hcd_s3c2410_probe (const struct hc_driver *driver,
+static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
struct platform_device *dev)
{
struct usb_hcd *hcd = NULL;
+ struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
int retval;
- s3c2410_usb_set_power(dev->dev.platform_data, 1, 1);
- s3c2410_usb_set_power(dev->dev.platform_data, 2, 1);
+ s3c2410_usb_set_power(info, 1, 1);
+ s3c2410_usb_set_power(info, 2, 1);
hcd = usb_create_hcd(driver, &dev->dev, "s3c24xx");
if (hcd == NULL)
return -ENOMEM;
hcd->rsrc_start = dev->resource[0].start;
- hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1;
+ hcd->rsrc_len = resource_size(&dev->resource[0]);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- dev_err(&dev->dev, "request_mem_region failed\n");
- retval = -EBUSY;
+ hcd->regs = devm_ioremap_resource(&dev->dev, &dev->resource[0]);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
goto err_put;
}
- clk = clk_get(&dev->dev, "usb-host");
+ clk = devm_clk_get(&dev->dev, "usb-host");
if (IS_ERR(clk)) {
dev_err(&dev->dev, "cannot get usb-host clock\n");
- retval = -ENOENT;
- goto err_mem;
+ retval = PTR_ERR(clk);
+ goto err_put;
}
- usb_clk = clk_get(&dev->dev, "usb-bus-host");
+ usb_clk = devm_clk_get(&dev->dev, "usb-bus-host");
if (IS_ERR(usb_clk)) {
dev_err(&dev->dev, "cannot get usb-bus-host clock\n");
- retval = -ENOENT;
- goto err_clk;
+ retval = PTR_ERR(usb_clk);
+ goto err_put;
}
s3c2410_start_hc(dev, hcd);
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&dev->dev, "ioremap failed\n");
- retval = -ENOMEM;
- goto err_ioremap;
- }
-
- ohci_hcd_init(hcd_to_ohci(hcd));
-
- retval = usb_add_hcd(hcd, dev->resource[1].start, IRQF_DISABLED);
+ retval = usb_add_hcd(hcd, dev->resource[1].start, 0);
if (retval != 0)
goto err_ioremap;
+ device_wakeup_enable(hcd->self.controller);
return 0;
err_ioremap:
s3c2410_stop_hc(dev);
- iounmap(hcd->regs);
- clk_put(usb_clk);
-
- err_clk:
- clk_put(clk);
-
- err_mem:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err_put:
usb_put_hcd(hcd);
@@ -410,92 +404,100 @@ static int usb_hcd_s3c2410_probe (const struct hc_driver *driver,
/*-------------------------------------------------------------------------*/
-static int
-ohci_s3c2410_start (struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
+static struct hc_driver __read_mostly ohci_s3c2410_hc_driver;
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
+static int ohci_hcd_s3c2410_drv_probe(struct platform_device *pdev)
+{
+ return usb_hcd_s3c2410_probe(&ohci_s3c2410_hc_driver, pdev);
+}
- if ((ret = ohci_run (ohci)) < 0) {
- err ("can't start %s", hcd->self.bus_name);
- ohci_stop (hcd);
- return ret;
- }
+static int ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ usb_hcd_s3c2410_remove(hcd, pdev);
return 0;
}
+#ifdef CONFIG_PM
+static int ohci_hcd_s3c2410_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+ bool do_wakeup = device_may_wakeup(dev);
+ int rc = 0;
-static const struct hc_driver ohci_s3c2410_hc_driver = {
- .description = hcd_name,
- .product_desc = "S3C24XX OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_s3c2410_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
+ rc = ohci_suspend(hcd, do_wakeup);
+ if (rc)
+ return rc;
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
+ s3c2410_stop_hc(pdev);
- /*
- * root hub support
- */
- .hub_status_data = ohci_s3c2410_hub_status_data,
- .hub_control = ohci_s3c2410_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/* device driver */
-
-static int ohci_hcd_s3c2410_drv_probe(struct platform_device *pdev)
-{
- return usb_hcd_s3c2410_probe(&ohci_s3c2410_hc_driver, pdev);
+ return rc;
}
-static int ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
+static int ohci_hcd_s3c2410_drv_resume(struct device *dev)
{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+
+ s3c2410_start_hc(pdev, hcd);
+
+ ohci_resume(hcd, false);
- usb_hcd_s3c2410_remove(hcd, pdev);
return 0;
}
+#else
+#define ohci_hcd_s3c2410_drv_suspend NULL
+#define ohci_hcd_s3c2410_drv_resume NULL
+#endif
+
+static const struct dev_pm_ops ohci_hcd_s3c2410_pm_ops = {
+ .suspend = ohci_hcd_s3c2410_drv_suspend,
+ .resume = ohci_hcd_s3c2410_drv_resume,
+};
static struct platform_driver ohci_hcd_s3c2410_driver = {
.probe = ohci_hcd_s3c2410_drv_probe,
.remove = ohci_hcd_s3c2410_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
- /*.suspend = ohci_hcd_s3c2410_drv_suspend, */
- /*.resume = ohci_hcd_s3c2410_drv_resume, */
.driver = {
.owner = THIS_MODULE,
.name = "s3c2410-ohci",
+ .pm = &ohci_hcd_s3c2410_pm_ops,
},
};
+static int __init ohci_s3c2410_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ohci_init_driver(&ohci_s3c2410_hc_driver, NULL);
+
+ /*
+ * The Samsung HW has some unusual quirks, which require
+ * Samsung-specific workarounds. We override certain hc_driver
+ * functions here to achieve that. We explicitly do not enhance
+ * ohci_driver_overrides to allow this more easily, since this
+ * is an unusual case, and we don't want to encourage others to
+ * override these functions by making it too easy.
+ */
+
+ ohci_s3c2410_hc_driver.hub_status_data = ohci_s3c2410_hub_status_data;
+ ohci_s3c2410_hc_driver.hub_control = ohci_s3c2410_hub_control;
+
+ return platform_driver_register(&ohci_hcd_s3c2410_driver);
+}
+module_init(ohci_s3c2410_init);
+
+static void __exit ohci_s3c2410_cleanup(void)
+{
+ platform_driver_unregister(&ohci_hcd_s3c2410_driver);
+}
+module_exit(ohci_s3c2410_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:s3c2410-ohci");
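
The s3c2410 conversion above leans on managed (devm_*) resources, which is why the old err_mem/err_clk unwind labels disappear: anything obtained through devm_ioremap_resource() or devm_clk_get() is released automatically when probe fails or the device is unbound. A minimal sketch of that pattern, assuming a platform device with one MEM resource and a clock named "usb-host" (names are illustrative, not taken from the patch):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;
		struct clk *clk;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		regs = devm_ioremap_resource(&pdev->dev, res);	/* request + ioremap */
		if (IS_ERR(regs))
			return PTR_ERR(regs);		/* nothing to unwind */

		clk = devm_clk_get(&pdev->dev, "usb-host");	/* no clk_put() needed */
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return clk_prepare_enable(clk);	/* paired with clk_disable_unprepare() */
	}
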
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index d8eb3bdafab..2ac266d692a 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -16,29 +16,115 @@
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <mach/assabet.h>
-#include <mach/badge4.h>
#include <asm/hardware/sa1111.h>
#ifndef CONFIG_SA1111
#error "This file is SA-1111 bus glue. CONFIG_SA1111 must be defined."
#endif
-extern int usb_disabled(void);
+#define USB_STATUS 0x0118
+#define USB_RESET 0x011c
+#define USB_IRQTEST 0x0120
+
+#define USB_RESET_FORCEIFRESET (1 << 0)
+#define USB_RESET_FORCEHCRESET (1 << 1)
+#define USB_RESET_CLKGENRESET (1 << 2)
+#define USB_RESET_SIMSCALEDOWN (1 << 3)
+#define USB_RESET_USBINTTEST (1 << 4)
+#define USB_RESET_SLEEPSTBYEN (1 << 5)
+#define USB_RESET_PWRSENSELOW (1 << 6)
+#define USB_RESET_PWRCTRLLOW (1 << 7)
+
+#define USB_STATUS_IRQHCIRMTWKUP (1 << 7)
+#define USB_STATUS_IRQHCIBUFFACC (1 << 8)
+#define USB_STATUS_NIRQHCIM (1 << 9)
+#define USB_STATUS_NHCIMFCLR (1 << 10)
+#define USB_STATUS_USBPWRSENSE (1 << 11)
-/*-------------------------------------------------------------------------*/
+#if 0
+static void dump_hci_status(struct usb_hcd *hcd, const char *label)
+{
+ unsigned long status = sa1111_readl(hcd->regs + USB_STATUS);
+
+ printk(KERN_DEBUG "%s USB_STATUS = { %s%s%s%s%s}\n", label,
+ ((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
+ ((status & USB_STATUS_IRQHCIBUFFACC) ? "IRQHCIBUFFACC " : ""),
+ ((status & USB_STATUS_NIRQHCIM) ? "" : "IRQHCIM "),
+ ((status & USB_STATUS_NHCIMFCLR) ? "" : "HCIMFCLR "),
+ ((status & USB_STATUS_USBPWRSENSE) ? "USBPWRSENSE " : ""));
+}
+#endif
-static void sa1111_start_hc(struct sa1111_dev *dev)
+static int ohci_sa1111_reset(struct usb_hcd *hcd)
{
- unsigned int usb_rst = 0;
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ ohci_hcd_init(ohci);
+ return ohci_init(ohci);
+}
- printk(KERN_DEBUG "%s: starting SA-1111 OHCI USB Controller\n",
- __FILE__);
+static int ohci_sa1111_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
-#ifdef CONFIG_SA1100_BADGE4
- if (machine_is_badge4()) {
- badge4_set_5V(BADGE4_5V_USB, 1);
+ ret = ohci_run(ohci);
+ if (ret < 0) {
+ ohci_err(ohci, "can't start\n");
+ ohci_stop(hcd);
}
+ return ret;
+}
+
+static const struct hc_driver ohci_sa1111_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "SA-1111 OHCI",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ohci_sa1111_reset,
+ .start = ohci_sa1111_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+static int sa1111_start_hc(struct sa1111_dev *dev)
+{
+ unsigned int usb_rst = 0;
+ int ret;
+
+ dev_dbg(&dev->dev, "starting SA-1111 OHCI USB Controller\n");
if (machine_is_xp860() ||
machine_has_neponset() ||
@@ -51,220 +137,129 @@ static void sa1111_start_hc(struct sa1111_dev *dev)
* host controller in reset.
*/
sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
- dev->mapbase + SA1111_USB_RESET);
+ dev->mapbase + USB_RESET);
/*
* Now, carefully enable the USB clock, and take
* the USB host controller out of reset.
*/
- sa1111_enable_device(dev);
- udelay(11);
- sa1111_writel(usb_rst, dev->mapbase + SA1111_USB_RESET);
+ ret = sa1111_enable_device(dev);
+ if (ret == 0) {
+ udelay(11);
+ sa1111_writel(usb_rst, dev->mapbase + USB_RESET);
+ }
+
+ return ret;
}
static void sa1111_stop_hc(struct sa1111_dev *dev)
{
unsigned int usb_rst;
- printk(KERN_DEBUG "%s: stopping SA-1111 OHCI USB Controller\n",
- __FILE__);
+
+ dev_dbg(&dev->dev, "stopping SA-1111 OHCI USB Controller\n");
/*
* Put the USB host controller into reset.
*/
- usb_rst = sa1111_readl(dev->mapbase + SA1111_USB_RESET);
+ usb_rst = sa1111_readl(dev->mapbase + USB_RESET);
sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
- dev->mapbase + SA1111_USB_RESET);
+ dev->mapbase + USB_RESET);
/*
* Stop the USB clock.
*/
sa1111_disable_device(dev);
-
-#ifdef CONFIG_SA1100_BADGE4
- if (machine_is_badge4()) {
- /* Disable power to the USB bus */
- badge4_set_5V(BADGE4_5V_USB, 0);
- }
-#endif
}
-
-/*-------------------------------------------------------------------------*/
-
-#if 0
-static void dump_hci_status(struct usb_hcd *hcd, const char *label)
-{
- unsigned long status = sa1111_readl(hcd->regs + SA1111_USB_STATUS);
-
- dbg ("%s USB_STATUS = { %s%s%s%s%s}", label,
- ((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
- ((status & USB_STATUS_IRQHCIBUFFACC) ? "IRQHCIBUFFACC " : ""),
- ((status & USB_STATUS_NIRQHCIM) ? "" : "IRQHCIM "),
- ((status & USB_STATUS_NHCIMFCLR) ? "" : "HCIMFCLR "),
- ((status & USB_STATUS_USBPWRSENSE) ? "USBPWRSENSE " : ""));
-}
-#endif
-
-/*-------------------------------------------------------------------------*/
-
-/* configure so an HC device and id are always provided */
-/* always called with process context; sleeping is OK */
-
-
/**
- * usb_hcd_sa1111_probe - initialize SA-1111-based HCDs
- * Context: !in_interrupt()
+ * ohci_hcd_sa1111_probe - initialize SA-1111-based HCDs
*
* Allocates basic resources for this USB host controller, and
- * then invokes the start() method for the HCD associated with it
- * through the hotplug entry's driver_data.
- *
- * Store this function in the HCD's struct pci_driver as probe().
+ * then invokes the start() method for the HCD associated with it.
*/
-int usb_hcd_sa1111_probe (const struct hc_driver *driver,
- struct sa1111_dev *dev)
+static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
{
struct usb_hcd *hcd;
- int retval;
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /*
+ * We don't call dma_set_mask_and_coherent() here because the
+ * DMA mask has already been appropriately set up by the core
+ * SA-1111 bus code (which includes bug workarounds.)
+ */
- hcd = usb_create_hcd (driver, &dev->dev, "sa1111");
+ hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111");
if (!hcd)
return -ENOMEM;
+
hcd->rsrc_start = dev->res.start;
- hcd->rsrc_len = dev->res.end - dev->res.start + 1;
+ hcd->rsrc_len = resource_size(&dev->res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- dbg("request_mem_region failed");
- retval = -EBUSY;
+ dev_dbg(&dev->dev, "request_mem_region failed\n");
+ ret = -EBUSY;
goto err1;
}
+
hcd->regs = dev->mapbase;
- sa1111_start_hc(dev);
- ohci_hcd_init(hcd_to_ohci(hcd));
+ ret = sa1111_start_hc(dev);
+ if (ret)
+ goto err2;
- retval = usb_add_hcd(hcd, dev->irq[1], IRQF_DISABLED);
- if (retval == 0)
- return retval;
+ ret = usb_add_hcd(hcd, dev->irq[1], 0);
+ if (ret == 0) {
+ device_wakeup_enable(hcd->self.controller);
+ return ret;
+ }
sa1111_stop_hc(dev);
+ err2:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
usb_put_hcd(hcd);
- return retval;
+ return ret;
}
-
-/* may be called without controller electrically present */
-/* may be called with controller, bus, and devices active */
-
/**
- * usb_hcd_sa1111_remove - shutdown processing for SA-1111-based HCDs
+ * ohci_hcd_sa1111_remove - shutdown processing for SA-1111-based HCDs
* @dev: USB Host Controller being removed
- * Context: !in_interrupt()
- *
- * Reverses the effect of usb_hcd_sa1111_probe(), first invoking
- * the HCD's stop() method. It is always called from a thread
- * context, normally "rmmod", "apmd", or something similar.
*
+ * Reverses the effect of ohci_hcd_sa1111_probe(), first invoking
+ * the HCD's stop() method.
*/
-void usb_hcd_sa1111_remove (struct usb_hcd *hcd, struct sa1111_dev *dev)
+static int ohci_hcd_sa1111_remove(struct sa1111_dev *dev)
{
+ struct usb_hcd *hcd = sa1111_get_drvdata(dev);
+
usb_remove_hcd(hcd);
sa1111_stop_hc(dev);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int __devinit
-ohci_sa1111_start (struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
- if ((ret = ohci_run (ohci)) < 0) {
- err ("can't start %s", hcd->self.bus_name);
- ohci_stop (hcd);
- return ret;
- }
return 0;
}
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ohci_sa1111_hc_driver = {
- .description = hcd_name,
- .product_desc = "SA-1111 OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_sa1111_start,
- .stop = ohci_stop,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/*-------------------------------------------------------------------------*/
-
-static int ohci_hcd_sa1111_drv_probe(struct sa1111_dev *dev)
-{
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
- ret = usb_hcd_sa1111_probe(&ohci_sa1111_hc_driver, dev);
- return ret;
-}
-
-static int ohci_hcd_sa1111_drv_remove(struct sa1111_dev *dev)
+static void ohci_hcd_sa1111_shutdown(struct sa1111_dev *dev)
{
struct usb_hcd *hcd = sa1111_get_drvdata(dev);
- usb_hcd_sa1111_remove(hcd, dev);
- return 0;
+ if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ hcd->driver->shutdown(hcd);
+ sa1111_stop_hc(dev);
+ }
}
static struct sa1111_driver ohci_hcd_sa1111_driver = {
.drv = {
.name = "sa1111-ohci",
+ .owner = THIS_MODULE,
},
.devid = SA1111_DEVID_USB,
- .probe = ohci_hcd_sa1111_drv_probe,
- .remove = ohci_hcd_sa1111_drv_remove,
+ .probe = ohci_hcd_sa1111_probe,
+ .remove = ohci_hcd_sa1111_remove,
+ .shutdown = ohci_hcd_sa1111_shutdown,
};
-
diff --git a/drivers/usb/host/ohci-sh.c b/drivers/usb/host/ohci-sh.c
deleted file mode 100644
index 0b35d22cc70..00000000000
--- a/drivers/usb/host/ohci-sh.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * OHCI HCD (Host Controller Driver) for USB.
- *
- * Copyright (C) 2008 Renesas Solutions Corp.
- *
- * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- */
-
-#include <linux/platform_device.h>
-
-static int ohci_sh_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-
- ohci_hcd_init(ohci);
- ohci_init(ohci);
- ohci_run(ohci);
- hcd->state = HC_STATE_RUNNING;
- return 0;
-}
-
-static const struct hc_driver ohci_sh_hc_driver = {
- .description = hcd_name,
- .product_desc = "SuperH OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_sh_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/*-------------------------------------------------------------------------*/
-
-static int ohci_hcd_sh_probe(struct platform_device *pdev)
-{
- struct resource *res = NULL;
- struct usb_hcd *hcd = NULL;
- int irq = -1;
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err("platform_get_resource error.");
- return -ENODEV;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err("platform_get_irq error.");
- return -ENODEV;
- }
-
- /* initialize hcd */
- hcd = usb_create_hcd(&ohci_sh_hc_driver, &pdev->dev, (char *)hcd_name);
- if (!hcd) {
- err("Failed to create hcd");
- return -ENOMEM;
- }
-
- hcd->regs = (void __iomem *)res->start;
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
- if (ret != 0) {
- err("Failed to add hcd");
- usb_put_hcd(hcd);
- return ret;
- }
-
- return ret;
-}
-
-static int ohci_hcd_sh_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-static struct platform_driver ohci_hcd_sh_driver = {
- .probe = ohci_hcd_sh_probe,
- .remove = ohci_hcd_sh_remove,
- .shutdown = usb_hcd_platform_shutdown,
- .driver = {
- .name = "sh_ohci",
- .owner = THIS_MODULE,
- },
-};
-
-MODULE_ALIAS("platform:sh_ohci");
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 041d30f30c1..4e81c804c73 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -103,8 +103,7 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
goto err0;
}
- if (!request_mem_region(mem->start, mem->end - mem->start + 1,
- pdev->name)) {
+ if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
dev_err(dev, "request_mem_region failed\n");
retval = -EBUSY;
goto err0;
@@ -126,7 +125,7 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
if (!dma_declare_coherent_memory(dev, mem->start,
mem->start - mem->parent->start,
- (mem->end - mem->start) + 1,
+ resource_size(mem),
DMA_MEMORY_MAP |
DMA_MEMORY_EXCLUSIVE)) {
dev_err(dev, "cannot declare coherent memory\n");
@@ -149,7 +148,7 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, pdev->name)) {
dev_err(dev, "request_mem_region failed\n");
@@ -166,9 +165,10 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
ohci_hcd_init(hcd_to_ohci(hcd));
- retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
goto err5;
+ device_wakeup_enable(hcd->self.controller);
/* enable power and unmask interrupts */
@@ -185,7 +185,7 @@ err3:
err2:
dma_release_declared_memory(dev);
err1:
- release_mem_region(mem->start, mem->end - mem->start + 1);
+ release_mem_region(mem->start, resource_size(mem));
err0:
return retval;
}
@@ -201,14 +201,13 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
dma_release_declared_memory(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (mem)
- release_mem_region(mem->start, mem->end - mem->start + 1);
+ release_mem_region(mem->start, resource_size(mem));
/* mask interrupts and disable power */
sm501_modify_reg(pdev->dev.parent, SM501_IRQ_MASK, 0, 1 << 6);
sm501_unit_power(pdev->dev.parent, SM501_GATE_USB_HOST, 0);
- platform_set_drvdata(pdev, NULL);
return 0;
}
@@ -218,15 +217,21 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
static int ohci_sm501_suspend(struct platform_device *pdev, pm_message_t msg)
{
struct device *dev = &pdev->dev;
- struct ohci_hcd *ohci = hcd_to_ohci(platform_get_drvdata(pdev));
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(dev);
+ int ret;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+
sm501_unit_power(dev->parent, SM501_GATE_USB_HOST, 0);
- ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
- return 0;
+ return ret;
}
static int ohci_sm501_resume(struct platform_device *pdev)
@@ -240,7 +245,7 @@ static int ohci_sm501_resume(struct platform_device *pdev)
ohci->next_statechange = jiffies;
sm501_unit_power(dev->parent, SM501_GATE_USB_HOST, 1);
- ohci_finish_controller_resume(hcd);
+ ohci_resume(hcd, false);
return 0;
}
#else
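
The repeated "mem->end - mem->start + 1" to resource_size() substitutions in this file (and in the other glue drivers above) are purely cosmetic; resource_size() from <linux/ioport.h> is defined as exactly that computation:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}
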
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
new file mode 100644
index 00000000000..8b29a0c04c2
--- /dev/null
+++ b/drivers/usb/host/ohci-spear.c
@@ -0,0 +1,209 @@
+/*
+* OHCI HCD (Host Controller Driver) for USB.
+*
+* Copyright (C) 2010 ST Microelectronics.
+* Deepak Sikri <deepak.sikri@st.com>
+*
+* Based on various ohci-*.c drivers
+*
+* This file is licensed under the terms of the GNU General Public
+* License version 2. This program is licensed "as is" without any
+* warranty of any kind, whether express or implied.
+*/
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/signal.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
+#define DRIVER_DESC "OHCI SPEAr driver"
+
+static const char hcd_name[] = "SPEAr-ohci";
+struct spear_ohci {
+ struct clk *clk;
+};
+
+#define to_spear_ohci(hcd) (struct spear_ohci *)(hcd_to_ohci(hcd)->priv)
+
+static struct hc_driver __read_mostly ohci_spear_hc_driver;
+
+static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
+{
+ const struct hc_driver *driver = &ohci_spear_hc_driver;
+ struct ohci_hcd *ohci;
+ struct usb_hcd *hcd = NULL;
+ struct clk *usbh_clk;
+ struct spear_ohci *sohci_p;
+ struct resource *res;
+ int retval, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ retval = irq;
+ goto fail;
+ }
+
+ /*
+ * Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail;
+
+ usbh_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(usbh_clk)) {
+ dev_err(&pdev->dev, "Error getting interface clock\n");
+ retval = PTR_ERR(usbh_clk);
+ goto fail;
+ }
+
+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto fail;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ retval = -ENODEV;
+ goto err_put_hcd;
+ }
+
+ hcd->rsrc_start = pdev->resource[0].start;
+ hcd->rsrc_len = resource_size(res);
+
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
+ goto err_put_hcd;
+ }
+
+ sohci_p = to_spear_ohci(hcd);
+ sohci_p->clk = usbh_clk;
+
+ clk_prepare_enable(sohci_p->clk);
+
+ ohci = hcd_to_ohci(hcd);
+
+ retval = usb_add_hcd(hcd, platform_get_irq(pdev, 0), 0);
+ if (retval == 0) {
+ device_wakeup_enable(hcd->self.controller);
+ return retval;
+ }
+
+ clk_disable_unprepare(sohci_p->clk);
+err_put_hcd:
+ usb_put_hcd(hcd);
+fail:
+ dev_err(&pdev->dev, "init fail, %d\n", retval);
+
+ return retval;
+}
+
+static int spear_ohci_hcd_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct spear_ohci *sohci_p = to_spear_ohci(hcd);
+
+ usb_remove_hcd(hcd);
+ if (sohci_p->clk)
+ clk_disable_unprepare(sohci_p->clk);
+
+ usb_put_hcd(hcd);
+ return 0;
+}
+
+#if defined(CONFIG_PM)
+static int spear_ohci_hcd_drv_suspend(struct platform_device *pdev,
+ pm_message_t message)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ struct spear_ohci *sohci_p = to_spear_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(&pdev->dev);
+ int ret;
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(sohci_p->clk);
+
+ return ret;
+}
+
+static int spear_ohci_hcd_drv_resume(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ struct spear_ohci *sohci_p = to_spear_ohci(hcd);
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ clk_prepare_enable(sohci_p->clk);
+ ohci_resume(hcd, false);
+ return 0;
+}
+#endif
+
+static struct of_device_id spear_ohci_id_table[] = {
+ { .compatible = "st,spear600-ohci", },
+ { },
+};
+
+/* Driver definition to register with the platform bus */
+static struct platform_driver spear_ohci_hcd_driver = {
+ .probe = spear_ohci_hcd_drv_probe,
+ .remove = spear_ohci_hcd_drv_remove,
+#ifdef CONFIG_PM
+ .suspend = spear_ohci_hcd_drv_suspend,
+ .resume = spear_ohci_hcd_drv_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "spear-ohci",
+ .of_match_table = spear_ohci_id_table,
+ },
+};
+
+static const struct ohci_driver_overrides spear_overrides __initconst = {
+ .extra_priv_size = sizeof(struct spear_ohci),
+};
+static int __init ohci_spear_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ohci_init_driver(&ohci_spear_hc_driver, &spear_overrides);
+ return platform_driver_register(&spear_ohci_hcd_driver);
+}
+module_init(ohci_spear_init);
+
+static void __exit ohci_spear_cleanup(void)
+{
+ platform_driver_unregister(&spear_ohci_hcd_driver);
+}
+module_exit(ohci_spear_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Deepak Sikri");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spear-ohci");
diff --git a/drivers/usb/host/ohci-ssb.c b/drivers/usb/host/ohci-ssb.c
deleted file mode 100644
index 48ee6943bf3..00000000000
--- a/drivers/usb/host/ohci-ssb.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * Sonics Silicon Backplane
- * Broadcom USB-core OHCI driver
- *
- * Copyright 2007 Michael Buesch <mb@bu3sch.de>
- *
- * Derived from the OHCI-PCI driver
- * Copyright 1999 Roman Weissgaerber
- * Copyright 2000-2002 David Brownell
- * Copyright 1999 Linus Torvalds
- * Copyright 1999 Gregory P. Smith
- *
- * Derived from the USBcore related parts of Broadcom-SB
- * Copyright 2005 Broadcom Corporation
- *
- * Licensed under the GNU/GPL. See COPYING for details.
- */
-#include <linux/ssb/ssb.h>
-
-
-#define SSB_OHCI_TMSLOW_HOSTMODE (1 << 29)
-
-struct ssb_ohci_device {
- struct ohci_hcd ohci; /* _must_ be at the beginning. */
-
- u32 enable_flags;
-};
-
-static inline
-struct ssb_ohci_device *hcd_to_ssb_ohci(struct usb_hcd *hcd)
-{
- return (struct ssb_ohci_device *)(hcd->hcd_priv);
-}
-
-
-static int ssb_ohci_reset(struct usb_hcd *hcd)
-{
- struct ssb_ohci_device *ohcidev = hcd_to_ssb_ohci(hcd);
- struct ohci_hcd *ohci = &ohcidev->ohci;
- int err;
-
- ohci_hcd_init(ohci);
- err = ohci_init(ohci);
-
- return err;
-}
-
-static int ssb_ohci_start(struct usb_hcd *hcd)
-{
- struct ssb_ohci_device *ohcidev = hcd_to_ssb_ohci(hcd);
- struct ohci_hcd *ohci = &ohcidev->ohci;
- int err;
-
- err = ohci_run(ohci);
- if (err < 0) {
- ohci_err(ohci, "can't start\n");
- ohci_stop(hcd);
- }
-
- return err;
-}
-
-static const struct hc_driver ssb_ohci_hc_driver = {
- .description = "ssb-usb-ohci",
- .product_desc = "SSB OHCI Controller",
- .hcd_priv_size = sizeof(struct ssb_ohci_device),
-
- .irq = ohci_irq,
- .flags = HCD_MEMORY | HCD_USB11,
-
- .reset = ssb_ohci_reset,
- .start = ssb_ohci_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- .get_frame_number = ohci_get_frame,
-
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
-
- .start_port_reset = ohci_start_port_reset,
-};
-
-static void ssb_ohci_detach(struct ssb_device *dev)
-{
- struct usb_hcd *hcd = ssb_get_drvdata(dev);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
- usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
- ssb_device_disable(dev, 0);
-}
-
-static int ssb_ohci_attach(struct ssb_device *dev)
-{
- struct ssb_ohci_device *ohcidev;
- struct usb_hcd *hcd;
- int err = -ENOMEM;
- u32 tmp, flags = 0;
-
- if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
- dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
- return -EOPNOTSUPP;
-
- if (dev->id.coreid == SSB_DEV_USB11_HOSTDEV) {
- /* Put the device into host-mode. */
- flags |= SSB_OHCI_TMSLOW_HOSTMODE;
- ssb_device_enable(dev, flags);
- } else if (dev->id.coreid == SSB_DEV_USB20_HOST) {
- /*
- * USB 2.0 special considerations:
- *
- * In addition to the standard SSB reset sequence, the Host
- * Control Register must be programmed to bring the USB core
- * and various phy components out of reset.
- */
- ssb_device_enable(dev, 0);
- ssb_write32(dev, 0x200, 0x7ff);
-
- /* Change Flush control reg */
- tmp = ssb_read32(dev, 0x400);
- tmp &= ~8;
- ssb_write32(dev, 0x400, tmp);
- tmp = ssb_read32(dev, 0x400);
-
- /* Change Shim control reg */
- tmp = ssb_read32(dev, 0x304);
- tmp &= ~0x100;
- ssb_write32(dev, 0x304, tmp);
- tmp = ssb_read32(dev, 0x304);
-
- udelay(1);
-
- /* Work around for 5354 failures */
- if (dev->id.revision == 2 && dev->bus->chip_id == 0x5354) {
- /* Change syn01 reg */
- tmp = 0x00fe00fe;
- ssb_write32(dev, 0x894, tmp);
-
- /* Change syn03 reg */
- tmp = ssb_read32(dev, 0x89c);
- tmp |= 0x1;
- ssb_write32(dev, 0x89c, tmp);
- }
- } else
- ssb_device_enable(dev, 0);
-
- hcd = usb_create_hcd(&ssb_ohci_hc_driver, dev->dev,
- dev_name(dev->dev));
- if (!hcd)
- goto err_dev_disable;
- ohcidev = hcd_to_ssb_ohci(hcd);
- ohcidev->enable_flags = flags;
-
- tmp = ssb_read32(dev, SSB_ADMATCH0);
- hcd->rsrc_start = ssb_admatch_base(tmp);
- hcd->rsrc_len = ssb_admatch_size(tmp);
- hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs)
- goto err_put_hcd;
- err = usb_add_hcd(hcd, dev->irq, IRQF_DISABLED | IRQF_SHARED);
- if (err)
- goto err_iounmap;
-
- ssb_set_drvdata(dev, hcd);
-
- return err;
-
-err_iounmap:
- iounmap(hcd->regs);
-err_put_hcd:
- usb_put_hcd(hcd);
-err_dev_disable:
- ssb_device_disable(dev, flags);
- return err;
-}
-
-static int ssb_ohci_probe(struct ssb_device *dev,
- const struct ssb_device_id *id)
-{
- int err;
- u16 chipid_top;
-
- /* USBcores are only connected on embedded devices. */
- chipid_top = (dev->bus->chip_id & 0xFF00);
- if (chipid_top != 0x4700 && chipid_top != 0x5300)
- return -ENODEV;
-
- /* TODO: Probably need checks here; is the core connected? */
-
- if (usb_disabled())
- return -ENODEV;
-
- /* We currently always attach SSB_DEV_USB11_HOSTDEV
- * as HOST OHCI. If we want to attach it as Client device,
- * we must branch here and call into the (yet to
- * be written) Client mode driver. Same for remove(). */
-
- err = ssb_ohci_attach(dev);
-
- return err;
-}
-
-static void ssb_ohci_remove(struct ssb_device *dev)
-{
- ssb_ohci_detach(dev);
-}
-
-#ifdef CONFIG_PM
-
-static int ssb_ohci_suspend(struct ssb_device *dev, pm_message_t state)
-{
- ssb_device_disable(dev, 0);
-
- return 0;
-}
-
-static int ssb_ohci_resume(struct ssb_device *dev)
-{
- struct usb_hcd *hcd = ssb_get_drvdata(dev);
- struct ssb_ohci_device *ohcidev = hcd_to_ssb_ohci(hcd);
-
- ssb_device_enable(dev, ohcidev->enable_flags);
-
- ohci_finish_controller_resume(hcd);
- return 0;
-}
-
-#else /* !CONFIG_PM */
-#define ssb_ohci_suspend NULL
-#define ssb_ohci_resume NULL
-#endif /* CONFIG_PM */
-
-static const struct ssb_device_id ssb_ohci_table[] = {
- SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOSTDEV, SSB_ANY_REV),
- SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOST, SSB_ANY_REV),
- SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB20_HOST, SSB_ANY_REV),
- SSB_DEVTABLE_END
-};
-MODULE_DEVICE_TABLE(ssb, ssb_ohci_table);
-
-static struct ssb_driver ssb_ohci_driver = {
- .name = KBUILD_MODNAME,
- .id_table = ssb_ohci_table,
- .probe = ssb_ohci_probe,
- .remove = ssb_ohci_remove,
- .suspend = ssb_ohci_suspend,
- .resume = ssb_ohci_resume,
-};
diff --git a/drivers/usb/host/ohci-tilegx.c b/drivers/usb/host/ohci-tilegx.c
new file mode 100644
index 00000000000..bef6dfb0405
--- /dev/null
+++ b/drivers/usb/host/ohci-tilegx.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * Tilera TILE-Gx USB OHCI host controller driver.
+ */
+
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/usb/tilegx.h>
+#include <linux/usb.h>
+
+#include <asm/homecache.h>
+
+#include <gxio/iorpc_usb_host.h>
+#include <gxio/usb_host.h>
+
+static void tilegx_start_ohc(void)
+{
+}
+
+static void tilegx_stop_ohc(void)
+{
+}
+
+static int tilegx_ohci_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ ret = ohci_init(ohci);
+ if (ret < 0)
+ return ret;
+
+ ret = ohci_run(ohci);
+ if (ret < 0) {
+ dev_err(hcd->self.controller, "can't start %s\n",
+ hcd->self.bus_name);
+ ohci_stop(hcd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hc_driver ohci_tilegx_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Tile-Gx OHCI",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * Generic hardware linkage.
+ */
+ .irq = ohci_irq,
+ .flags = HCD_MEMORY | HCD_LOCAL_MEM | HCD_USB11,
+
+ /*
+ * Basic lifecycle operations.
+ */
+ .start = tilegx_ohci_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * Managing I/O requests and associated device resources.
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * Scheduling support.
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * Root hub support.
+ */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+ .start_port_reset = ohci_start_port_reset,
+};
+
+static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ pte_t pte = { 0 };
+ int my_cpu = smp_processor_id();
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /*
+ * Try to initialize our GXIO context; if we can't, the device
+ * doesn't exist.
+ */
+ if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 0) != 0)
+ return -ENXIO;
+
+ hcd = usb_create_hcd(&ohci_tilegx_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto err_hcd;
+ }
+
+ /*
+ * We don't use rsrc_start to map in our registers, but it seems like
+ * we ought to set it to something, so we use the register VA.
+ */
+ hcd->rsrc_start =
+ (ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+ hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx);
+ hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+
+ tilegx_start_ohc();
+
+ /* Create our IRQs and register them. */
+ pdata->irq = irq_alloc_hwirq(-1);
+ if (!pdata->irq) {
+ ret = -ENXIO;
+ goto err_no_irq;
+ }
+
+ tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU);
+
+ /* Configure interrupts. */
+ ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx,
+ cpu_x(my_cpu), cpu_y(my_cpu),
+ KERNEL_PL, pdata->irq);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ /* Register all of our memory. */
+ pte = pte_set_home(pte, PAGE_HOME_HASH);
+ ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ ohci_hcd_init(hcd_to_ohci(hcd));
+
+ ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
+ if (ret == 0) {
+ platform_set_drvdata(pdev, hcd);
+ device_wakeup_enable(hcd->self.controller);
+ return ret;
+ }
+
+err_have_irq:
+ irq_free_hwirq(pdata->irq);
+err_no_irq:
+ tilegx_stop_ohc();
+ usb_put_hcd(hcd);
+err_hcd:
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ return ret;
+}
+
+static int ohci_hcd_tilegx_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+ tilegx_stop_ohc();
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ irq_free_hwirq(pdata->irq);
+
+ return 0;
+}
+
+static void ohci_hcd_tilegx_drv_shutdown(struct platform_device *pdev)
+{
+ usb_hcd_platform_shutdown(pdev);
+ ohci_hcd_tilegx_drv_remove(pdev);
+}
+
+static struct platform_driver ohci_hcd_tilegx_driver = {
+ .probe = ohci_hcd_tilegx_drv_probe,
+ .remove = ohci_hcd_tilegx_drv_remove,
+ .shutdown = ohci_hcd_tilegx_drv_shutdown,
+ .driver = {
+ .name = "tilegx-ohci",
+ .owner = THIS_MODULE,
+ }
+};
+
+MODULE_ALIAS("platform:tilegx-ohci");
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 8dabe8e31d8..bb409588d39 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -27,7 +27,6 @@
/*#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
-#include <linux/init.h>
#include <linux/namei.h>
#include <linux/sched.h>*/
#include <linux/platform_device.h>
@@ -128,7 +127,8 @@ static void tmio_start_hc(struct platform_device *dev)
tmio_iowrite8(2, tmio->ccr + CCR_INTC);
dev_info(&dev->dev, "revision %d @ 0x%08llx, irq %d\n",
- tmio_ioread8(tmio->ccr + CCR_REVID), hcd->rsrc_start, hcd->irq);
+ tmio_ioread8(tmio->ccr + CCR_REVID),
+ (u64) hcd->rsrc_start, hcd->irq);
}
static int ohci_tmio_start(struct usb_hcd *hcd)
@@ -140,7 +140,8 @@ static int ohci_tmio_start(struct usb_hcd *hcd)
return ret;
if ((ret = ohci_run(ohci)) < 0) {
- err("can't start %s", hcd->self.bus_name);
+ dev_err(hcd->self.controller, "can't start %s\n",
+ hcd->self.bus_name);
ohci_stop(hcd);
return ret;
}
@@ -183,9 +184,9 @@ static const struct hc_driver ohci_tmio_hc_driver = {
/*-------------------------------------------------------------------------*/
static struct platform_driver ohci_hcd_tmio_driver;
-static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
+static int ohci_hcd_tmio_drv_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct resource *regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
struct resource *config = platform_get_resource(dev, IORESOURCE_MEM, 1);
struct resource *sram = platform_get_resource(dev, IORESOURCE_MEM, 2);
@@ -208,13 +209,13 @@ static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
}
hcd->rsrc_start = regs->start;
- hcd->rsrc_len = regs->end - regs->start + 1;
+ hcd->rsrc_len = resource_size(regs);
tmio = hcd_to_tmio(hcd);
spin_lock_init(&tmio->lock);
- tmio->ccr = ioremap(config->start, config->end - config->start + 1);
+ tmio->ccr = ioremap(config->start, resource_size(config));
if (!tmio->ccr) {
ret = -ENOMEM;
goto err_ioremap_ccr;
@@ -228,7 +229,7 @@ static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
if (!dma_declare_coherent_memory(&dev->dev, sram->start,
sram->start,
- sram->end - sram->start + 1,
+ resource_size(sram),
DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)) {
ret = -EBUSY;
goto err_dma_declare;
@@ -244,10 +245,11 @@ static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
ohci = hcd_to_ohci(hcd);
ohci_hcd_init(ohci);
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, irq, 0);
if (ret)
goto err_add_hcd;
+ device_wakeup_enable(hcd->self.controller);
if (ret == 0)
return ret;
@@ -270,11 +272,11 @@ err_usb_create_hcd:
return ret;
}
-static int __devexit ohci_hcd_tmio_drv_remove(struct platform_device *dev)
+static int ohci_hcd_tmio_drv_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct tmio_hcd *tmio = hcd_to_tmio(hcd);
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
usb_remove_hcd(hcd);
tmio_stop_hc(dev);
@@ -285,15 +287,13 @@ static int __devexit ohci_hcd_tmio_drv_remove(struct platform_device *dev)
iounmap(tmio->ccr);
usb_put_hcd(hcd);
- platform_set_drvdata(dev, NULL);
-
return 0;
}
#ifdef CONFIG_PM
static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct tmio_hcd *tmio = hcd_to_tmio(hcd);
@@ -318,15 +318,12 @@ static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t s
if (ret)
return ret;
}
-
- hcd->state = HC_STATE_SUSPENDED;
-
return 0;
}
static int ohci_hcd_tmio_drv_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct tmio_hcd *tmio = hcd_to_tmio(hcd);
@@ -354,7 +351,7 @@ static int ohci_hcd_tmio_drv_resume(struct platform_device *dev)
spin_unlock_irqrestore(&tmio->lock, flags);
- ohci_finish_controller_resume(hcd);
+ ohci_resume(hcd, false);
return 0;
}
@@ -365,7 +362,7 @@ static int ohci_hcd_tmio_drv_resume(struct platform_device *dev)
static struct platform_driver ohci_hcd_tmio_driver = {
.probe = ohci_hcd_tmio_drv_probe,
- .remove = __devexit_p(ohci_hcd_tmio_drv_remove),
+ .remove = ohci_hcd_tmio_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.suspend = ohci_hcd_tmio_drv_suspend,
.resume = ohci_hcd_tmio_drv_resume,
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 51facb985c8..05e02a709d4 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -344,6 +344,12 @@ typedef struct urb_priv {
* a subset of what the full implementation needs. (Linus)
*/
+enum ohci_rh_state {
+ OHCI_RH_HALTED,
+ OHCI_RH_SUSPENDED,
+ OHCI_RH_RUNNING
+};
+
struct ohci_hcd {
spinlock_t lock;
@@ -366,11 +372,6 @@ struct ohci_hcd {
struct ed *ed_controltail; /* last in ctrl list */
struct ed *periodic [NUM_INTS]; /* shadow int_table */
- /*
- * OTG controllers and transceivers need software interaction;
- * other external transceivers should be software-transparent
- */
- struct otg_transceiver *transceiver;
void (*start_hnp)(struct ohci_hcd *ohci);
/*
@@ -384,6 +385,7 @@ struct ohci_hcd {
/*
* driver state
*/
+ enum ohci_rh_state rh_state;
int num_ports;
int load [NUM_INTS];
u32 hc_control; /* copy of hc control reg */
@@ -401,9 +403,10 @@ struct ohci_hcd {
#define OHCI_QUIRK_NEC 0x40 /* lost interrupts */
#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */
#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
-#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/
+#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
-#define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */
+#define OHCI_QUIRK_GLOBAL_SUSPEND 0x800 /* must suspend ports */
+
// there are also chip quirks/bugs in init logic
struct work_struct nec_work; /* Worker for NEC quirk */
@@ -414,12 +417,14 @@ struct ohci_hcd {
struct ed *ed_to_check;
unsigned zf_delay;
-#ifdef DEBUG
struct dentry *debug_dir;
struct dentry *debug_async;
struct dentry *debug_periodic;
struct dentry *debug_registers;
-#endif
+
+ /* platform-specific data -- must come last */
+ unsigned long priv[0] __aligned(sizeof(s64));
+
};
#ifdef CONFIG_PCI
@@ -433,7 +438,7 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci)
}
static inline int quirk_amdiso(struct ohci_hcd *ohci)
{
- return ohci->flags & OHCI_QUIRK_AMD_ISO;
+ return ohci->flags & OHCI_QUIRK_AMD_PLL;
}
static inline int quirk_amdprefetch(struct ohci_hcd *ohci)
{
@@ -470,10 +475,6 @@ static inline struct usb_hcd *ohci_to_hcd (const struct ohci_hcd *ohci)
/*-------------------------------------------------------------------------*/
-#ifndef DEBUG
-#define STUB_DEBUG_FILES
-#endif /* DEBUG */
-
#define ohci_dbg(ohci, fmt, args...) \
dev_dbg (ohci_to_hcd(ohci)->self.controller , fmt , ## args )
#define ohci_err(ohci, fmt, args...) \
@@ -483,12 +484,6 @@ static inline struct usb_hcd *ohci_to_hcd (const struct ohci_hcd *ohci)
#define ohci_warn(ohci, fmt, args...) \
dev_warn (ohci_to_hcd(ohci)->self.controller , fmt , ## args )
-#ifdef OHCI_VERBOSE_DEBUG
-# define ohci_vdbg ohci_dbg
-#else
-# define ohci_vdbg(ohci, fmt, args...) do { } while (0)
-#endif
-
/*-------------------------------------------------------------------------*/
/*
@@ -575,18 +570,8 @@ static inline void _ohci_writel (const struct ohci_hcd *ohci,
#endif
}
-#ifdef CONFIG_ARCH_LH7A404
-/* Marc Singer: at the time this code was written, the LH7A404
- * had a problem reading the USB host registers. This
- * implementation of the ohci_readl function performs the read
- * twice as a work-around.
- */
-#define ohci_readl(o,r) (_ohci_readl(o,r),_ohci_readl(o,r))
-#define ohci_writel(o,v,r) _ohci_writel(o,v,r)
-#else
#define ohci_readl(o,r) _ohci_readl(o,r)
#define ohci_writel(o,v,r) _ohci_writel(o,v,r)
-#endif
/*-------------------------------------------------------------------------*/
@@ -690,11 +675,6 @@ static inline u16 ohci_hwPSW(const struct ohci_hcd *ohci,
/*-------------------------------------------------------------------------*/
-static inline void disable (struct ohci_hcd *ohci)
-{
- ohci_to_hcd(ohci)->state = HC_STATE_HALT;
-}
-
#define FI 0x2edf /* 12000 bits per frame (-1) */
#define FSMP(fi) (0x7fff & ((6 * ((fi) - 210)) / 7))
#define FIT (1 << 31)
@@ -718,7 +698,7 @@ static inline void periodic_reinit (struct ohci_hcd *ohci)
#define read_roothub(hc, register, mask) ({ \
u32 temp = ohci_readl (hc, &hc->regs->roothub.register); \
if (temp == -1) \
- disable (hc); \
+ hc->rh_state = OHCI_RH_HALTED; \
else if (hc->flags & OHCI_QUIRK_AMD756) \
while (temp & mask) \
temp = ohci_readl (hc, &hc->regs->roothub.register); \
@@ -732,3 +712,23 @@ static inline u32 roothub_status (struct ohci_hcd *hc)
{ return ohci_readl (hc, &hc->regs->roothub.status); }
static inline u32 roothub_portstatus (struct ohci_hcd *hc, int i)
{ return read_roothub (hc, portstatus [i], 0xffe0fce0); }
+
+/* Declarations of things exported for use by ohci platform drivers */
+
+struct ohci_driver_overrides {
+ const char *product_desc;
+ size_t extra_priv_size;
+ int (*reset)(struct usb_hcd *hcd);
+};
+
+extern void ohci_init_driver(struct hc_driver *drv,
+ const struct ohci_driver_overrides *over);
+extern int ohci_restart(struct ohci_hcd *ohci);
+extern int ohci_setup(struct usb_hcd *hcd);
+#ifdef CONFIG_PM
+extern int ohci_suspend(struct usb_hcd *hcd, bool do_wakeup);
+extern int ohci_resume(struct usb_hcd *hcd, bool hibernated);
+#endif
+extern int ohci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength);
+extern int ohci_hub_status_data(struct usb_hcd *hcd, char *buf);
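Taken together, the declarations above form the interface the refactored OHCI core exposes to platform glue drivers. A minimal, hypothetical sketch of a glue module using it (the foo_* names and the trivial reset hook are illustrative only, not part of this patch):

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/usb.h>
	#include <linux/usb/hcd.h>
	#include "ohci.h"

	static struct hc_driver __read_mostly foo_ohci_hc_driver;

	static int foo_ohci_reset(struct usb_hcd *hcd)
	{
		/* Do any board-specific setup here, then hand off to the core. */
		return ohci_setup(hcd);
	}

	static const struct ohci_driver_overrides foo_overrides __initconst = {
		.product_desc	 = "Foo OHCI controller",
		.extra_priv_size = 0,	/* no per-controller private data needed */
		.reset		 = foo_ohci_reset,
	};

	static int __init foo_ohci_driver_init(void)
	{
		/* Fill foo_ohci_hc_driver with the generic OHCI methods and
		 * apply the overrides; a probe routine (not shown) would then
		 * usb_create_hcd()/usb_add_hcd() against this hc_driver.
		 */
		ohci_init_driver(&foo_ohci_hc_driver, &foo_overrides);
		return 0;
	}
	module_init(foo_ohci_driver_init);
	MODULE_LICENSE("GPL");
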
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 32149be4ad8..e07248b6ab6 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -29,7 +29,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -40,7 +39,6 @@
#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include <asm/unaligned.h>
#include <linux/irq.h>
@@ -61,6 +59,10 @@
#define oxu_info(oxu, fmt, args...) \
dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
{
return container_of((void *) oxu, struct usb_hcd, hcd_priv);
@@ -233,7 +235,7 @@ module_param(park, uint, S_IRUGO);
MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
/* For flakey hardware, ignore overcurrent indicators */
-static int ignore_oc;
+static bool ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");
@@ -451,9 +453,9 @@ static void ehci_hub_descriptor(struct oxu_hcd *oxu,
temp = 1 + (ports / 8);
desc->bDescLength = 7 + 2 * temp;
- /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
- memset(&desc->bitmap[0], 0, temp);
- memset(&desc->bitmap[temp], 0xff, temp);
+ /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
temp = 0x0008; /* per-port overcurrent reporting */
if (HCS_PPC(oxu->hcs_params))
@@ -1400,8 +1402,8 @@ static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
* But interval 1 scheduling is simpler, and
* includes high bandwidth.
*/
- dbg("intr period %d uframes, NYET!",
- urb->interval);
+ oxu_dbg(oxu, "intr period %d uframes, NYET!\n",
+ urb->interval);
goto done;
}
} else {
@@ -1472,7 +1474,7 @@ static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
}
break;
default:
- dbg("bogus dev %p speed %d", urb->dev, urb->dev->speed);
+ oxu_dbg(oxu, "bogus dev %p speed %d\n", urb->dev, urb->dev->speed);
done:
qh_put(qh);
return NULL;
@@ -1884,6 +1886,7 @@ static int enable_periodic(struct oxu_hcd *oxu)
status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
if (status != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+ usb_hc_died(oxu_to_hcd(oxu));
return status;
}
@@ -1909,6 +1912,7 @@ static int disable_periodic(struct oxu_hcd *oxu)
status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
if (status != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+ usb_hc_died(oxu_to_hcd(oxu));
return status;
}
@@ -2306,7 +2310,7 @@ restart:
qh_put(temp.qh);
break;
default:
- dbg("corrupt type %d frame %d shadow %p",
+ oxu_dbg(oxu, "corrupt type %d frame %d shadow %p\n",
type, frame, q.ptr);
q.ptr = NULL;
}
@@ -2449,8 +2453,9 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
goto dead;
}
+ /* Shared IRQ? */
status &= INTR_MASK;
- if (!status) { /* irq sharing? */
+ if (!status || unlikely(hcd->state == HC_STATE_HALT)) {
spin_unlock(&oxu->lock);
return IRQ_NONE;
}
@@ -2516,6 +2521,7 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
dead:
ehci_reset(oxu);
writel(0, &oxu->regs->configured_flag);
+ usb_hc_died(hcd);
/* generic layer kills/unlinks all urbs, then
* uses oxu_stop to clean up the rest
*/
@@ -2879,7 +2885,7 @@ static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
/* Ok, we have more job to do! :) */
for (i = 0; i < num - 1; i++) {
- /* Get free micro URB poll till a free urb is recieved */
+ /* Get free micro URB poll till a free urb is received */
do {
murb = (struct urb *) oxu_murb_alloc(oxu);
@@ -2911,7 +2917,7 @@ static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
/* Last urb requires special handling */
- /* Get free micro URB poll till a free urb is recieved */
+ /* Get free micro URB poll till a free urb is received */
do {
murb = (struct urb *) oxu_murb_alloc(oxu);
if (!murb)
@@ -2988,8 +2994,9 @@ static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
/* shouldn't happen often, but ...
* FIXME kill those tds' urbs
*/
- err("can't reschedule qh %p, err %d",
- qh, status);
+ dev_err(hcd->self.controller,
+ "can't reschedule qh %p, err %d\n", qh,
+ status);
}
return status;
}
@@ -3080,7 +3087,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
int ports, i, retval = 1;
unsigned long flags;
- /* if !USB_SUSPEND, root hub timers won't get shut down ... */
+ /* if !PM_RUNTIME, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
@@ -3094,7 +3101,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
/* Some boards (mostly VIA?) report bogus overcurrent indications,
* causing massive log spam unless we completely ignore them. It
- * may be relevant that VIA VT8235 controlers, where PORT_POWER is
+ * may be relevant that VIA VT8235 controllers, where PORT_POWER is
* always set, seem to clear PORT_OCC and PORT_CSC when writing to
* PORT_POWER; that's surprising, but maybe within-spec.
*/
@@ -3743,6 +3750,7 @@ static struct usb_hcd *oxu_create(struct platform_device *pdev,
if (ret < 0)
return ERR_PTR(ret);
+ device_wakeup_enable(hcd->self.controller);
return hcd;
}
@@ -3824,7 +3832,7 @@ static int oxu_drv_probe(struct platform_device *pdev)
return -ENODEV;
}
memstart = res->start;
- memlen = res->end - res->start + 1;
+ memlen = resource_size(res);
dev_dbg(&pdev->dev, "MEM resource %lx-%lx\n", memstart, memlen);
if (!request_mem_region(memstart, memlen,
oxu_hc_driver.description)) {
@@ -3832,7 +3840,7 @@ static int oxu_drv_probe(struct platform_device *pdev)
return -EBUSY;
}
- ret = set_irq_type(irq, IRQF_TRIGGER_FALLING);
+ ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING);
if (ret) {
dev_err(&pdev->dev, "error setting irq type\n");
ret = -EFAULT;
@@ -3870,7 +3878,6 @@ static int oxu_drv_probe(struct platform_device *pdev)
error_init:
kfree(info);
- platform_set_drvdata(pdev, NULL);
error_alloc:
iounmap(base);
@@ -3903,7 +3910,6 @@ static int oxu_drv_remove(struct platform_device *pdev)
release_mem_region(memstart, memlen);
kfree(info);
- platform_set_drvdata(pdev, NULL);
return 0;
}
@@ -3947,24 +3953,7 @@ static struct platform_driver oxu_driver = {
}
};
-static int __init oxu_module_init(void)
-{
- int retval = 0;
-
- retval = platform_driver_register(&oxu_driver);
- if (retval < 0)
- return retval;
-
- return retval;
-}
-
-static void __exit oxu_module_cleanup(void)
-{
- platform_driver_unregister(&oxu_driver);
-}
-
-module_init(oxu_module_init);
-module_exit(oxu_module_cleanup);
+module_platform_driver(oxu_driver);
MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 4c502c890eb..2f3acebb577 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -9,11 +9,13 @@
*/
#include <linux/types.h>
+#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include "pci-quirks.h"
#include "xhci-ext-caps.h"
@@ -34,6 +36,9 @@
#define OHCI_INTRSTATUS 0x0c
#define OHCI_INTRENABLE 0x10
#define OHCI_INTRDISABLE 0x14
+#define OHCI_FMINTERVAL 0x34
+#define OHCI_HCFS (3 << 6) /* hc functional state */
+#define OHCI_HCR (1 << 0) /* host controller reset */
#define OHCI_OCR (1 << 3) /* ownership change request */
#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
#define OHCI_CTRL_IR (1 << 8) /* interrupt routing */
@@ -52,6 +57,402 @@
#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */
#define EHCI_USBLEGCTLSTS_SOOE (1 << 13) /* SMI on ownership change */
+/* AMD quirk use */
+#define AB_REG_BAR_LOW 0xe0
+#define AB_REG_BAR_HIGH 0xe1
+#define AB_REG_BAR_SB700 0xf0
+#define AB_INDX(addr) ((addr) + 0x00)
+#define AB_DATA(addr) ((addr) + 0x04)
+#define AX_INDXC 0x30
+#define AX_DATAC 0x34
+
+#define NB_PCIE_INDX_ADDR 0xe0
+#define NB_PCIE_INDX_DATA 0xe4
+#define PCIE_P_CNTL 0x10040
+#define BIF_NB 0x10002
+#define NB_PIF0_PWRDOWN_0 0x01100012
+#define NB_PIF0_PWRDOWN_1 0x01100013
+
+#define USB_INTEL_XUSB2PR 0xD0
+#define USB_INTEL_USB2PRM 0xD4
+#define USB_INTEL_USB3_PSSEN 0xD8
+#define USB_INTEL_USB3PRM 0xDC
+
+/*
+ * amd_chipset_gen values represent different AMD chipset generations
+ */
+enum amd_chipset_gen {
+ NOT_AMD_CHIPSET = 0,
+ AMD_CHIPSET_SB600,
+ AMD_CHIPSET_SB700,
+ AMD_CHIPSET_SB800,
+ AMD_CHIPSET_HUDSON2,
+ AMD_CHIPSET_BOLTON,
+ AMD_CHIPSET_YANGTZE,
+ AMD_CHIPSET_UNKNOWN,
+};
+
+struct amd_chipset_type {
+ enum amd_chipset_gen gen;
+ u8 rev;
+};
+
+static struct amd_chipset_info {
+ struct pci_dev *nb_dev;
+ struct pci_dev *smbus_dev;
+ int nb_type;
+ struct amd_chipset_type sb_type;
+ int isoc_reqs;
+ int probe_count;
+ int probe_result;
+} amd_chipset;
+
+static DEFINE_SPINLOCK(amd_lock);
+
+/*
+ * amd_chipset_sb_type_init - initialize amd chipset southbridge type
+ *
+ * AMD FCH/SB generation and revision is identified by SMBus controller
+ * vendor, device and revision IDs.
+ *
+ * Returns: 1 if it is an AMD chipset, 0 otherwise.
+ */
+static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
+{
+ u8 rev = 0;
+ pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
+
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
+ PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
+ if (pinfo->smbus_dev) {
+ rev = pinfo->smbus_dev->revision;
+ if (rev >= 0x10 && rev <= 0x1f)
+ pinfo->sb_type.gen = AMD_CHIPSET_SB600;
+ else if (rev >= 0x30 && rev <= 0x3f)
+ pinfo->sb_type.gen = AMD_CHIPSET_SB700;
+ else if (rev >= 0x40 && rev <= 0x4f)
+ pinfo->sb_type.gen = AMD_CHIPSET_SB800;
+ } else {
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
+
+ if (!pinfo->smbus_dev) {
+ pinfo->sb_type.gen = NOT_AMD_CHIPSET;
+ return 0;
+ }
+
+ rev = pinfo->smbus_dev->revision;
+ if (rev >= 0x11 && rev <= 0x14)
+ pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
+ else if (rev >= 0x15 && rev <= 0x18)
+ pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
+ else if (rev >= 0x39 && rev <= 0x3a)
+ pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
+ }
+
+ pinfo->sb_type.rev = rev;
+ return 1;
+}
+
+void sb800_prefetch(struct device *dev, int on)
+{
+ u16 misc;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ pci_read_config_word(pdev, 0x50, &misc);
+ if (on == 0)
+ pci_write_config_word(pdev, 0x50, misc & 0xfcff);
+ else
+ pci_write_config_word(pdev, 0x50, misc | 0x0300);
+}
+EXPORT_SYMBOL_GPL(sb800_prefetch);
+
+int usb_amd_find_chipset_info(void)
+{
+ unsigned long flags;
+ struct amd_chipset_info info;
+ int ret;
+
+ spin_lock_irqsave(&amd_lock, flags);
+
+ /* probe only once */
+ if (amd_chipset.probe_count > 0) {
+ amd_chipset.probe_count++;
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return amd_chipset.probe_result;
+ }
+ memset(&info, 0, sizeof(info));
+ spin_unlock_irqrestore(&amd_lock, flags);
+
+ if (!amd_chipset_sb_type_init(&info)) {
+ ret = 0;
+ goto commit;
+ }
+
+	/* The following chipset generations do not need the AMD PLL quirk */
+ if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
+ info.sb_type.gen == AMD_CHIPSET_SB600 ||
+ info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
+ (info.sb_type.gen == AMD_CHIPSET_SB700 &&
+ info.sb_type.rev > 0x3b)) {
+ if (info.smbus_dev) {
+ pci_dev_put(info.smbus_dev);
+ info.smbus_dev = NULL;
+ }
+ ret = 0;
+ goto commit;
+ }
+
+ info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
+ if (info.nb_dev) {
+ info.nb_type = 1;
+ } else {
+ info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
+ if (info.nb_dev) {
+ info.nb_type = 2;
+ } else {
+ info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x9600, NULL);
+ if (info.nb_dev)
+ info.nb_type = 3;
+ }
+ }
+
+ ret = info.probe_result = 1;
+ printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
+
+commit:
+
+ spin_lock_irqsave(&amd_lock, flags);
+ if (amd_chipset.probe_count > 0) {
+ /* race - someone else was faster - drop devices */
+
+		/* Mark that we were here */
+ amd_chipset.probe_count++;
+ ret = amd_chipset.probe_result;
+
+ spin_unlock_irqrestore(&amd_lock, flags);
+
+ if (info.nb_dev)
+ pci_dev_put(info.nb_dev);
+ if (info.smbus_dev)
+ pci_dev_put(info.smbus_dev);
+
+ } else {
+ /* no race - commit the result */
+ info.probe_count++;
+ amd_chipset = info;
+ spin_unlock_irqrestore(&amd_lock, flags);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
+
+int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
+{
+ /* Make sure amd chipset type has already been initialized */
+ usb_amd_find_chipset_info();
+ if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
+ return 0;
+
+ dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
+ return 1;
+}
+EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
+
+bool usb_amd_hang_symptom_quirk(void)
+{
+ u8 rev;
+
+ usb_amd_find_chipset_info();
+ rev = amd_chipset.sb_type.rev;
+ /* SB600 and old version of SB700 have hang symptom bug */
+ return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
+ (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
+ rev >= 0x3a && rev <= 0x3b);
+}
+EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
+
+bool usb_amd_prefetch_quirk(void)
+{
+ usb_amd_find_chipset_info();
+ /* SB800 needs pre-fetch fix */
+ return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
+}
+EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
+
+/*
+ * The hardware normally enables the A-link power management feature, which
+ * lets the system lower the power consumption in idle states.
+ *
+ * This USB quirk prevents the link going into that lower power state
+ * during isochronous transfers.
+ *
+ * Without this quirk, isochronous stream on OHCI/EHCI/xHCI controllers of
+ * some AMD platforms may stutter or have breaks occasionally.
+ */
+static void usb_amd_quirk_pll(int disable)
+{
+ u32 addr, addr_low, addr_high, val;
+ u32 bit = disable ? 0 : 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&amd_lock, flags);
+
+ if (disable) {
+ amd_chipset.isoc_reqs++;
+ if (amd_chipset.isoc_reqs > 1) {
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return;
+ }
+ } else {
+ amd_chipset.isoc_reqs--;
+ if (amd_chipset.isoc_reqs > 0) {
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return;
+ }
+ }
+
+ if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
+ amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
+ amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
+ outb_p(AB_REG_BAR_LOW, 0xcd6);
+ addr_low = inb_p(0xcd7);
+ outb_p(AB_REG_BAR_HIGH, 0xcd6);
+ addr_high = inb_p(0xcd7);
+ addr = addr_high << 8 | addr_low;
+
+ outl_p(0x30, AB_INDX(addr));
+ outl_p(0x40, AB_DATA(addr));
+ outl_p(0x34, AB_INDX(addr));
+ val = inl_p(AB_DATA(addr));
+ } else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
+ amd_chipset.sb_type.rev <= 0x3b) {
+ pci_read_config_dword(amd_chipset.smbus_dev,
+ AB_REG_BAR_SB700, &addr);
+ outl(AX_INDXC, AB_INDX(addr));
+ outl(0x40, AB_DATA(addr));
+ outl(AX_DATAC, AB_INDX(addr));
+ val = inl(AB_DATA(addr));
+ } else {
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return;
+ }
+
+ if (disable) {
+ val &= ~0x08;
+ val |= (1 << 4) | (1 << 9);
+ } else {
+ val |= 0x08;
+ val &= ~((1 << 4) | (1 << 9));
+ }
+ outl_p(val, AB_DATA(addr));
+
+ if (!amd_chipset.nb_dev) {
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return;
+ }
+
+ if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
+ addr = PCIE_P_CNTL;
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_ADDR, addr);
+ pci_read_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, &val);
+
+ val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
+ val |= bit | (bit << 3) | (bit << 12);
+ val |= ((!bit) << 4) | ((!bit) << 9);
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, val);
+
+ addr = BIF_NB;
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_ADDR, addr);
+ pci_read_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, &val);
+ val &= ~(1 << 8);
+ val |= bit << 8;
+
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, val);
+ } else if (amd_chipset.nb_type == 2) {
+ addr = NB_PIF0_PWRDOWN_0;
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_ADDR, addr);
+ pci_read_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, &val);
+ if (disable)
+ val &= ~(0x3f << 7);
+ else
+ val |= 0x3f << 7;
+
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, val);
+
+ addr = NB_PIF0_PWRDOWN_1;
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_ADDR, addr);
+ pci_read_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, &val);
+ if (disable)
+ val &= ~(0x3f << 7);
+ else
+ val |= 0x3f << 7;
+
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, val);
+ }
+
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return;
+}
+
+void usb_amd_quirk_pll_disable(void)
+{
+ usb_amd_quirk_pll(1);
+}
+EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
+
+void usb_amd_quirk_pll_enable(void)
+{
+ usb_amd_quirk_pll(0);
+}
+EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
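
For reference, the expected call pattern for these two wrappers is to bracket isochronous activity: disable the A-link power feature when the first ISO endpoint is scheduled and re-enable it when the last one is unlinked; the isoc_reqs counter above makes nested calls safe. A hedged sketch of a caller (the foo_* function names are illustrative; quirk_amdiso() is the existing OHCI-side flag test):

	static void foo_link_iso_ed(struct ohci_hcd *ohci)
	{
		/* First ISO endpoint going live: keep the A-link out of its
		 * low-power state for the duration of the stream.
		 */
		if (quirk_amdiso(ohci))
			usb_amd_quirk_pll_disable();
		/* ... link the ED into the periodic schedule ... */
	}

	static void foo_unlink_iso_ed(struct ohci_hcd *ohci)
	{
		/* ... remove the ED from the periodic schedule ... */
		if (quirk_amdiso(ohci))
			usb_amd_quirk_pll_enable();
	}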
+
+void usb_amd_dev_put(void)
+{
+ struct pci_dev *nb, *smbus;
+ unsigned long flags;
+
+ spin_lock_irqsave(&amd_lock, flags);
+
+ amd_chipset.probe_count--;
+ if (amd_chipset.probe_count > 0) {
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return;
+ }
+
+	/* save them so we can pci_dev_put() outside of the spinlock */
+ nb = amd_chipset.nb_dev;
+ smbus = amd_chipset.smbus_dev;
+
+ amd_chipset.nb_dev = NULL;
+ amd_chipset.smbus_dev = NULL;
+ amd_chipset.nb_type = 0;
+ memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
+ amd_chipset.isoc_reqs = 0;
+ amd_chipset.probe_result = 0;
+
+ spin_unlock_irqrestore(&amd_lock, flags);
+
+ if (nb)
+ pci_dev_put(nb);
+ if (smbus)
+ pci_dev_put(smbus);
+}
+EXPORT_SYMBOL_GPL(usb_amd_dev_put);
/*
* Make sure the controller is completely inactive, unable to
@@ -143,7 +544,7 @@ static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
-static void __devinit quirk_usb_handoff_uhci(struct pci_dev *pdev)
+static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
{
unsigned long base = 0;
int i;
@@ -161,15 +562,17 @@ static void __devinit quirk_usb_handoff_uhci(struct pci_dev *pdev)
uhci_check_and_reset_hc(pdev, base);
}
-static int __devinit mmio_resource_enabled(struct pci_dev *pdev, int idx)
+static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
{
return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
}
-static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
+static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
void __iomem *base;
u32 control;
+ u32 fminterval;
+ int cnt;
if (!mmio_resource_enabled(pdev, 0))
return;
@@ -202,26 +605,147 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
}
#endif
- /* reset controller, preserving RWC (and possibly IR) */
- writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
+ /* disable interrupts */
+ writel((u32) ~0, base + OHCI_INTRDISABLE);
+
+ /* Reset the USB bus, if the controller isn't already in RESET */
+ if (control & OHCI_HCFS) {
+ /* Go into RESET, preserving RWC (and possibly IR) */
+ writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
+ readl(base + OHCI_CONTROL);
+
+ /* drive bus reset for at least 50 ms (7.1.7.5) */
+ msleep(50);
+ }
+
+ /* software reset of the controller, preserving HcFmInterval */
+ fminterval = readl(base + OHCI_FMINTERVAL);
+ writel(OHCI_HCR, base + OHCI_CMDSTATUS);
+
+ /* reset requires max 10 us delay */
+ for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */
+ if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
+ break;
+ udelay(1);
+ }
+ writel(fminterval, base + OHCI_FMINTERVAL);
+
+ /* Now the controller is safely in SUSPEND and nothing can wake it up */
+ iounmap(base);
+}
+
+static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
+ {
+ /* Pegatron Lucid (ExoPC) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
+ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
+ },
+ },
+ {
+ /* Pegatron Lucid (Ordissimo AIRIS) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
+ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
+ },
+ },
+ {
+ /* Pegatron Lucid (Ordissimo) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
+ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
+ },
+ },
+ {
+ /* HASEE E200 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
+ DMI_MATCH(DMI_BOARD_NAME, "E210"),
+ DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
+ },
+ },
+ { }
+};
+
+static void ehci_bios_handoff(struct pci_dev *pdev,
+ void __iomem *op_reg_base,
+ u32 cap, u8 offset)
+{
+ int try_handoff = 1, tried_handoff = 0;
/*
- * disable interrupts
+ * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
+ * the handoff on its unused controller. Skip it.
+ *
+ * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
*/
- writel(~(u32)0, base + OHCI_INTRDISABLE);
- writel(~(u32)0, base + OHCI_INTRSTATUS);
+ if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
+ pdev->device == 0x27cc)) {
+ if (dmi_check_system(ehci_dmi_nohandoff_table))
+ try_handoff = 0;
+ }
- iounmap(base);
+ if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
+ dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
+
+#if 0
+/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
+ * but that seems dubious in general (the BIOS left it off intentionally)
+ * and is known to prevent some systems from booting. so we won't do this
+ * unless maybe we can determine when we're on a system that needs SMI forced.
+ */
+ /* BIOS workaround (?): be sure the pre-Linux code
+ * receives the SMI
+ */
+ pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
+ pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
+ val | EHCI_USBLEGCTLSTS_SOOE);
+#endif
+
+ /* some systems get upset if this semaphore is
+ * set for any other reason than forcing a BIOS
+ * handoff..
+ */
+ pci_write_config_byte(pdev, offset + 3, 1);
+ }
+
+ /* if boot firmware now owns EHCI, spin till it hands it over. */
+ if (try_handoff) {
+ int msec = 1000;
+ while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
+ tried_handoff = 1;
+ msleep(10);
+ msec -= 10;
+ pci_read_config_dword(pdev, offset, &cap);
+ }
+ }
+
+ if (cap & EHCI_USBLEGSUP_BIOS) {
+ /* well, possibly buggy BIOS... try to shut it down,
+ * and hope nothing goes too wrong
+ */
+ if (try_handoff)
+ dev_warn(&pdev->dev, "EHCI: BIOS handoff failed"
+ " (BIOS bug?) %08x\n", cap);
+ pci_write_config_byte(pdev, offset + 2, 0);
+ }
+
+ /* just in case, always disable EHCI SMIs */
+ pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);
+
+ /* If the BIOS ever owned the controller then we can't expect
+ * any power sessions to remain intact.
+ */
+ if (tried_handoff)
+ writel(0, op_reg_base + EHCI_CONFIGFLAG);
}
-static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
+static void quirk_usb_disable_ehci(struct pci_dev *pdev)
{
- int wait_time, delta;
void __iomem *base, *op_reg_base;
- u32 hcc_params, val;
+ u32 hcc_params, cap, val;
u8 offset, cap_length;
- int count = 256/4;
- int tried_handoff = 0;
+ int wait_time, count = 256/4;
if (!mmio_resource_enabled(pdev, 0))
return;
@@ -240,77 +764,17 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
hcc_params = readl(base + EHCI_HCC_PARAMS);
offset = (hcc_params >> 8) & 0xff;
while (offset && --count) {
- u32 cap;
- int msec;
-
pci_read_config_dword(pdev, offset, &cap);
- switch (cap & 0xff) {
- case 1: /* BIOS/SMM/... handoff support */
- if ((cap & EHCI_USBLEGSUP_BIOS)) {
- dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
-
-#if 0
-/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
- * but that seems dubious in general (the BIOS left it off intentionally)
- * and is known to prevent some systems from booting. so we won't do this
- * unless maybe we can determine when we're on a system that needs SMI forced.
- */
- /* BIOS workaround (?): be sure the
- * pre-Linux code receives the SMI
- */
- pci_read_config_dword(pdev,
- offset + EHCI_USBLEGCTLSTS,
- &val);
- pci_write_config_dword(pdev,
- offset + EHCI_USBLEGCTLSTS,
- val | EHCI_USBLEGCTLSTS_SOOE);
-#endif
-
- /* some systems get upset if this semaphore is
- * set for any other reason than forcing a BIOS
- * handoff..
- */
- pci_write_config_byte(pdev, offset + 3, 1);
- }
-
- /* if boot firmware now owns EHCI, spin till
- * it hands it over.
- */
- msec = 1000;
- while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
- tried_handoff = 1;
- msleep(10);
- msec -= 10;
- pci_read_config_dword(pdev, offset, &cap);
- }
-
- if (cap & EHCI_USBLEGSUP_BIOS) {
- /* well, possibly buggy BIOS... try to shut
- * it down, and hope nothing goes too wrong
- */
- dev_warn(&pdev->dev, "EHCI: BIOS handoff failed"
- " (BIOS bug?) %08x\n", cap);
- pci_write_config_byte(pdev, offset + 2, 0);
- }
-
- /* just in case, always disable EHCI SMIs */
- pci_write_config_dword(pdev,
- offset + EHCI_USBLEGCTLSTS,
- 0);
- /* If the BIOS ever owned the controller then we
- * can't expect any power sessions to remain intact.
- */
- if (tried_handoff)
- writel(0, op_reg_base + EHCI_CONFIGFLAG);
+ switch (cap & 0xff) {
+ case 1:
+ ehci_bios_handoff(pdev, op_reg_base, cap, offset);
break;
- case 0: /* illegal reserved capability */
- cap = 0;
- /* FALLTHROUGH */
+ case 0: /* Illegal reserved cap, set cap=0 so we exit */
+ cap = 0; /* then fallthrough... */
default:
dev_warn(&pdev->dev, "EHCI: unrecognized capability "
- "%02x\n", cap & 0xff);
- break;
+ "%02x\n", cap & 0xff);
}
offset = (cap >> 8) & 0xff;
}
@@ -327,11 +791,10 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
writel(val, op_reg_base + EHCI_USBCMD);
wait_time = 2000;
- delta = 100;
do {
writel(0x3f, op_reg_base + EHCI_USBSTS);
- udelay(delta);
- wait_time -= delta;
+ udelay(100);
+ wait_time -= 100;
val = readl(op_reg_base + EHCI_USBSTS);
if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
break;
@@ -373,6 +836,115 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
return -ETIMEDOUT;
}
+/*
+ * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
+ * share some number of ports. These ports can be switched between either
+ * controller. Not all of the ports under the EHCI host controller may be
+ * switchable.
+ *
+ * The ports should be switched over to xHCI before PCI probes for any device
+ * start. This avoids active devices under EHCI being disconnected during the
+ * port switchover, which could cause loss of data on USB storage devices, or
+ * failed boot when the root file system is on a USB mass storage device and is
+ * enumerated under EHCI first.
+ *
+ * We write into the xHC's PCI configuration space in some Intel-specific
+ * registers to switch the ports over. The USB 3.0 terminations and the USB
+ * 2.0 data wires are switched separately. We want to enable the SuperSpeed
+ * terminations before switching the USB 2.0 wires over, so that USB 3.0
+ * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
+ */
+void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
+{
+ u32 ports_available;
+ bool ehci_found = false;
+ struct pci_dev *companion = NULL;
+
+ /* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
+ * switching ports from EHCI to xHCI
+ */
+ if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
+ xhci_pdev->subsystem_device == 0x90a8)
+ return;
+
+ /* make sure an intel EHCI controller exists */
+ for_each_pci_dev(companion) {
+ if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
+ companion->vendor == PCI_VENDOR_ID_INTEL) {
+ ehci_found = true;
+ break;
+ }
+ }
+
+ if (!ehci_found)
+ return;
+
+ /* Don't switchover the ports if the user hasn't compiled the xHCI
+ * driver. Otherwise they will see "dead" USB ports that don't power
+ * the devices.
+ */
+ if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
+ dev_warn(&xhci_pdev->dev,
+ "CONFIG_USB_XHCI_HCD is turned off, "
+ "defaulting to EHCI.\n");
+ dev_warn(&xhci_pdev->dev,
+ "USB 3.0 devices will work at USB 2.0 speeds.\n");
+ usb_disable_xhci_ports(xhci_pdev);
+ return;
+ }
+
+ /* Read USB3PRM, the USB 3.0 Port Routing Mask Register
+ * Indicate the ports that can be changed from OS.
+ */
+ pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
+ &ports_available);
+
+ dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
+ ports_available);
+
+ /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
+ * Register, to turn on SuperSpeed terminations for the
+ * switchable ports.
+ */
+ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+ ports_available);
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+ &ports_available);
+ dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
+ "under xHCI: 0x%x\n", ports_available);
+
+ /* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register
+ * Indicate the USB 2.0 ports to be controlled by the xHCI host.
+ */
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
+ &ports_available);
+
+	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
+ ports_available);
+
+ /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
+ * switch the USB 2.0 power and data lines over to the xHCI
+ * host.
+ */
+ pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ ports_available);
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ &ports_available);
+ dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
+ "to xHCI: 0x%x\n", ports_available);
+}
+EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);
+
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
+ pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
+}
+EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
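
Beyond the early-handoff call later in this hunk, the same switchover is worth re-running after system resume, since firmware on these chipsets may hand the shared ports back to EHCI while the machine sleeps. A rough sketch of such a resume-path caller (the foo_ name is illustrative, not part of this patch):

	static int foo_xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
	{
		struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

		/* The BIOS may have switched the shared ports back to EHCI
		 * across suspend; claim them for xHCI again before resuming.
		 */
		if (pdev->vendor == PCI_VENDOR_ID_INTEL)
			usb_enable_intel_xhci_ports(pdev);

		return 0;	/* then continue with the normal xHCI resume */
	}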
+
/**
* PCI Quirks for xHCI.
*
@@ -381,19 +953,19 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
* and then waits 5 seconds for the BIOS to hand over control.
* If we timeout, assume the BIOS is broken and take control anyway.
*/
-static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
{
void __iomem *base;
int ext_cap_offset;
void __iomem *op_reg_base;
u32 val;
int timeout;
+ int len = pci_resource_len(pdev, 0);
if (!mmio_resource_enabled(pdev, 0))
return;
- base = ioremap_nocache(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+ base = ioremap_nocache(pci_resource_start(pdev, 0), len);
if (base == NULL)
return;
@@ -403,9 +975,17 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
*/
ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
do {
+ if ((ext_cap_offset + sizeof(val)) > len) {
+ /* We're reading garbage from the controller */
+ dev_warn(&pdev->dev,
+ "xHCI controller failing to respond");
+ return;
+ }
+
if (!ext_cap_offset)
/* We've reached the end of the extended capabilities */
goto hc_init;
+
val = readl(base + ext_cap_offset);
if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
break;
@@ -414,7 +994,7 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
/* If the BIOS owns the HC, signal that the OS wants it, and wait */
if (val & XHCI_HC_BIOS_OWNED) {
- writel(val & XHCI_HC_OS_OWNED, base + ext_cap_offset);
+ writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
/* Wait for 5 seconds with 10 microsecond polling interval */
timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
@@ -428,11 +1008,18 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
}
}
- /* Disable any BIOS SMIs */
- writel(XHCI_LEGACY_DISABLE_SMI,
- base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+ val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+ /* Mask off (turn off) any enabled SMIs */
+ val &= XHCI_LEGACY_DISABLE_SMI;
+ /* Mask all SMI events bits, RW1C */
+ val |= XHCI_LEGACY_SMI_EVENTS;
+ /* Disable any BIOS SMIs and clear all SMI events*/
+ writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
hc_init:
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ usb_enable_intel_xhci_ports(pdev);
+
op_reg_base = base + XHCI_HC_LENGTH(readl(base));
/* Wait for the host controller to be ready before writing any
@@ -466,8 +1053,24 @@ hc_init:
iounmap(base);
}
-static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
+static void quirk_usb_early_handoff(struct pci_dev *pdev)
{
+ /* Skip Netlogic mips SoC's internal PCI USB controller.
+ * This device does not need/support EHCI/OHCI handoff
+ */
+ if (pdev->vendor == 0x184e) /* vendor Netlogic */
+ return;
+ if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
+ pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
+ pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
+ pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
+ return;
+
+ if (pci_enable_device(pdev) < 0) {
+ dev_warn(&pdev->dev, "Can't enable PCI device, "
+ "BIOS handoff failed.\n");
+ return;
+ }
if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
quirk_usb_handoff_uhci(pdev);
else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
@@ -476,5 +1079,7 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
quirk_usb_disable_ehci(pdev);
else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
quirk_usb_handoff_xhci(pdev);
+ pci_disable_device(pdev);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 1564edfff6f..c622ddf21c9 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -1,7 +1,26 @@
#ifndef __LINUX_USB_PCI_QUIRKS_H
#define __LINUX_USB_PCI_QUIRKS_H
+#ifdef CONFIG_PCI
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
+int usb_amd_find_chipset_info(void);
+int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev);
+bool usb_amd_hang_symptom_quirk(void);
+bool usb_amd_prefetch_quirk(void);
+void usb_amd_dev_put(void);
+void usb_amd_quirk_pll_disable(void);
+void usb_amd_quirk_pll_enable(void);
+void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
+void sb800_prefetch(struct device *dev, int on);
+#else
+struct pci_dev;
+static inline void usb_amd_quirk_pll_disable(void) {}
+static inline void usb_amd_quirk_pll_enable(void) {}
+static inline void usb_amd_dev_put(void) {}
+static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
+static inline void sb800_prefetch(struct device *dev, int on) {}
+#endif /* CONFIG_PCI */
#endif /* __LINUX_USB_PCI_QUIRKS_H */
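The !CONFIG_PCI stubs above let host-controller drivers call the AMD/Intel quirk helpers unconditionally; on non-PCI builds the calls compile away. A small illustrative example (foo_hcd_stop() is hypothetical):

	static void foo_hcd_stop(struct usb_hcd *hcd)
	{
		/* No #ifdef needed in the caller: under CONFIG_PCI this drops
		 * the chipset references taken at probe time, otherwise it is
		 * an empty static inline.
		 */
		usb_amd_dev_put();
	}
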
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 3076b1cc05d..110b4b9ebea 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -6,7 +6,7 @@
* Portions Copyright (C) 2004-2005 David Brownell
* Portions Copyright (C) 1999 Roman Weissgaerber
*
- * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/list.h>
@@ -95,9 +94,7 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597)
int i = 0;
if (r8a66597->pdata->on_chip) {
-#ifdef CONFIG_HAVE_CLK
- clk_enable(r8a66597->clk);
-#endif
+ clk_prepare_enable(r8a66597->clk);
do {
r8a66597_write(r8a66597, SCKE, SYSCFG0);
tmp = r8a66597_read(r8a66597, SYSCFG0);
@@ -141,9 +138,7 @@ static void r8a66597_clock_disable(struct r8a66597 *r8a66597)
udelay(1);
if (r8a66597->pdata->on_chip) {
-#ifdef CONFIG_HAVE_CLK
- clk_disable(r8a66597->clk);
-#endif
+ clk_disable_unprepare(r8a66597->clk);
} else {
r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
@@ -401,7 +396,7 @@ static u8 alloc_usb_address(struct r8a66597 *r8a66597, struct urb *urb)
if (r8a66597->address_map & (1 << addr))
continue;
- dbg("alloc_address: r8a66597_addr=%d", addr);
+ dev_dbg(&urb->dev->dev, "alloc_address: r8a66597_addr=%d\n", addr);
r8a66597->address_map |= 1 << addr;
if (make_r8a66597_device(r8a66597, urb, addr) < 0)
@@ -426,7 +421,7 @@ static void free_usb_address(struct r8a66597 *r8a66597,
if (!dev)
return;
- dbg("free_addr: addr=%d", dev->address);
+ dev_dbg(&dev->udev->dev, "free_addr: addr=%d\n", dev->address);
dev->state = USB_STATE_DEFAULT;
r8a66597->address_map &= ~(1 << dev->address);
@@ -819,7 +814,7 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
struct r8a66597_pipe *pipe = hep->hcpriv;
- dbg("enable_pipe:");
+ dev_dbg(&dev->udev->dev, "enable_pipe:\n");
pipe->info = *info;
set_pipe_reg_addr(pipe, R8A66597_PIPE_NO_DMA);
@@ -898,7 +893,7 @@ static void disable_r8a66597_pipe_all(struct r8a66597 *r8a66597,
force_dequeue(r8a66597, pipenum, dev->address);
}
- dbg("disable_pipe");
+ dev_dbg(&dev->udev->dev, "disable_pipe\n");
r8a66597->dma_map &= ~(dev->dma_map);
dev->dma_map = 0;
@@ -959,7 +954,7 @@ static void init_pipe_info(struct r8a66597 *r8a66597, struct urb *urb,
info.pipenum = get_empty_pipenum(r8a66597, ep);
info.address = get_urb_to_r8a66597_addr(r8a66597, urb);
info.epnum = usb_endpoint_num(ep);
- info.maxpacket = le16_to_cpu(ep->wMaxPacketSize);
+ info.maxpacket = usb_endpoint_maxp(ep);
info.type = get_r8a66597_type(usb_endpoint_type(ep));
info.bufnum = get_bufnum(info.pipenum);
info.buf_bsize = get_buf_bsize(info.pipenum);
@@ -1438,7 +1433,7 @@ static void packet_write(struct r8a66597 *r8a66597, u16 pipenum)
if (pipenum > 0)
r8a66597_write(r8a66597, ~(1 << pipenum), BEMPSTS);
if (urb->transfer_buffer) {
- r8a66597_write_fifo(r8a66597, td->pipe->fifoaddr, buf, size);
+ r8a66597_write_fifo(r8a66597, td->pipe, buf, size);
if (!usb_pipebulk(urb->pipe) || td->maxpacket != size)
r8a66597_write(r8a66597, BVAL, td->pipe->fifoctr);
}
@@ -2033,18 +2028,15 @@ static int r8a66597_get_frame(struct usb_hcd *hcd)
static void collect_usb_address_map(struct usb_device *udev, unsigned long *map)
{
int chix;
+ struct usb_device *childdev;
if (udev->state == USB_STATE_CONFIGURED &&
udev->parent && udev->parent->devnum > 1 &&
udev->parent->descriptor.bDeviceClass == USB_CLASS_HUB)
map[udev->devnum/32] |= (1 << (udev->devnum % 32));
- for (chix = 0; chix < udev->maxchild; chix++) {
- struct usb_device *childdev = udev->children[chix];
-
- if (childdev)
- collect_usb_address_map(childdev, map);
- }
+ usb_hub_for_each_child(udev, chix, childdev)
+ collect_usb_address_map(childdev, map);
}
/* this function must be called with interrupt disabled */
@@ -2150,8 +2142,9 @@ static void r8a66597_hub_descriptor(struct r8a66597 *r8a66597,
desc->bDescLength = 9;
desc->bPwrOn2PwrGood = 0;
desc->wHubCharacteristics = cpu_to_le16(0x0011);
- desc->bitmap[0] = ((1 << r8a66597->max_root_hub) - 1) << 1;
- desc->bitmap[1] = ~0;
+ desc->u.hs.DeviceRemovable[0] =
+ ((1 << r8a66597->max_root_hub) - 1) << 1;
+ desc->u.hs.DeviceRemovable[1] = ~0;
}
static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
@@ -2263,7 +2256,7 @@ static int r8a66597_bus_suspend(struct usb_hcd *hcd)
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
int port;
- dbg("%s", __func__);
+ dev_dbg(&r8a66597->device0.udev->dev, "%s\n", __func__);
for (port = 0; port < r8a66597->max_root_hub; port++) {
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
@@ -2272,7 +2265,7 @@ static int r8a66597_bus_suspend(struct usb_hcd *hcd)
if (!(rh->port & USB_PORT_STAT_ENABLE))
continue;
- dbg("suspend port = %d", port);
+ dev_dbg(&rh->dev->udev->dev, "suspend port = %d\n", port);
r8a66597_bclr(r8a66597, UACT, dvstctr_reg); /* suspend */
rh->port |= USB_PORT_STAT_SUSPEND;
@@ -2294,7 +2287,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
int port;
- dbg("%s", __func__);
+ dev_dbg(&r8a66597->device0.udev->dev, "%s\n", __func__);
for (port = 0; port < r8a66597->max_root_hub; port++) {
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
@@ -2303,9 +2296,9 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
if (!(rh->port & USB_PORT_STAT_SUSPEND))
continue;
- dbg("resume port = %d", port);
+ dev_dbg(&rh->dev->udev->dev, "resume port = %d\n", port);
rh->port &= ~USB_PORT_STAT_SUSPEND;
- rh->port |= USB_PORT_STAT_C_SUSPEND < 16;
+ rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
msleep(50);
r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
@@ -2359,7 +2352,7 @@ static int r8a66597_suspend(struct device *dev)
struct r8a66597 *r8a66597 = dev_get_drvdata(dev);
int port;
- dbg("%s", __func__);
+ dev_dbg(dev, "%s\n", __func__);
disable_controller(r8a66597);
@@ -2377,7 +2370,7 @@ static int r8a66597_resume(struct device *dev)
struct r8a66597 *r8a66597 = dev_get_drvdata(dev);
struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597);
- dbg("%s", __func__);
+ dev_dbg(dev, "%s\n", __func__);
enable_controller(r8a66597);
usb_root_hub_lost_power(hcd->self.root_hub);
@@ -2397,27 +2390,23 @@ static const struct dev_pm_ops r8a66597_dev_pm_ops = {
#define R8A66597_DEV_PM_OPS NULL
#endif
-static int __devexit r8a66597_remove(struct platform_device *pdev)
+static int r8a66597_remove(struct platform_device *pdev)
{
- struct r8a66597 *r8a66597 = dev_get_drvdata(&pdev->dev);
+ struct r8a66597 *r8a66597 = platform_get_drvdata(pdev);
struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597);
del_timer_sync(&r8a66597->rh_timer);
usb_remove_hcd(hcd);
iounmap(r8a66597->reg);
-#ifdef CONFIG_HAVE_CLK
if (r8a66597->pdata->on_chip)
clk_put(r8a66597->clk);
-#endif
usb_put_hcd(hcd);
return 0;
}
-static int __devinit r8a66597_probe(struct platform_device *pdev)
+static int r8a66597_probe(struct platform_device *pdev)
{
-#ifdef CONFIG_HAVE_CLK
char clk_name[8];
-#endif
struct resource *res = NULL, *ires;
int irq = -1;
void __iomem *reg = NULL;
@@ -2427,6 +2416,9 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
int i;
unsigned long irq_trigger;
+ if (usb_disabled())
+ return -ENODEV;
+
if (pdev->dev.dma_mask) {
ret = -EINVAL;
dev_err(&pdev->dev, "dma not supported\n");
@@ -2473,12 +2465,11 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
}
r8a66597 = hcd_to_r8a66597(hcd);
memset(r8a66597, 0, sizeof(struct r8a66597));
- dev_set_drvdata(&pdev->dev, r8a66597);
- r8a66597->pdata = pdev->dev.platform_data;
+ platform_set_drvdata(pdev, r8a66597);
+ r8a66597->pdata = dev_get_platdata(&pdev->dev);
r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
if (r8a66597->pdata->on_chip) {
-#ifdef CONFIG_HAVE_CLK
snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
r8a66597->clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(r8a66597->clk)) {
@@ -2487,7 +2478,6 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
ret = PTR_ERR(r8a66597->clk);
goto clean_up2;
}
-#endif
r8a66597->max_root_hub = 1;
} else
r8a66597->max_root_hub = 2;
@@ -2516,21 +2506,21 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&r8a66597->child_device);
hcd->rsrc_start = res->start;
+ hcd->has_tt = 1;
- ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
+ ret = usb_add_hcd(hcd, irq, irq_trigger);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to add hcd\n");
goto clean_up3;
}
+ device_wakeup_enable(hcd->self.controller);
return 0;
clean_up3:
-#ifdef CONFIG_HAVE_CLK
if (r8a66597->pdata->on_chip)
clk_put(r8a66597->clk);
clean_up2:
-#endif
usb_put_hcd(hcd);
clean_up:
@@ -2542,28 +2532,12 @@ clean_up:
static struct platform_driver r8a66597_driver = {
.probe = r8a66597_probe,
- .remove = __devexit_p(r8a66597_remove),
+ .remove = r8a66597_remove,
.driver = {
- .name = (char *) hcd_name,
+ .name = hcd_name,
.owner = THIS_MODULE,
.pm = R8A66597_DEV_PM_OPS,
},
};
-static int __init r8a66597_init(void)
-{
- if (usb_disabled())
- return -ENODEV;
-
- printk(KERN_INFO KBUILD_MODNAME ": driver %s, %s\n", hcd_name,
- DRIVER_VERSION);
- return platform_driver_register(&r8a66597_driver);
-}
-module_init(r8a66597_init);
-
-static void __exit r8a66597_cleanup(void)
-{
- platform_driver_unregister(&r8a66597_driver);
-}
-module_exit(r8a66597_cleanup);
-
+module_platform_driver(r8a66597_driver);
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
index 25563e9a90b..672cea307ab 100644
--- a/drivers/usb/host/r8a66597.h
+++ b/drivers/usb/host/r8a66597.h
@@ -26,10 +26,7 @@
#ifndef __R8A66597_H__
#define __R8A66597_H__
-#ifdef CONFIG_HAVE_CLK
#include <linux/clk.h>
-#endif
-
#include <linux/usb/r8a66597.h>
#define R8A66597_MAX_NUM_PIPE 10
@@ -113,9 +110,7 @@ struct r8a66597_root_hub {
struct r8a66597 {
spinlock_t lock;
void __iomem *reg;
-#ifdef CONFIG_HAVE_CLK
struct clk *clk;
-#endif
struct r8a66597_platdata *pdata;
struct r8a66597_device device0;
struct r8a66597_root_hub root_hub[R8A66597_MAX_ROOT_HUB];
@@ -201,11 +196,26 @@ static inline void r8a66597_write(struct r8a66597 *r8a66597, u16 val,
iowrite16(val, r8a66597->reg + offset);
}
+static inline void r8a66597_mdfy(struct r8a66597 *r8a66597,
+ u16 val, u16 pat, unsigned long offset)
+{
+ u16 tmp;
+ tmp = r8a66597_read(r8a66597, offset);
+ tmp = tmp & (~pat);
+ tmp = tmp | val;
+ r8a66597_write(r8a66597, tmp, offset);
+}
+
+#define r8a66597_bclr(r8a66597, val, offset) \
+ r8a66597_mdfy(r8a66597, 0, val, offset)
+#define r8a66597_bset(r8a66597, val, offset) \
+ r8a66597_mdfy(r8a66597, val, 0, offset)
+
static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597,
- unsigned long offset, u16 *buf,
+ struct r8a66597_pipe *pipe, u16 *buf,
int len)
{
- void __iomem *fifoaddr = r8a66597->reg + offset;
+ void __iomem *fifoaddr = r8a66597->reg + pipe->fifoaddr;
unsigned long count;
unsigned char *pb;
int i;
@@ -230,26 +240,15 @@ static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597,
iowrite16_rep(fifoaddr, buf, len);
if (unlikely(odd)) {
buf = &buf[len];
+ if (r8a66597->pdata->wr0_shorted_to_wr1)
+ r8a66597_bclr(r8a66597, MBW_16, pipe->fifosel);
iowrite8((unsigned char)*buf, fifoaddr);
+ if (r8a66597->pdata->wr0_shorted_to_wr1)
+ r8a66597_bset(r8a66597, MBW_16, pipe->fifosel);
}
}
}
-static inline void r8a66597_mdfy(struct r8a66597 *r8a66597,
- u16 val, u16 pat, unsigned long offset)
-{
- u16 tmp;
- tmp = r8a66597_read(r8a66597, offset);
- tmp = tmp & (~pat);
- tmp = tmp | val;
- r8a66597_write(r8a66597, tmp, offset);
-}
-
-#define r8a66597_bclr(r8a66597, val, offset) \
- r8a66597_mdfy(r8a66597, 0, val, offset)
-#define r8a66597_bset(r8a66597, val, offset) \
- r8a66597_mdfy(r8a66597, val, 0, offset)
-
static inline unsigned long get_syscfg_reg(int port)
{
return port == 0 ? SYSCFG0 : SYSCFG1;
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 990f06b89ea..a517151867a 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -22,7 +22,7 @@
* and usb-storage.
*
* TODO:
- * - usb suspend/resume triggered by sl811 (with USB_SUSPEND)
+ * - usb suspend/resume triggered by sl811 (with PM_RUNTIME)
* - various issues noted in the code
* - performance work; use both register banks; ...
* - use urb->iso_frame_desc[] with ISO transfers
@@ -39,7 +39,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -47,10 +46,12 @@
#include <linux/usb/sl811.h>
#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
@@ -63,20 +64,9 @@ MODULE_ALIAS("platform:sl811-hcd");
#define DRIVER_VERSION "19 May 2005"
-
-#ifndef DEBUG
-# define STUB_DEBUG_FILE
-#endif
-
/* for now, use only one transfer register bank */
#undef USE_B
-/* this doesn't understand urb->iso_frame_desc[], but if you had a driver
- * that just queued one ISO frame per URB then iso transfers "should" work
- * using the normal urb status fields.
- */
-#define DISABLE_ISO
-
// #define QUIRK2
#define QUIRK3
@@ -106,7 +96,8 @@ static void port_power(struct sl811 *sl811, int is_on)
if (sl811->board && sl811->board->port_power) {
/* switch VBUS, at 500mA unless hub power budget gets set */
- DBG("power %s\n", is_on ? "on" : "off");
+ dev_dbg(hcd->self.controller, "power %s\n",
+ is_on ? "on" : "off");
sl811->board->port_power(hcd->self.controller, is_on);
}
@@ -162,7 +153,7 @@ static void setup_packet(
writeb(SL_SETUP /* | ep->epnum */, data_reg);
writeb(usb_pipedevice(urb->pipe), data_reg);
- /* always OUT/data0 */ ;
+ /* always OUT/data0 */
sl811_write(sl811, bank + SL11H_HOSTCTLREG,
control | SL11H_HCTLMASK_OUT);
ep->length = 0;
@@ -288,7 +279,7 @@ static inline void sofirq_on(struct sl811 *sl811)
{
if (sl811->irq_enable & SL11H_INTMASK_SOFINTR)
return;
- VDBG("sof irq on\n");
+ dev_dbg(sl811_to_hcd(sl811)->self.controller, "sof irq on\n");
sl811->irq_enable |= SL11H_INTMASK_SOFINTR;
}
@@ -296,7 +287,7 @@ static inline void sofirq_off(struct sl811 *sl811)
{
if (!(sl811->irq_enable & SL11H_INTMASK_SOFINTR))
return;
- VDBG("sof irq off\n");
+ dev_dbg(sl811_to_hcd(sl811)->self.controller, "sof irq off\n");
sl811->irq_enable &= ~SL11H_INTMASK_SOFINTR;
}
@@ -344,7 +335,8 @@ static struct sl811h_ep *start(struct sl811 *sl811, u8 bank)
}
if (unlikely(list_empty(&ep->hep->urb_list))) {
- DBG("empty %p queue?\n", ep);
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "empty %p queue?\n", ep);
return NULL;
}
@@ -397,7 +389,8 @@ static struct sl811h_ep *start(struct sl811 *sl811, u8 bank)
status_packet(sl811, ep, urb, bank, control);
break;
default:
- DBG("bad ep%p pid %02x\n", ep, ep->nextpid);
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "bad ep%p pid %02x\n", ep, ep->nextpid);
ep = NULL;
}
return ep;
@@ -453,7 +446,8 @@ static void finish_request(
}
/* periodic deschedule */
- DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
struct sl811h_ep *temp;
struct sl811h_ep **prev = &sl811->periodic[i];
@@ -599,7 +593,8 @@ static inline u8 checkdone(struct sl811 *sl811)
ctl = sl811_read(sl811, SL811_EP_A(SL11H_HOSTCTLREG));
if (ctl & SL11H_HCTLMASK_ARM)
sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG), 0);
- DBG("%s DONE_A: ctrl %02x sts %02x\n",
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "%s DONE_A: ctrl %02x sts %02x\n",
(ctl & SL11H_HCTLMASK_ARM) ? "timeout" : "lost",
ctl,
sl811_read(sl811, SL811_EP_A(SL11H_PKTSTATREG)));
@@ -610,7 +605,8 @@ static inline u8 checkdone(struct sl811 *sl811)
ctl = sl811_read(sl811, SL811_EP_B(SL11H_HOSTCTLREG));
if (ctl & SL11H_HCTLMASK_ARM)
sl811_write(sl811, SL811_EP_B(SL11H_HOSTCTLREG), 0);
- DBG("%s DONE_B: ctrl %02x sts %02x\n",
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "%s DONE_B: ctrl %02x sts %02x\n",
(ctl & SL11H_HCTLMASK_ARM) ? "timeout" : "lost",
ctl,
sl811_read(sl811, SL811_EP_B(SL11H_PKTSTATREG)));
@@ -671,7 +667,7 @@ retry:
* this one has nothing scheduled.
*/
if (sl811->next_periodic) {
- // ERR("overrun to slot %d\n", index);
+ // dev_err(hcd->self.controller, "overrun to slot %d\n", index);
sl811->stat_overrun++;
}
if (sl811->periodic[index])
@@ -729,7 +725,7 @@ retry:
} else if (irqstat & SL11H_INTMASK_RD) {
if (sl811->port1 & USB_PORT_STAT_SUSPEND) {
- DBG("wakeup\n");
+ dev_dbg(hcd->self.controller, "wakeup\n");
sl811->port1 |= USB_PORT_STAT_C_SUSPEND << 16;
sl811->stat_wake++;
} else
@@ -807,7 +803,7 @@ static int sl811h_urb_enqueue(
int retval;
struct usb_host_endpoint *hep = urb->ep;
-#ifdef DISABLE_ISO
+#ifndef CONFIG_USB_SL811_HCD_ISO
if (type == PIPE_ISOCHRONOUS)
return -ENOSPC;
#endif
@@ -858,9 +854,11 @@ static int sl811h_urb_enqueue(
if (ep->maxpacket > H_MAXPACKET) {
/* iso packets up to 240 bytes could work... */
- DBG("dev %d ep%d maxpacket %d\n",
- udev->devnum, epnum, ep->maxpacket);
+ dev_dbg(hcd->self.controller,
+ "dev %d ep%d maxpacket %d\n", udev->devnum,
+ epnum, ep->maxpacket);
retval = -EINVAL;
+ kfree(ep);
goto fail;
}
@@ -922,7 +920,8 @@ static int sl811h_urb_enqueue(
* to share the faster parts of the tree without needing
* dummy/placeholder nodes
*/
- DBG("schedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
+ dev_dbg(hcd->self.controller, "schedule qh%d/%p branch %d\n",
+ ep->period, ep, ep->branch);
for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
struct sl811h_ep **prev = &sl811->periodic[i];
struct sl811h_ep *here = *prev;
@@ -981,7 +980,8 @@ static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
} else if (sl811->active_a == ep) {
if (time_before_eq(sl811->jiffies_a, jiffies)) {
/* happens a lot with lowspeed?? */
- DBG("giveup on DONE_A: ctrl %02x sts %02x\n",
+ dev_dbg(hcd->self.controller,
+ "giveup on DONE_A: ctrl %02x sts %02x\n",
sl811_read(sl811,
SL811_EP_A(SL11H_HOSTCTLREG)),
sl811_read(sl811,
@@ -995,7 +995,8 @@ static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
} else if (sl811->active_b == ep) {
if (time_before_eq(sl811->jiffies_a, jiffies)) {
/* happens a lot with lowspeed?? */
- DBG("giveup on DONE_B: ctrl %02x sts %02x\n",
+ dev_dbg(hcd->self.controller,
+ "giveup on DONE_B: ctrl %02x sts %02x\n",
sl811_read(sl811,
SL811_EP_B(SL11H_HOSTCTLREG)),
sl811_read(sl811,
@@ -1013,7 +1014,8 @@ static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
if (urb)
finish_request(sl811, ep, urb, 0);
else
- VDBG("dequeue, urb %p active %s; wait4irq\n", urb,
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "dequeue, urb %p active %s; wait4irq\n", urb,
(sl811->active_a == ep) ? "A" : "B");
} else
retval = -EINVAL;
@@ -1034,7 +1036,7 @@ sl811h_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
if (!list_empty(&hep->urb_list))
msleep(3);
if (!list_empty(&hep->urb_list))
- WARNING("ep %p not empty?\n", ep);
+ dev_warn(hcd->self.controller, "ep %p not empty?\n", ep);
kfree(ep);
hep->hcpriv = NULL;
@@ -1110,9 +1112,9 @@ sl811h_hub_descriptor (
desc->wHubCharacteristics = cpu_to_le16(temp);
- /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
- desc->bitmap[0] = 0 << 1;
- desc->bitmap[1] = ~0;
+ /* ports removable, and legacy PortPwrCtrlMask */
+ desc->u.hs.DeviceRemovable[0] = 0 << 1;
+ desc->u.hs.DeviceRemovable[1] = ~0;
}
static void
@@ -1137,7 +1139,7 @@ sl811h_timer(unsigned long _sl811)
switch (signaling) {
case SL11H_CTL1MASK_SE0:
- DBG("end reset\n");
+ dev_dbg(sl811_to_hcd(sl811)->self.controller, "end reset\n");
sl811->port1 = (USB_PORT_STAT_C_RESET << 16)
| USB_PORT_STAT_POWER;
sl811->ctrl1 = 0;
@@ -1146,11 +1148,12 @@ sl811h_timer(unsigned long _sl811)
irqstat &= ~SL11H_INTMASK_RD;
break;
case SL11H_CTL1MASK_K:
- DBG("end resume\n");
+ dev_dbg(sl811_to_hcd(sl811)->self.controller, "end resume\n");
sl811->port1 &= ~USB_PORT_STAT_SUSPEND;
break;
default:
- DBG("odd timer signaling: %02x\n", signaling);
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "odd timer signaling: %02x\n", signaling);
break;
}
sl811_write(sl811, SL11H_IRQ_STATUS, irqstat);
@@ -1248,7 +1251,7 @@ sl811h_hub_control(
break;
/* 20 msec of resume/K signaling, other irqs blocked */
- DBG("start resume...\n");
+ dev_dbg(hcd->self.controller, "start resume...\n");
sl811->irq_enable = 0;
sl811_write(sl811, SL11H_IRQ_ENABLE,
sl811->irq_enable);
@@ -1286,7 +1289,8 @@ sl811h_hub_control(
#ifndef VERBOSE
if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
#endif
- DBG("GetPortStatus %08x\n", sl811->port1);
+ dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
+ sl811->port1);
break;
case SetPortFeature:
if (wIndex != 1 || wLength != 0)
@@ -1298,7 +1302,7 @@ sl811h_hub_control(
if (!(sl811->port1 & USB_PORT_STAT_ENABLE))
goto error;
- DBG("suspend...\n");
+ dev_dbg(hcd->self.controller, "suspend...\n");
sl811->ctrl1 &= ~SL11H_CTL1MASK_SOF_ENA;
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
break;
@@ -1343,7 +1347,7 @@ static int
sl811h_bus_suspend(struct usb_hcd *hcd)
{
// SOFs off
- DBG("%s\n", __func__);
+ dev_dbg(hcd->self.controller, "%s\n", __func__);
return 0;
}
@@ -1351,7 +1355,7 @@ static int
sl811h_bus_resume(struct usb_hcd *hcd)
{
// SOFs on
- DBG("%s\n", __func__);
+ dev_dbg(hcd->self.controller, "%s\n", __func__);
return 0;
}
@@ -1365,16 +1369,6 @@ sl811h_bus_resume(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-#ifdef STUB_DEBUG_FILE
-
-static inline void create_debug_file(struct sl811 *sl811) { }
-static inline void remove_debug_file(struct sl811 *sl811) { }
-
-#else
-
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
static void dump_irq(struct seq_file *s, char *label, u8 mask)
{
seq_printf(s, "%s %02x%s%s%s%s%s%s\n", label, mask,
@@ -1386,7 +1380,7 @@ static void dump_irq(struct seq_file *s, char *label, u8 mask)
(mask & SL11H_INTMASK_DP) ? " dp" : "");
}
-static int proc_sl811h_show(struct seq_file *s, void *unused)
+static int sl811h_show(struct seq_file *s, void *unused)
{
struct sl811 *sl811 = s->private;
struct sl811h_ep *ep;
@@ -1418,7 +1412,7 @@ static int proc_sl811h_show(struct seq_file *s, void *unused)
case SL11H_CTL1MASK_SE0: s = " se0/reset"; break;
case SL11H_CTL1MASK_K: s = " k/resume"; break;
default: s = "j"; break;
- }; s; }),
+ } s; }),
(t & SL11H_CTL1MASK_LSPD) ? " lowspeed" : "",
(t & SL11H_CTL1MASK_SUSPEND) ? " suspend" : "");
@@ -1451,7 +1445,7 @@ static int proc_sl811h_show(struct seq_file *s, void *unused)
case USB_PID_SETUP: s = "setup"; break;
case USB_PID_ACK: s = "status"; break;
default: s = "?"; break;
- }; s;}),
+ } s;}),
ep->maxpacket,
ep->nak_count, ep->error_count);
list_for_each_entry (urb, &ep->hep->urb_list, urb_list) {
@@ -1497,34 +1491,31 @@ static int proc_sl811h_show(struct seq_file *s, void *unused)
return 0;
}
-static int proc_sl811h_open(struct inode *inode, struct file *file)
+static int sl811h_open(struct inode *inode, struct file *file)
{
- return single_open(file, proc_sl811h_show, PDE(inode)->data);
+ return single_open(file, sl811h_show, inode->i_private);
}
-static const struct file_operations proc_ops = {
- .open = proc_sl811h_open,
+static const struct file_operations debug_ops = {
+ .open = sl811h_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/* expect just one sl811 per system */
-static const char proc_filename[] = "driver/sl811h";
-
static void create_debug_file(struct sl811 *sl811)
{
- sl811->pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, sl811);
+ sl811->debug_file = debugfs_create_file("sl811h", S_IRUGO,
+ usb_debug_root, sl811,
+ &debug_ops);
}
static void remove_debug_file(struct sl811 *sl811)
{
- if (sl811->pde)
- remove_proc_entry(proc_filename, NULL);
+ debugfs_remove(sl811->debug_file);
}
-#endif
-
/*-------------------------------------------------------------------------*/
static void
@@ -1600,7 +1591,7 @@ static struct hc_driver sl811h_hc_driver = {
/*-------------------------------------------------------------------------*/
-static int __devexit
+static int
sl811h_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
@@ -1623,7 +1614,7 @@ sl811h_remove(struct platform_device *dev)
return 0;
}
-static int __devinit
+static int
sl811h_probe(struct platform_device *dev)
{
struct usb_hcd *hcd;
@@ -1636,6 +1627,9 @@ sl811h_probe(struct platform_device *dev)
u8 tmp, ioaddr = 0;
unsigned long irqflags;
+ if (usb_disabled())
+ return -ENODEV;
+
/* basic sanity checks first. board-specific init logic should
* have initialized these three resources and probably board
* specific platform_data. we don't probe for IRQs, and do only
@@ -1650,7 +1644,7 @@ sl811h_probe(struct platform_device *dev)
/* refuse to confuse usbcore */
if (dev->dev.dma_mask) {
- DBG("no we won't dma\n");
+ dev_dbg(&dev->dev, "no we won't dma\n");
return -EINVAL;
}
@@ -1696,7 +1690,7 @@ sl811h_probe(struct platform_device *dev)
spin_lock_init(&sl811->lock);
INIT_LIST_HEAD(&sl811->async);
- sl811->board = dev->dev.platform_data;
+ sl811->board = dev_get_platdata(&dev->dev);
init_timer(&sl811->timer);
sl811->timer.function = sl811h_timer;
sl811->timer.data = (unsigned long) sl811;
@@ -1718,7 +1712,7 @@ sl811h_probe(struct platform_device *dev)
break;
default:
/* reject case 0, SL11S is less functional */
- DBG("chiprev %02x\n", tmp);
+ dev_dbg(&dev->dev, "chiprev %02x\n", tmp);
retval = -ENXIO;
goto err6;
}
@@ -1733,10 +1727,12 @@ sl811h_probe(struct platform_device *dev)
* Use resource IRQ flags if set by platform device setup.
*/
irqflags |= IRQF_SHARED;
- retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | irqflags);
+ retval = usb_add_hcd(hcd, irq, irqflags);
if (retval != 0)
goto err6;
+ device_wakeup_enable(hcd->self.controller);
+
create_debug_file(sl811);
return retval;
@@ -1749,7 +1745,7 @@ sl811h_probe(struct platform_device *dev)
if (!ioaddr)
iounmap(addr_reg);
err2:
- DBG("init error, %d\n", retval);
+ dev_dbg(&dev->dev, "init error, %d\n", retval);
return retval;
}
@@ -1757,7 +1753,7 @@ sl811h_probe(struct platform_device *dev)
/* for this device there's no useful distinction between the controller
* and its root hub, except that the root hub only gets direct PM calls
- * when CONFIG_USB_SUSPEND is enabled.
+ * when CONFIG_PM_RUNTIME is enabled.
*/
static int
@@ -1810,7 +1806,7 @@ sl811h_resume(struct platform_device *dev)
/* this driver is exported so sl811_cs can depend on it */
struct platform_driver sl811h_driver = {
.probe = sl811h_probe,
- .remove = __devexit_p(sl811h_remove),
+ .remove = sl811h_remove,
.suspend = sl811h_suspend,
.resume = sl811h_resume,
@@ -1821,20 +1817,4 @@ struct platform_driver sl811h_driver = {
};
EXPORT_SYMBOL(sl811h_driver);
-/*-------------------------------------------------------------------------*/
-
-static int __init sl811h_init(void)
-{
- if (usb_disabled())
- return -ENODEV;
-
- INFO("driver %s, %s\n", hcd_name, DRIVER_VERSION);
- return platform_driver_register(&sl811h_driver);
-}
-module_init(sl811h_init);
-
-static void __exit sl811h_cleanup(void)
-{
- platform_driver_unregister(&sl811h_driver);
-}
-module_exit(sl811h_cleanup);
+module_platform_driver(sl811h_driver);
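Note on the conversion above: module_platform_driver() generates essentially the same init/exit boilerplate that this hunk deletes, so nothing is lost by dropping sl811h_init()/sl811h_cleanup(); the usb_disabled() check simply moves into sl811h_probe() (see the earlier hunk). A rough, simplified sketch of what the macro expands to (the real definition in <linux/platform_device.h> goes through module_driver(); names below follow the macro's "##_init"/"##_exit" convention):

/* approximate expansion of module_platform_driver(sl811h_driver) */
static int __init sl811h_driver_init(void)
{
	return platform_driver_register(&sl811h_driver);
}
module_init(sl811h_driver_init);

static void __exit sl811h_driver_exit(void)
{
	platform_driver_unregister(&sl811h_driver);
}
module_exit(sl811h_driver_exit);

The module_pcmcia_driver() conversion in the sl811_cs.c hunk further down follows the same pattern for pcmcia_register_driver()/pcmcia_unregister_driver().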
diff --git a/drivers/usb/host/sl811.h b/drivers/usb/host/sl811.h
index b6b8c1f233d..1e23ef49bec 100644
--- a/drivers/usb/host/sl811.h
+++ b/drivers/usb/host/sl811.h
@@ -122,7 +122,7 @@ struct sl811 {
void __iomem *addr_reg;
void __iomem *data_reg;
struct sl811_platform_data *board;
- struct proc_dir_entry *pde;
+ struct dentry *debug_file;
unsigned long stat_insrmv;
unsigned long stat_wake;
@@ -242,25 +242,8 @@ sl811_read_buf(struct sl811 *sl811, int addr, void *buf, size_t count)
/*-------------------------------------------------------------------------*/
-#ifdef DEBUG
-#define DBG(stuff...) printk(KERN_DEBUG "sl811: " stuff)
-#else
-#define DBG(stuff...) do{}while(0)
-#endif
-
-#ifdef VERBOSE
-# define VDBG DBG
-#else
-# define VDBG(stuff...) do{}while(0)
-#endif
-
#ifdef PACKET_TRACE
-# define PACKET VDBG
+# define PACKET(stuff...) pr_debug("sl811: " stuff)
#else
# define PACKET(stuff...) do{}while(0)
#endif
-
-#define ERR(stuff...) printk(KERN_ERR "sl811: " stuff)
-#define WARNING(stuff...) printk(KERN_WARNING "sl811: " stuff)
-#define INFO(stuff...) printk(KERN_INFO "sl811: " stuff)
-
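For orientation, the hunk above removes the driver-private printk wrappers in favour of the device-aware logging helpers used throughout this series. The mapping, illustrated with lines taken from the sl811-hcd.c hunks (illustration only; the struct device pointer is hcd->self.controller inside HCD callbacks and &dev->dev in probe/remove):

/* old: bare printk with a hard-coded "sl811: " prefix */
DBG("chiprev %02x\n", tmp);
WARNING("ep %p not empty?\n", ep);

/* new: tied to the device and dynamic-debug aware */
dev_dbg(&dev->dev, "chiprev %02x\n", tmp);
dev_warn(hcd->self.controller, "ep %p not empty?\n", ep);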
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 3775c035a6c..88a9bffe93d 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -187,7 +186,7 @@ static int sl811_cs_probe(struct pcmcia_device *link)
return sl811_cs_config(link);
}
-static struct pcmcia_device_id sl811_ids[] = {
+static const struct pcmcia_device_id sl811_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0xc015, 0x0001), /* RATOC USB HOST CF+ Card */
PCMCIA_DEVICE_NULL,
};
@@ -200,17 +199,4 @@ static struct pcmcia_driver sl811_cs_driver = {
.remove = sl811_cs_detach,
.id_table = sl811_ids,
};
-
-/*====================================================================*/
-
-static int __init init_sl811_cs(void)
-{
- return pcmcia_register_driver(&sl811_cs_driver);
-}
-module_init(init_sl811_cs);
-
-static void __exit exit_sl811_cs(void)
-{
- pcmcia_unregister_driver(&sl811_cs_driver);
-}
-module_exit(exit_sl811_cs);
+module_pcmcia_driver(sl811_cs_driver);
diff --git a/drivers/usb/host/ssb-hcd.c b/drivers/usb/host/ssb-hcd.c
new file mode 100644
index 00000000000..0196f766df7
--- /dev/null
+++ b/drivers/usb/host/ssb-hcd.c
@@ -0,0 +1,278 @@
+/*
+ * Sonics Silicon Backplane
+ * Broadcom USB-core driver (SSB bus glue)
+ *
+ * Copyright 2011-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Based on ssb-ohci driver
+ * Copyright 2007 Michael Buesch <m@bues.ch>
+ *
+ * Derived from the OHCI-PCI driver
+ * Copyright 1999 Roman Weissgaerber
+ * Copyright 2000-2002 David Brownell
+ * Copyright 1999 Linus Torvalds
+ * Copyright 1999 Gregory P. Smith
+ *
+ * Derived from the USBcore related parts of Broadcom-SB
+ * Copyright 2005-2011 Broadcom Corporation
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+#include <linux/ssb/ssb.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
+
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_DESCRIPTION("Common USB driver for SSB Bus");
+MODULE_LICENSE("GPL");
+
+#define SSB_HCD_TMSLOW_HOSTMODE (1 << 29)
+
+struct ssb_hcd_device {
+ struct platform_device *ehci_dev;
+ struct platform_device *ohci_dev;
+
+ u32 enable_flags;
+};
+
+static void ssb_hcd_5354wa(struct ssb_device *dev)
+{
+#ifdef CONFIG_SSB_DRIVER_MIPS
+ /* Work around for 5354 failures */
+ if (dev->id.revision == 2 && dev->bus->chip_id == 0x5354) {
+ /* Change syn01 reg */
+ ssb_write32(dev, 0x894, 0x00fe00fe);
+
+ /* Change syn03 reg */
+ ssb_write32(dev, 0x89c, ssb_read32(dev, 0x89c) | 0x1);
+ }
+#endif
+}
+
+static void ssb_hcd_usb20wa(struct ssb_device *dev)
+{
+ if (dev->id.coreid == SSB_DEV_USB20_HOST) {
+ /*
+ * USB 2.0 special considerations:
+ *
+ * In addition to the standard SSB reset sequence, the Host
+ * Control Register must be programmed to bring the USB core
+ * and various phy components out of reset.
+ */
+ ssb_write32(dev, 0x200, 0x7ff);
+
+ /* Change Flush control reg */
+ ssb_write32(dev, 0x400, ssb_read32(dev, 0x400) & ~8);
+ ssb_read32(dev, 0x400);
+
+ /* Change Shim control reg */
+ ssb_write32(dev, 0x304, ssb_read32(dev, 0x304) & ~0x100);
+ ssb_read32(dev, 0x304);
+
+ udelay(1);
+
+ ssb_hcd_5354wa(dev);
+ }
+}
+
+/* based on arch/mips/brcm-boards/bcm947xx/pcibios.c */
+static u32 ssb_hcd_init_chip(struct ssb_device *dev)
+{
+ u32 flags = 0;
+
+ if (dev->id.coreid == SSB_DEV_USB11_HOSTDEV)
+ /* Put the device into host-mode. */
+ flags |= SSB_HCD_TMSLOW_HOSTMODE;
+
+ ssb_device_enable(dev, flags);
+
+ ssb_hcd_usb20wa(dev);
+
+ return flags;
+}
+
+static const struct usb_ehci_pdata ehci_pdata = {
+};
+
+static const struct usb_ohci_pdata ohci_pdata = {
+};
+
+static struct platform_device *ssb_hcd_create_pdev(struct ssb_device *dev, bool ohci, u32 addr, u32 len)
+{
+ struct platform_device *hci_dev;
+ struct resource hci_res[2];
+ int ret = -ENOMEM;
+
+ memset(hci_res, 0, sizeof(hci_res));
+
+ hci_res[0].start = addr;
+ hci_res[0].end = hci_res[0].start + len - 1;
+ hci_res[0].flags = IORESOURCE_MEM;
+
+ hci_res[1].start = dev->irq;
+ hci_res[1].flags = IORESOURCE_IRQ;
+
+ hci_dev = platform_device_alloc(ohci ? "ohci-platform" :
+ "ehci-platform" , 0);
+ if (!hci_dev)
+ return NULL;
+
+ hci_dev->dev.parent = dev->dev;
+ hci_dev->dev.dma_mask = &hci_dev->dev.coherent_dma_mask;
+
+ ret = platform_device_add_resources(hci_dev, hci_res,
+ ARRAY_SIZE(hci_res));
+ if (ret)
+ goto err_alloc;
+ if (ohci)
+ ret = platform_device_add_data(hci_dev, &ohci_pdata,
+ sizeof(ohci_pdata));
+ else
+ ret = platform_device_add_data(hci_dev, &ehci_pdata,
+ sizeof(ehci_pdata));
+ if (ret)
+ goto err_alloc;
+ ret = platform_device_add(hci_dev);
+ if (ret)
+ goto err_alloc;
+
+ return hci_dev;
+
+err_alloc:
+ platform_device_put(hci_dev);
+ return ERR_PTR(ret);
+}
+
+static int ssb_hcd_probe(struct ssb_device *dev,
+ const struct ssb_device_id *id)
+{
+ int err, tmp;
+ int start, len;
+ u16 chipid_top;
+ u16 coreid = dev->id.coreid;
+ struct ssb_hcd_device *usb_dev;
+
+ /* USBcores are only connected on embedded devices. */
+ chipid_top = (dev->bus->chip_id & 0xFF00);
+ if (chipid_top != 0x4700 && chipid_top != 0x5300)
+ return -ENODEV;
+
+ /* TODO: Probably need checks here; is the core connected? */
+
+ if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
+ return -EOPNOTSUPP;
+
+ usb_dev = kzalloc(sizeof(struct ssb_hcd_device), GFP_KERNEL);
+ if (!usb_dev)
+ return -ENOMEM;
+
+ /* We currently always attach SSB_DEV_USB11_HOSTDEV
+ * as HOST OHCI. If we want to attach it as Client device,
+ * we must branch here and call into the (yet to
+ * be written) Client mode driver. Same for remove(). */
+ usb_dev->enable_flags = ssb_hcd_init_chip(dev);
+
+ tmp = ssb_read32(dev, SSB_ADMATCH0);
+
+ start = ssb_admatch_base(tmp);
+ len = (coreid == SSB_DEV_USB20_HOST) ? 0x800 : ssb_admatch_size(tmp);
+ usb_dev->ohci_dev = ssb_hcd_create_pdev(dev, true, start, len);
+ if (IS_ERR(usb_dev->ohci_dev)) {
+ err = PTR_ERR(usb_dev->ohci_dev);
+ goto err_free_usb_dev;
+ }
+
+ if (coreid == SSB_DEV_USB20_HOST) {
+ start = ssb_admatch_base(tmp) + 0x800; /* ehci core offset */
+ usb_dev->ehci_dev = ssb_hcd_create_pdev(dev, false, start, len);
+ if (IS_ERR(usb_dev->ehci_dev)) {
+ err = PTR_ERR(usb_dev->ehci_dev);
+ goto err_unregister_ohci_dev;
+ }
+ }
+
+ ssb_set_drvdata(dev, usb_dev);
+ return 0;
+
+err_unregister_ohci_dev:
+ platform_device_unregister(usb_dev->ohci_dev);
+err_free_usb_dev:
+ kfree(usb_dev);
+ return err;
+}
+
+static void ssb_hcd_remove(struct ssb_device *dev)
+{
+ struct ssb_hcd_device *usb_dev = ssb_get_drvdata(dev);
+ struct platform_device *ohci_dev = usb_dev->ohci_dev;
+ struct platform_device *ehci_dev = usb_dev->ehci_dev;
+
+ if (ohci_dev)
+ platform_device_unregister(ohci_dev);
+ if (ehci_dev)
+ platform_device_unregister(ehci_dev);
+
+ ssb_device_disable(dev, 0);
+}
+
+static void ssb_hcd_shutdown(struct ssb_device *dev)
+{
+ ssb_device_disable(dev, 0);
+}
+
+#ifdef CONFIG_PM
+
+static int ssb_hcd_suspend(struct ssb_device *dev, pm_message_t state)
+{
+ ssb_device_disable(dev, 0);
+
+ return 0;
+}
+
+static int ssb_hcd_resume(struct ssb_device *dev)
+{
+ struct ssb_hcd_device *usb_dev = ssb_get_drvdata(dev);
+
+ ssb_device_enable(dev, usb_dev->enable_flags);
+
+ return 0;
+}
+
+#else /* !CONFIG_PM */
+#define ssb_hcd_suspend NULL
+#define ssb_hcd_resume NULL
+#endif /* CONFIG_PM */
+
+static const struct ssb_device_id ssb_hcd_table[] = {
+ SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOSTDEV, SSB_ANY_REV),
+ SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOST, SSB_ANY_REV),
+ SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB20_HOST, SSB_ANY_REV),
+ SSB_DEVTABLE_END
+};
+MODULE_DEVICE_TABLE(ssb, ssb_hcd_table);
+
+static struct ssb_driver ssb_hcd_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ssb_hcd_table,
+ .probe = ssb_hcd_probe,
+ .remove = ssb_hcd_remove,
+ .shutdown = ssb_hcd_shutdown,
+ .suspend = ssb_hcd_suspend,
+ .resume = ssb_hcd_resume,
+};
+
+static int __init ssb_hcd_init(void)
+{
+ return ssb_driver_register(&ssb_hcd_driver);
+}
+module_init(ssb_hcd_init);
+
+static void __exit ssb_hcd_exit(void)
+{
+ ssb_driver_unregister(&ssb_hcd_driver);
+}
+module_exit(ssb_hcd_exit);
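The new ssb-hcd glue does not register an HCD itself; it only creates "ohci-platform"/"ehci-platform" child devices and attaches the (empty) pdata structures via platform_device_add_data(). For illustration only, a consumer of that data would retrieve it with dev_get_platdata(), the same helper used elsewhere in this series; the function and field names below are hypothetical:

/* hypothetical consumer-side sketch, not part of this patch */
static int example_ohci_probe(struct platform_device *pdev)
{
	struct usb_ohci_pdata *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata)
		return -ENODEV;	/* glue code did not attach platform data */

	/* ... create the HCD, ioremap the MEM resource, request the IRQ ... */
	return 0;
}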
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index fab764946c7..c0671750671 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -55,7 +55,6 @@
#include <linux/mutex.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include <asm/byteorder.h>
/* FIXME ohci.h is ONLY for internal use by the OHCI driver.
@@ -74,7 +73,7 @@ MODULE_LICENSE("GPL");
#define INT_MODULE_PARM(n, v) static int n = v;module_param(n, int, 0444)
INT_MODULE_PARM(testing, 0);
/* Some boards misreport power switching/overcurrent*/
-static int distrust_firmware = 1;
+static bool distrust_firmware = true;
module_param(distrust_firmware, bool, 0);
MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
"t setup");
@@ -1810,9 +1809,9 @@ static int u132_hcd_start(struct usb_hcd *hcd)
struct platform_device *pdev =
to_platform_device(hcd->self.controller);
u16 vendor = ((struct u132_platform_data *)
- (pdev->dev.platform_data))->vendor;
+ dev_get_platdata(&pdev->dev))->vendor;
u16 device = ((struct u132_platform_data *)
- (pdev->dev.platform_data))->device;
+ dev_get_platdata(&pdev->dev))->device;
mutex_lock(&u132->sw_lock);
msleep(10);
if (vendor == PCI_VENDOR_ID_AMD && device == 0x740c) {
@@ -2604,13 +2603,14 @@ static int u132_roothub_descriptor(struct u132 *u132,
retval = u132_read_pcimem(u132, roothub.b, &rh_b);
if (retval)
return retval;
- memset(desc->bitmap, 0xff, sizeof(desc->bitmap));
- desc->bitmap[0] = rh_b & RH_B_DR;
+ memset(desc->u.hs.DeviceRemovable, 0xff,
+ sizeof(desc->u.hs.DeviceRemovable));
+ desc->u.hs.DeviceRemovable[0] = rh_b & RH_B_DR;
if (u132->num_ports > 7) {
- desc->bitmap[1] = (rh_b & RH_B_DR) >> 8;
- desc->bitmap[2] = 0xff;
+ desc->u.hs.DeviceRemovable[1] = (rh_b & RH_B_DR) >> 8;
+ desc->u.hs.DeviceRemovable[2] = 0xff;
} else
- desc->bitmap[1] = 0xff;
+ desc->u.hs.DeviceRemovable[1] = 0xff;
return 0;
}
@@ -2990,7 +2990,7 @@ static struct hc_driver u132_hc_driver = {
* synchronously - but instead should immediately stop activity to the
* device and asynchronously call usb_remove_hcd()
*/
-static int __devexit u132_remove(struct platform_device *pdev)
+static int u132_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
if (hcd) {
@@ -3034,7 +3034,7 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
int addrs = MAX_U132_ADDRS;
int udevs = MAX_U132_UDEVS;
int endps = MAX_U132_ENDPS;
- u132->board = pdev->dev.platform_data;
+ u132->board = dev_get_platdata(&pdev->dev);
u132->platform_dev = pdev;
u132->power = 0;
u132->reset = 0;
@@ -3084,7 +3084,7 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
mutex_unlock(&u132->sw_lock);
}
-static int __devinit u132_probe(struct platform_device *pdev)
+static int u132_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
int retval;
@@ -3133,6 +3133,7 @@ static int __devinit u132_probe(struct platform_device *pdev)
u132_u132_put_kref(u132);
return retval;
} else {
+ device_wakeup_enable(hcd->self.controller);
u132_monitor_queue_work(u132, 100);
return 0;
}
@@ -3141,10 +3142,11 @@ static int __devinit u132_probe(struct platform_device *pdev)
#ifdef CONFIG_PM
-/* for this device there's no useful distinction between the controller
-* and its root hub, except that the root hub only gets direct PM calls
-* when CONFIG_USB_SUSPEND is enabled.
-*/
+/*
+ * for this device there's no useful distinction between the controller
+ * and its root hub, except that the root hub only gets direct PM calls
+ * when CONFIG_PM_RUNTIME is enabled.
+ */
static int u132_suspend(struct platform_device *pdev, pm_message_t state)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
@@ -3212,11 +3214,11 @@ static int u132_resume(struct platform_device *pdev)
*/
static struct platform_driver u132_platform_driver = {
.probe = u132_probe,
- .remove = __devexit_p(u132_remove),
+ .remove = u132_remove,
.suspend = u132_suspend,
.resume = u132_resume,
.driver = {
- .name = (char *)hcd_name,
+ .name = hcd_name,
.owner = THIS_MODULE,
},
};
@@ -3229,8 +3231,7 @@ static int __init u132_hcd_init(void)
mutex_init(&u132_module_lock);
if (usb_disabled())
return -ENODEV;
- printk(KERN_INFO "driver %s built at %s on %s\n", hcd_name, __TIME__,
- __DATE__);
+ printk(KERN_INFO "driver %s\n", hcd_name);
workqueue = create_singlethread_workqueue("u132");
retval = platform_driver_register(&u132_platform_driver);
return retval;
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
index 6e7fb5f38db..1b28a000d5c 100644
--- a/drivers/usb/host/uhci-debug.c
+++ b/drivers/usb/host/uhci-debug.c
@@ -12,14 +12,15 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
-#include <linux/smp_lock.h>
#include <asm/io.h>
#include "uhci-hcd.h"
+#define EXTRA_SPACE 1024
+
static struct dentry *uhci_debugfs_root;
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
/* Handle REALLY large printks so we don't overflow buffers */
static void lprintk(char *buf)
@@ -38,18 +39,16 @@ static void lprintk(char *buf)
}
}
-static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space)
+static int uhci_show_td(struct uhci_hcd *uhci, struct uhci_td *td, char *buf,
+ int len, int space)
{
char *out = buf;
char *spid;
u32 status, token;
- /* Try to make sure there's enough memory */
- if (len < 160)
- return 0;
-
- status = td_status(td);
- out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td, le32_to_cpu(td->link));
+ status = td_status(uhci, td);
+ out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td,
+ hc32_to_cpu(uhci, td->link));
out += sprintf(out, "e%d %s%s%s%s%s%s%s%s%s%sLength=%x ",
((status >> 27) & 3),
(status & TD_CTRL_SPD) ? "SPD " : "",
@@ -63,8 +62,10 @@ static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space)
(status & TD_CTRL_CRCTIMEO) ? "CRC/Timeo " : "",
(status & TD_CTRL_BITSTUFF) ? "BitStuff " : "",
status & 0x7ff);
+ if (out - buf > len)
+ goto done;
- token = td_token(td);
+ token = td_token(uhci, td);
switch (uhci_packetid(token)) {
case USB_PID_SETUP:
spid = "SETUP";
@@ -87,20 +88,22 @@ static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space)
(token >> 8) & 127,
(token & 0xff),
spid);
- out += sprintf(out, "(buf=%08x)\n", le32_to_cpu(td->buffer));
+ out += sprintf(out, "(buf=%08x)\n", hc32_to_cpu(uhci, td->buffer));
+done:
+ if (out - buf > len)
+ out += sprintf(out, " ...\n");
return out - buf;
}
-static int uhci_show_urbp(struct urb_priv *urbp, char *buf, int len, int space)
+static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp,
+ char *buf, int len, int space)
{
char *out = buf;
struct uhci_td *td;
int i, nactive, ninactive;
char *ptype;
- if (len < 200)
- return 0;
out += sprintf(out, "urb_priv [%p] ", urbp);
out += sprintf(out, "urb [%p] ", urbp->urb);
@@ -108,6 +111,8 @@ static int uhci_show_urbp(struct urb_priv *urbp, char *buf, int len, int space)
out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe));
out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe),
(usb_pipein(urbp->urb->pipe) ? "IN" : "OUT"));
+ if (out - buf > len)
+ goto done;
switch (usb_pipetype(urbp->urb->pipe)) {
case PIPE_ISOCHRONOUS: ptype = "ISO"; break;
@@ -126,24 +131,33 @@ static int uhci_show_urbp(struct urb_priv *urbp, char *buf, int len, int space)
out += sprintf(out, " Unlinked=%d", urbp->urb->unlinked);
out += sprintf(out, "\n");
+ if (out - buf > len)
+ goto done;
+
i = nactive = ninactive = 0;
list_for_each_entry(td, &urbp->td_list, list) {
if (urbp->qh->type != USB_ENDPOINT_XFER_ISOC &&
(++i <= 10 || debug > 2)) {
out += sprintf(out, "%*s%d: ", space + 2, "", i);
- out += uhci_show_td(td, out, len - (out - buf), 0);
+ out += uhci_show_td(uhci, td, out,
+ len - (out - buf), 0);
+ if (out - buf > len)
+ goto tail;
} else {
- if (td_status(td) & TD_CTRL_ACTIVE)
+ if (td_status(uhci, td) & TD_CTRL_ACTIVE)
++nactive;
else
++ninactive;
}
}
if (nactive + ninactive > 0)
- out += sprintf(out, "%*s[skipped %d inactive and %d active "
- "TDs]\n",
+ out += sprintf(out,
+ "%*s[skipped %d inactive and %d active TDs]\n",
space, "", ninactive, nactive);
-
+done:
+ if (out - buf > len)
+ out += sprintf(out, " ...\n");
+tail:
return out - buf;
}
@@ -152,13 +166,9 @@ static int uhci_show_qh(struct uhci_hcd *uhci,
{
char *out = buf;
int i, nurbs;
- __le32 element = qh_element(qh);
+ __hc32 element = qh_element(qh);
char *qtype;
- /* Try to make sure there's enough memory */
- if (len < 80 * 7)
- return 0;
-
switch (qh->type) {
case USB_ENDPOINT_XFER_ISOC: qtype = "ISO"; break;
case USB_ENDPOINT_XFER_INT: qtype = "INT"; break;
@@ -169,47 +179,59 @@ static int uhci_show_qh(struct uhci_hcd *uhci,
out += sprintf(out, "%*s[%p] %s QH link (%08x) element (%08x)\n",
space, "", qh, qtype,
- le32_to_cpu(qh->link), le32_to_cpu(element));
+ hc32_to_cpu(uhci, qh->link),
+ hc32_to_cpu(uhci, element));
if (qh->type == USB_ENDPOINT_XFER_ISOC)
- out += sprintf(out, "%*s period %d phase %d load %d us, "
- "frame %x desc [%p]\n",
+ out += sprintf(out,
+ "%*s period %d phase %d load %d us, frame %x desc [%p]\n",
space, "", qh->period, qh->phase, qh->load,
qh->iso_frame, qh->iso_packet_desc);
else if (qh->type == USB_ENDPOINT_XFER_INT)
out += sprintf(out, "%*s period %d phase %d load %d us\n",
space, "", qh->period, qh->phase, qh->load);
+ if (out - buf > len)
+ goto done;
- if (element & UHCI_PTR_QH)
+ if (element & UHCI_PTR_QH(uhci))
out += sprintf(out, "%*s Element points to QH (bug?)\n", space, "");
- if (element & UHCI_PTR_DEPTH)
+ if (element & UHCI_PTR_DEPTH(uhci))
out += sprintf(out, "%*s Depth traverse\n", space, "");
- if (element & cpu_to_le32(8))
+ if (element & cpu_to_hc32(uhci, 8))
out += sprintf(out, "%*s Bit 3 set (bug?)\n", space, "");
- if (!(element & ~(UHCI_PTR_QH | UHCI_PTR_DEPTH)))
+ if (!(element & ~(UHCI_PTR_QH(uhci) | UHCI_PTR_DEPTH(uhci))))
out += sprintf(out, "%*s Element is NULL (bug?)\n", space, "");
+ if (out - buf > len)
+ goto done;
+
if (list_empty(&qh->queue)) {
out += sprintf(out, "%*s queue is empty\n", space, "");
- if (qh == uhci->skel_async_qh)
- out += uhci_show_td(uhci->term_td, out,
+ if (qh == uhci->skel_async_qh) {
+ out += uhci_show_td(uhci, uhci->term_td, out,
len - (out - buf), 0);
+ if (out - buf > len)
+ goto tail;
+ }
} else {
struct urb_priv *urbp = list_entry(qh->queue.next,
struct urb_priv, node);
struct uhci_td *td = list_entry(urbp->td_list.next,
struct uhci_td, list);
- if (element != LINK_TO_TD(td))
+ if (element != LINK_TO_TD(uhci, td))
out += sprintf(out, "%*s Element != First TD\n",
space, "");
i = nurbs = 0;
list_for_each_entry(urbp, &qh->queue, node) {
- if (++i <= 10)
- out += uhci_show_urbp(urbp, out,
+ if (++i <= 10) {
+ out += uhci_show_urbp(uhci, urbp, out,
len - (out - buf), space + 2);
+ if (out - buf > len)
+ goto tail;
+ }
else
++nurbs;
}
@@ -218,23 +240,27 @@ static int uhci_show_qh(struct uhci_hcd *uhci,
space, "", nurbs);
}
+ if (out - buf > len)
+ goto done;
+
if (qh->dummy_td) {
out += sprintf(out, "%*s Dummy TD\n", space, "");
- out += uhci_show_td(qh->dummy_td, out, len - (out - buf), 0);
+ out += uhci_show_td(uhci, qh->dummy_td, out,
+ len - (out - buf), 0);
+ if (out - buf > len)
+ goto tail;
}
+done:
+ if (out - buf > len)
+ out += sprintf(out, " ...\n");
+tail:
return out - buf;
}
-static int uhci_show_sc(int port, unsigned short status, char *buf, int len)
+static int uhci_show_sc(int port, unsigned short status, char *buf)
{
- char *out = buf;
-
- /* Try to make sure there's enough memory */
- if (len < 160)
- return 0;
-
- out += sprintf(out, " stat%d = %04x %s%s%s%s%s%s%s%s%s%s\n",
+ return sprintf(buf, " stat%d = %04x %s%s%s%s%s%s%s%s%s%s\n",
port,
status,
(status & USBPORTSC_SUSP) ? " Suspend" : "",
@@ -247,19 +273,12 @@ static int uhci_show_sc(int port, unsigned short status, char *buf, int len)
(status & USBPORTSC_PE) ? " Enabled" : "",
(status & USBPORTSC_CSC) ? " ConnectChange" : "",
(status & USBPORTSC_CCS) ? " Connected" : "");
-
- return out - buf;
}
-static int uhci_show_root_hub_state(struct uhci_hcd *uhci, char *buf, int len)
+static int uhci_show_root_hub_state(struct uhci_hcd *uhci, char *buf)
{
- char *out = buf;
char *rh_state;
- /* Try to make sure there's enough memory */
- if (len < 60)
- return 0;
-
switch (uhci->rh_state) {
case UHCI_RH_RESET:
rh_state = "reset"; break;
@@ -278,32 +297,27 @@ static int uhci_show_root_hub_state(struct uhci_hcd *uhci, char *buf, int len)
default:
rh_state = "?"; break;
}
- out += sprintf(out, "Root-hub state: %s FSBR: %d\n",
+ return sprintf(buf, "Root-hub state: %s FSBR: %d\n",
rh_state, uhci->fsbr_is_on);
- return out - buf;
}
static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
{
char *out = buf;
- unsigned long io_addr = uhci->io_addr;
unsigned short usbcmd, usbstat, usbint, usbfrnum;
unsigned int flbaseadd;
unsigned char sof;
unsigned short portsc1, portsc2;
- /* Try to make sure there's enough memory */
- if (len < 80 * 9)
- return 0;
- usbcmd = inw(io_addr + 0);
- usbstat = inw(io_addr + 2);
- usbint = inw(io_addr + 4);
- usbfrnum = inw(io_addr + 6);
- flbaseadd = inl(io_addr + 8);
- sof = inb(io_addr + 12);
- portsc1 = inw(io_addr + 16);
- portsc2 = inw(io_addr + 18);
+ usbcmd = uhci_readw(uhci, USBCMD);
+ usbstat = uhci_readw(uhci, USBSTS);
+ usbint = uhci_readw(uhci, USBINTR);
+ usbfrnum = uhci_readw(uhci, USBFRNUM);
+ flbaseadd = uhci_readl(uhci, USBFLBASEADD);
+ sof = uhci_readb(uhci, USBSOF);
+ portsc1 = uhci_readw(uhci, USBPORTSC1);
+ portsc2 = uhci_readw(uhci, USBPORTSC2);
out += sprintf(out, " usbcmd = %04x %s%s%s%s%s%s%s%s\n",
usbcmd,
@@ -315,6 +329,8 @@ static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
(usbcmd & USBCMD_GRESET) ? "GRESET " : "",
(usbcmd & USBCMD_HCRESET) ? "HCRESET " : "",
(usbcmd & USBCMD_RS) ? "RS " : "");
+ if (out - buf > len)
+ goto done;
out += sprintf(out, " usbstat = %04x %s%s%s%s%s%s\n",
usbstat,
@@ -324,19 +340,33 @@ static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
(usbstat & USBSTS_RD) ? "ResumeDetect " : "",
(usbstat & USBSTS_ERROR) ? "USBError " : "",
(usbstat & USBSTS_USBINT) ? "USBINT " : "");
+ if (out - buf > len)
+ goto done;
out += sprintf(out, " usbint = %04x\n", usbint);
out += sprintf(out, " usbfrnum = (%d)%03x\n", (usbfrnum >> 10) & 1,
0xfff & (4*(unsigned int)usbfrnum));
out += sprintf(out, " flbaseadd = %08x\n", flbaseadd);
out += sprintf(out, " sof = %02x\n", sof);
- out += uhci_show_sc(1, portsc1, out, len - (out - buf));
- out += uhci_show_sc(2, portsc2, out, len - (out - buf));
- out += sprintf(out, "Most recent frame: %x (%d) "
- "Last ISO frame: %x (%d)\n",
+ if (out - buf > len)
+ goto done;
+
+ out += uhci_show_sc(1, portsc1, out);
+ if (out - buf > len)
+ goto done;
+
+ out += uhci_show_sc(2, portsc2, out);
+ if (out - buf > len)
+ goto done;
+
+ out += sprintf(out,
+ "Most recent frame: %x (%d) Last ISO frame: %x (%d)\n",
uhci->frame_number, uhci->frame_number & 1023,
uhci->last_iso_frame, uhci->last_iso_frame & 1023);
+done:
+ if (out - buf > len)
+ out += sprintf(out, " ...\n");
return out - buf;
}
@@ -348,17 +378,21 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
struct uhci_td *td;
struct list_head *tmp, *head;
int nframes, nerrs;
- __le32 link;
- __le32 fsbr_link;
+ __hc32 link;
+ __hc32 fsbr_link;
static const char * const qh_names[] = {
"unlink", "iso", "int128", "int64", "int32", "int16",
"int8", "int4", "int2", "async", "term"
};
- out += uhci_show_root_hub_state(uhci, out, len - (out - buf));
+ out += uhci_show_root_hub_state(uhci, out);
+ if (out - buf > len)
+ goto done;
out += sprintf(out, "HC status\n");
out += uhci_show_status(uhci, out, len - (out - buf));
+ if (out - buf > len)
+ goto tail;
out += sprintf(out, "Periodic load table\n");
for (i = 0; i < MAX_PHASE; ++i) {
@@ -371,14 +405,16 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
uhci_to_hcd(uhci)->self.bandwidth_int_reqs,
uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs);
if (debug <= 1)
- return out - buf;
+ goto tail;
out += sprintf(out, "Frame List\n");
nframes = 10;
nerrs = 0;
for (i = 0; i < UHCI_NUMFRAMES; ++i) {
- __le32 qh_dma;
+ __hc32 qh_dma;
+ if (out - buf > len)
+ goto done;
j = 0;
td = uhci->frame_cpu[i];
link = uhci->frame[i];
@@ -387,7 +423,7 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
if (nframes > 0) {
out += sprintf(out, "- Frame %d -> (%08x)\n",
- i, le32_to_cpu(link));
+ i, hc32_to_cpu(uhci, link));
j = 1;
}
@@ -396,16 +432,21 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
do {
td = list_entry(tmp, struct uhci_td, fl_list);
tmp = tmp->next;
- if (link != LINK_TO_TD(td)) {
- if (nframes > 0)
- out += sprintf(out, " link does "
- "not match list entry!\n");
- else
+ if (link != LINK_TO_TD(uhci, td)) {
+ if (nframes > 0) {
+ out += sprintf(out,
+ " link does not match list entry!\n");
+ if (out - buf > len)
+ goto done;
+ } else
++nerrs;
}
- if (nframes > 0)
- out += uhci_show_td(td, out,
+ if (nframes > 0) {
+ out += uhci_show_td(uhci, td, out,
len - (out - buf), 4);
+ if (out - buf > len)
+ goto tail;
+ }
link = td->link;
} while (tmp != head);
@@ -416,11 +457,14 @@ check_link:
if (!j) {
out += sprintf(out,
"- Frame %d -> (%08x)\n",
- i, le32_to_cpu(link));
+ i, hc32_to_cpu(uhci, link));
j = 1;
}
- out += sprintf(out, " link does not match "
- "QH (%08x)!\n", le32_to_cpu(qh_dma));
+ out += sprintf(out,
+ " link does not match QH (%08x)!\n",
+ hc32_to_cpu(uhci, qh_dma));
+ if (out - buf > len)
+ goto done;
} else
++nerrs;
}
@@ -431,21 +475,30 @@ check_link:
out += sprintf(out, "Skeleton QHs\n");
+ if (out - buf > len)
+ goto done;
+
fsbr_link = 0;
for (i = 0; i < UHCI_NUM_SKELQH; ++i) {
int cnt = 0;
qh = uhci->skelqh[i];
- out += sprintf(out, "- skel_%s_qh\n", qh_names[i]); \
+ out += sprintf(out, "- skel_%s_qh\n", qh_names[i]);
out += uhci_show_qh(uhci, qh, out, len - (out - buf), 4);
+ if (out - buf > len)
+ goto tail;
/* Last QH is the Terminating QH, it's different */
if (i == SKEL_TERM) {
- if (qh_element(qh) != LINK_TO_TD(uhci->term_td))
- out += sprintf(out, " skel_term_qh element is not set to term_td!\n");
+ if (qh_element(qh) != LINK_TO_TD(uhci, uhci->term_td)) {
+ out += sprintf(out,
+ " skel_term_qh element is not set to term_td!\n");
+ if (out - buf > len)
+ goto done;
+ }
link = fsbr_link;
if (!link)
- link = LINK_TO_QH(uhci->skel_term_qh);
+ link = LINK_TO_QH(uhci, uhci->skel_term_qh);
goto check_qh_link;
}
@@ -455,29 +508,40 @@ check_link:
while (tmp != head) {
qh = list_entry(tmp, struct uhci_qh, node);
tmp = tmp->next;
- if (++cnt <= 10)
+ if (++cnt <= 10) {
out += uhci_show_qh(uhci, qh, out,
len - (out - buf), 4);
+ if (out - buf > len)
+ goto tail;
+ }
if (!fsbr_link && qh->skel >= SKEL_FSBR)
- fsbr_link = LINK_TO_QH(qh);
+ fsbr_link = LINK_TO_QH(uhci, qh);
}
if ((cnt -= 10) > 0)
out += sprintf(out, " Skipped %d QHs\n", cnt);
- link = UHCI_PTR_TERM;
+ link = UHCI_PTR_TERM(uhci);
if (i <= SKEL_ISO)
;
else if (i < SKEL_ASYNC)
- link = LINK_TO_QH(uhci->skel_async_qh);
+ link = LINK_TO_QH(uhci, uhci->skel_async_qh);
else if (!uhci->fsbr_is_on)
;
else
- link = LINK_TO_QH(uhci->skel_term_qh);
+ link = LINK_TO_QH(uhci, uhci->skel_term_qh);
check_qh_link:
if (qh->link != link)
- out += sprintf(out, " last QH not linked to next skeleton!\n");
+ out += sprintf(out,
+ " last QH not linked to next skeleton!\n");
+
+ if (out - buf > len)
+ goto done;
}
+done:
+ if (out - buf > len)
+ out += sprintf(out, " ...\n");
+tail:
return out - buf;
}
@@ -509,7 +573,8 @@ static int uhci_debug_open(struct inode *inode, struct file *file)
up->size = 0;
spin_lock_irqsave(&uhci->lock, flags);
if (uhci->is_initialized)
- up->size = uhci_sprint_schedule(uhci, up->data, MAX_OUTPUT);
+ up->size = uhci_sprint_schedule(uhci, up->data,
+ MAX_OUTPUT - EXTRA_SPACE);
spin_unlock_irqrestore(&uhci->lock, flags);
file->private_data = up;
@@ -524,7 +589,9 @@ static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence)
up = file->private_data;
- /* XXX: atomic 64bit seek access, but that needs to be fixed in the VFS */
+ /*
+ * XXX: atomic 64bit seek access, but that needs to be fixed in the VFS
+ */
switch (whence) {
case 0:
new = off;
@@ -568,7 +635,7 @@ static const struct file_operations uhci_debug_operations = {
#endif /* CONFIG_DEBUG_FS */
-#else /* DEBUG */
+#else /* CONFIG_DYNAMIC_DEBUG */
static inline void lprintk(char *buf)
{}
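The uhci-debug.c changes above replace the old "guess how much room a dump needs" checks with a slack-based scheme: callers pass a length already reduced by EXTRA_SPACE, so each dump helper may overshoot the soft limit by one formatted line, notice it via "out - buf > len", and still have room to append a " ...\n" truncation marker. A minimal sketch of the pattern, with hypothetical names and a made-up loop:

#define EXTRA_SPACE	1024

/* caller reserves the slack: dump_something(buf, BUF_SIZE - EXTRA_SPACE) */
static int dump_something(char *buf, int len)
{
	char *out = buf;
	int i;

	for (i = 0; i < 1000; i++) {
		out += sprintf(out, "line %d\n", i);
		if (out - buf > len)		/* past the soft limit: stop early */
			break;
	}
	if (out - buf > len)
		out += sprintf(out, " ...\n");	/* fits in the reserved slack */
	return out - buf;
}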
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
new file mode 100644
index 00000000000..ab25dc397e8
--- /dev/null
+++ b/drivers/usb/host/uhci-grlib.c
@@ -0,0 +1,207 @@
+/*
+ * UHCI HCD (Host Controller Driver) for GRLIB GRUSBHC
+ *
+ * Copyright (c) 2011 Jan Andersson <jan@gaisler.com>
+ *
+ * This file is based on UHCI PCI HCD:
+ * (C) Copyright 1999 Linus Torvalds
+ * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
+ * (C) Copyright 1999 Randy Dunlap
+ * (C) Copyright 1999 Georg Acher, acher@in.tum.de
+ * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
+ * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
+ * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
+ * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
+ * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
+ * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+static int uhci_grlib_init(struct usb_hcd *hcd)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+ /*
+ * Probe to determine the endianness of the controller.
+ * We know that bit 7 of the PORTSC1 register is always set
+ * and bit 15 is always clear. If uhci_readw() yields a value
+ * with bit 7 (0x80) turned on then the current little-endian
+ * setting is correct. Otherwise we assume the value was
+ * byte-swapped; hence the register interface and presumably
+ * also the descriptors are big-endian.
+ */
+ if (!(uhci_readw(uhci, USBPORTSC1) & 0x80)) {
+ uhci->big_endian_mmio = 1;
+ uhci->big_endian_desc = 1;
+ }
+
+ uhci->rh_numports = uhci_count_ports(hcd);
+
+ /* Set up pointers to generic functions */
+ uhci->reset_hc = uhci_generic_reset_hc;
+ uhci->check_and_reset_hc = uhci_generic_check_and_reset_hc;
+ /* No special actions need to be taken for the functions below */
+ uhci->configure_hc = NULL;
+ uhci->resume_detect_interrupts_are_broken = NULL;
+ uhci->global_suspend_mode_is_broken = NULL;
+
+ /* Reset if the controller isn't already safely quiescent. */
+ check_and_reset_hc(uhci);
+ return 0;
+}
+
+static const struct hc_driver uhci_grlib_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "GRLIB GRUSBHC UHCI Host Controller",
+ .hcd_priv_size = sizeof(struct uhci_hcd),
+
+ /* Generic hardware linkage */
+ .irq = uhci_irq,
+ .flags = HCD_MEMORY | HCD_USB11,
+
+ /* Basic lifecycle operations */
+ .reset = uhci_grlib_init,
+ .start = uhci_start,
+#ifdef CONFIG_PM
+ .pci_suspend = NULL,
+ .pci_resume = NULL,
+ .bus_suspend = uhci_rh_suspend,
+ .bus_resume = uhci_rh_resume,
+#endif
+ .stop = uhci_stop,
+
+ .urb_enqueue = uhci_urb_enqueue,
+ .urb_dequeue = uhci_urb_dequeue,
+
+ .endpoint_disable = uhci_hcd_endpoint_disable,
+ .get_frame_number = uhci_hcd_get_frame_number,
+
+ .hub_status_data = uhci_hub_status_data,
+ .hub_control = uhci_hub_control,
+};
+
+
+static int uhci_hcd_grlib_probe(struct platform_device *op)
+{
+ struct device_node *dn = op->dev.of_node;
+ struct usb_hcd *hcd;
+ struct uhci_hcd *uhci = NULL;
+ struct resource res;
+ int irq;
+ int rv;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ dev_dbg(&op->dev, "initializing GRUSBHC UHCI USB Controller\n");
+
+ rv = of_address_to_resource(dn, 0, &res);
+ if (rv)
+ return rv;
+
+ /* usb_create_hcd requires dma_mask != NULL */
+ op->dev.dma_mask = &op->dev.coherent_dma_mask;
+ hcd = usb_create_hcd(&uhci_grlib_hc_driver, &op->dev,
+ "GRUSBHC UHCI USB");
+ if (!hcd)
+ return -ENOMEM;
+
+ hcd->rsrc_start = res.start;
+ hcd->rsrc_len = resource_size(&res);
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
+ rv = -EBUSY;
+ goto err_rmr;
+ }
+
+ irq = irq_of_parse_and_map(dn, 0);
+ if (irq == NO_IRQ) {
+ printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
+ rv = -EBUSY;
+ goto err_irq;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
+ rv = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ uhci = hcd_to_uhci(hcd);
+
+ uhci->regs = hcd->regs;
+
+ rv = usb_add_hcd(hcd, irq, 0);
+ if (rv)
+ goto err_uhci;
+
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
+
+err_uhci:
+ iounmap(hcd->regs);
+err_ioremap:
+ irq_dispose_mapping(irq);
+err_irq:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err_rmr:
+ usb_put_hcd(hcd);
+
+ return rv;
+}
+
+static int uhci_hcd_grlib_remove(struct platform_device *op)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(op);
+
+ dev_dbg(&op->dev, "stopping GRLIB GRUSBHC UHCI USB Controller\n");
+
+ usb_remove_hcd(hcd);
+
+ iounmap(hcd->regs);
+ irq_dispose_mapping(hcd->irq);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+/* Make sure the controller is quiescent and that we're not using it
+ * any more. This is mainly for the benefit of programs which, like kexec,
+ * expect the hardware to be idle: not doing DMA or generating IRQs.
+ *
+ * This routine may be called in a damaged or failing kernel. Hence we
+ * do not acquire the spinlock before shutting down the controller.
+ */
+static void uhci_hcd_grlib_shutdown(struct platform_device *op)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(op);
+
+ uhci_hc_died(hcd_to_uhci(hcd));
+}
+
+static const struct of_device_id uhci_hcd_grlib_of_match[] = {
+ { .name = "GAISLER_UHCI", },
+ { .name = "01_027", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, uhci_hcd_grlib_of_match);
+
+
+static struct platform_driver uhci_grlib_driver = {
+ .probe = uhci_hcd_grlib_probe,
+ .remove = uhci_hcd_grlib_remove,
+ .shutdown = uhci_hcd_grlib_shutdown,
+ .driver = {
+ .name = "grlib-uhci",
+ .owner = THIS_MODULE,
+ .of_match_table = uhci_hcd_grlib_of_match,
+ },
+};
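The GRLIB glue only sets uhci->big_endian_mmio/big_endian_desc after the PORTSC1 probe; the actual byte-order handling is done by the uhci_readw()/hc32 accessors in uhci-hcd.h (not shown in this diff), which pick an accessor based on those flags. A rough sketch of the idea, for illustration only (the real helpers also handle the PCI/PIO case, and use a hypothetical name here):

/* illustration of the accessor selection, not the real uhci_readw() */
static inline u16 example_uhci_readw(struct uhci_hcd *uhci, int reg)
{
	if (uhci->big_endian_mmio)
		return ioread16be(uhci->regs + reg);	/* big-endian register file */
	return readw(uhci->regs + reg);			/* usual little-endian layout */
}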
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index f52d04db28f..27f35e8f161 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -45,21 +45,20 @@
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include "uhci-hcd.h"
-#include "pci-quirks.h"
/*
* Version Information
*/
-#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
-Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
-Alan Stern"
+#define DRIVER_AUTHOR \
+ "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, " \
+ "Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, " \
+ "Roman Weissgaerber, Alan Stern"
#define DRIVER_DESC "USB Universal Host Controller Interface driver"
/* for flakey hardware, ignore overcurrent indicators */
-static int ignore_oc;
+static bool ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore hardware overcurrent indications");
@@ -70,18 +69,21 @@ MODULE_PARM_DESC(ignore_oc, "ignore hardware overcurrent indications");
* show all queues in /sys/kernel/debug/uhci/[pci_addr]
* debug = 3, show all TDs in URBs when dumping
*/
-#ifdef DEBUG
-#define DEBUG_CONFIGURED 1
+#ifdef CONFIG_DYNAMIC_DEBUG
+
static int debug = 1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level");
+static char *errbuf;
#else
-#define DEBUG_CONFIGURED 0
-#define debug 0
+
+#define debug 0
+#define errbuf NULL
+
#endif
-static char *errbuf;
+
#define ERRBUF_LEN (32 * 1024)
static struct kmem_cache *uhci_up_cachep; /* urb_priv */
@@ -93,7 +95,7 @@ static void uhci_get_current_frame_number(struct uhci_hcd *uhci);
/*
* Calculate the link pointer DMA value for the first Skeleton QH in a frame.
*/
-static __le32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame)
+static __hc32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame)
{
int skelnum;
@@ -115,7 +117,7 @@ static __le32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame)
skelnum = 8 - (int) __ffs(frame | UHCI_NUMFRAMES);
if (skelnum <= 1)
skelnum = 9;
- return LINK_TO_QH(uhci->skelqh[skelnum]);
+ return LINK_TO_QH(uhci, uhci->skelqh[skelnum]);
}
#include "uhci-debug.c"
@@ -134,15 +136,12 @@ static void finish_reset(struct uhci_hcd *uhci)
* We have to clear them by hand.
*/
for (port = 0; port < uhci->rh_numports; ++port)
- outw(0, uhci->io_addr + USBPORTSC1 + (port * 2));
+ uhci_writew(uhci, 0, USBPORTSC1 + (port * 2));
uhci->port_c_suspend = uhci->resuming_ports = 0;
uhci->rh_state = UHCI_RH_RESET;
uhci->is_stopped = UHCI_IS_STOPPED;
- uhci_to_hcd(uhci)->state = HC_STATE_HALT;
clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
-
- uhci->dead = 0; /* Full reset resurrects the controller */
}
/*
@@ -152,7 +151,7 @@ static void finish_reset(struct uhci_hcd *uhci)
static void uhci_hc_died(struct uhci_hcd *uhci)
{
uhci_get_current_frame_number(uhci);
- uhci_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr);
+ uhci->reset_hc(uhci);
finish_reset(uhci);
uhci->dead = 1;
@@ -167,97 +166,118 @@ static void uhci_hc_died(struct uhci_hcd *uhci)
*/
static void check_and_reset_hc(struct uhci_hcd *uhci)
{
- if (uhci_check_and_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr))
+ if (uhci->check_and_reset_hc(uhci))
finish_reset(uhci);
}
+#if defined(CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC)
+/*
+ * The two functions below are generic reset functions that are used on systems
+ * that do not have keyboard and mouse legacy support. We assume that we are
+ * running on such a system if CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC is defined.
+ */
+
+/*
+ * Make sure the controller is completely inactive, unable to
+ * generate interrupts or do DMA.
+ */
+static void uhci_generic_reset_hc(struct uhci_hcd *uhci)
+{
+ /* Reset the HC - this will force us to get a
+ * new notification of any already connected
+ * ports due to the virtual disconnect that it
+ * implies.
+ */
+ uhci_writew(uhci, USBCMD_HCRESET, USBCMD);
+ mb();
+ udelay(5);
+ if (uhci_readw(uhci, USBCMD) & USBCMD_HCRESET)
+ dev_warn(uhci_dev(uhci), "HCRESET not completed yet!\n");
+
+ /* Just to be safe, disable interrupt requests and
+ * make sure the controller is stopped.
+ */
+ uhci_writew(uhci, 0, USBINTR);
+ uhci_writew(uhci, 0, USBCMD);
+}
+
+/*
+ * Initialize a controller that was newly discovered or has just been
+ * resumed. In either case we can't be sure of its previous state.
+ *
+ * Returns: 1 if the controller was reset, 0 otherwise.
+ */
+static int uhci_generic_check_and_reset_hc(struct uhci_hcd *uhci)
+{
+ unsigned int cmd, intr;
+
+ /*
+ * When restarting a suspended controller, we expect all the
+ * settings to be the same as we left them:
+ *
+ * Controller is stopped and configured with EGSM set;
+ * No interrupts enabled except possibly Resume Detect.
+ *
+ * If any of these conditions are violated we do a complete reset.
+ */
+
+ cmd = uhci_readw(uhci, USBCMD);
+ if ((cmd & USBCMD_RS) || !(cmd & USBCMD_CF) || !(cmd & USBCMD_EGSM)) {
+ dev_dbg(uhci_dev(uhci), "%s: cmd = 0x%04x\n",
+ __func__, cmd);
+ goto reset_needed;
+ }
+
+ intr = uhci_readw(uhci, USBINTR);
+ if (intr & (~USBINTR_RESUME)) {
+ dev_dbg(uhci_dev(uhci), "%s: intr = 0x%04x\n",
+ __func__, intr);
+ goto reset_needed;
+ }
+ return 0;
+
+reset_needed:
+ dev_dbg(uhci_dev(uhci), "Performing full reset\n");
+ uhci_generic_reset_hc(uhci);
+ return 1;
+}
+#endif /* CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC */
+
/*
* Store the basic register settings needed by the controller.
*/
static void configure_hc(struct uhci_hcd *uhci)
{
- struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci));
-
/* Set the frame length to the default: 1 ms exactly */
- outb(USBSOF_DEFAULT, uhci->io_addr + USBSOF);
+ uhci_writeb(uhci, USBSOF_DEFAULT, USBSOF);
/* Store the frame list base address */
- outl(uhci->frame_dma_handle, uhci->io_addr + USBFLBASEADD);
+ uhci_writel(uhci, uhci->frame_dma_handle, USBFLBASEADD);
/* Set the current frame number */
- outw(uhci->frame_number & UHCI_MAX_SOF_NUMBER,
- uhci->io_addr + USBFRNUM);
-
- /* Mark controller as not halted before we enable interrupts */
- uhci_to_hcd(uhci)->state = HC_STATE_SUSPENDED;
- mb();
-
- /* Enable PIRQ */
- pci_write_config_word(pdev, USBLEGSUP, USBLEGSUP_DEFAULT);
+ uhci_writew(uhci, uhci->frame_number & UHCI_MAX_SOF_NUMBER,
+ USBFRNUM);
- /* Disable platform-specific non-PME# wakeup */
- if (pdev->vendor == PCI_VENDOR_ID_INTEL)
- pci_write_config_byte(pdev, USBRES_INTEL, 0);
+ /* perform any arch/bus specific configuration */
+ if (uhci->configure_hc)
+ uhci->configure_hc(uhci);
}
-
static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
{
- int port;
-
/* If we have to ignore overcurrent events then almost by definition
* we can't depend on resume-detect interrupts. */
if (ignore_oc)
return 1;
- switch (to_pci_dev(uhci_dev(uhci))->vendor) {
- default:
- break;
-
- case PCI_VENDOR_ID_GENESYS:
- /* Genesys Logic's GL880S controllers don't generate
- * resume-detect interrupts.
- */
- return 1;
-
- case PCI_VENDOR_ID_INTEL:
- /* Some of Intel's USB controllers have a bug that causes
- * resume-detect interrupts if any port has an over-current
- * condition. To make matters worse, some motherboards
- * hardwire unused USB ports' over-current inputs active!
- * To prevent problems, we will not enable resume-detect
- * interrupts if any ports are OC.
- */
- for (port = 0; port < uhci->rh_numports; ++port) {
- if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
- USBPORTSC_OC)
- return 1;
- }
- break;
- }
- return 0;
+ return uhci->resume_detect_interrupts_are_broken ?
+ uhci->resume_detect_interrupts_are_broken(uhci) : 0;
}
static int global_suspend_mode_is_broken(struct uhci_hcd *uhci)
{
- int port;
- const char *sys_info;
- static char bad_Asus_board[] = "A7V8X";
-
- /* One of Asus's motherboards has a bug which causes it to
- * wake up immediately from suspend-to-RAM if any of the ports
- * are connected. In such cases we will not set EGSM.
- */
- sys_info = dmi_get_system_info(DMI_BOARD_NAME);
- if (sys_info && !strcmp(sys_info, bad_Asus_board)) {
- for (port = 0; port < uhci->rh_numports; ++port) {
- if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
- USBPORTSC_CCS)
- return 1;
- }
- }
-
- return 0;
+ return uhci->global_suspend_mode_is_broken ?
+ uhci->global_suspend_mode_is_broken(uhci) : 0;
}
static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state)
@@ -276,52 +296,52 @@ __acquires(uhci->lock)
* and that remote wakeups should be enabled.
*/
egsm_enable = USBCMD_EGSM;
- uhci->RD_enable = 1;
int_enable = USBINTR_RESUME;
wakeup_enable = 1;
- /* In auto-stop mode wakeups must always be detected, but
- * Resume-Detect interrupts may be prohibited. (In the absence
- * of CONFIG_PM, they are always disallowed.)
+ /*
+ * In auto-stop mode, we must be able to detect new connections.
+ * The user can force us to poll by disabling remote wakeup;
+ * otherwise we will use the EGSM/RD mechanism.
*/
if (auto_stop) {
if (!device_may_wakeup(&rhdev->dev))
- int_enable = 0;
+ egsm_enable = int_enable = 0;
+ }
- /* In bus-suspend mode wakeups may be disabled, but if they are
- * allowed then so are Resume-Detect interrupts.
- */
- } else {
#ifdef CONFIG_PM
+ /*
+ * In bus-suspend mode, we use the wakeup setting specified
+ * for the root hub.
+ */
+ else {
if (!rhdev->do_remote_wakeup)
wakeup_enable = 0;
-#endif
}
+#endif
- /* EGSM causes the root hub to echo a 'K' signal (resume) out any
- * port which requests a remote wakeup. According to the USB spec,
- * every hub is supposed to do this. But if we are ignoring
- * remote-wakeup requests anyway then there's no point to it.
- * We also shouldn't enable EGSM if it's broken.
- */
- if (!wakeup_enable || global_suspend_mode_is_broken(uhci))
- egsm_enable = 0;
-
- /* If we're ignoring wakeup events then there's no reason to
- * enable Resume-Detect interrupts. We also shouldn't enable
- * them if they are broken or disallowed.
+ /*
+ * UHCI doesn't distinguish between wakeup requests from downstream
+ * devices and local connect/disconnect events. There's no way to
+ * enable one without the other; both are controlled by EGSM. Thus
+ * if wakeups are disallowed then EGSM must be turned off -- in which
+ * case remote wakeup requests from downstream during system sleep
+ * will be lost.
*
- * This logic may lead us to enabling RD but not EGSM. The UHCI
- * spec foolishly says that RD works only when EGSM is on, but
- * there's no harm in enabling it anyway -- perhaps some chips
- * will implement it!
+ * In addition, if EGSM is broken then we can't use it. Likewise,
+ * if Resume-Detect interrupts are broken then we can't use them.
+ *
+ * Finally, neither EGSM nor RD is useful by itself. Without EGSM,
+ * the RD status bit will never get set. Without RD, the controller
+ * won't generate interrupts to tell the system about wakeup events.
*/
- if (!wakeup_enable || resume_detect_interrupts_are_broken(uhci) ||
- !int_enable)
- uhci->RD_enable = int_enable = 0;
+ if (!wakeup_enable || global_suspend_mode_is_broken(uhci) ||
+ resume_detect_interrupts_are_broken(uhci))
+ egsm_enable = int_enable = 0;
- outw(int_enable, uhci->io_addr + USBINTR);
- outw(egsm_enable | USBCMD_CF, uhci->io_addr + USBCMD);
+ uhci->RD_enable = !!int_enable;
+ uhci_writew(uhci, int_enable, USBINTR);
+ uhci_writew(uhci, egsm_enable | USBCMD_CF, USBCMD);
mb();
udelay(5);
@@ -330,7 +350,7 @@ __acquires(uhci->lock)
* controller should stop after a few microseconds. Otherwise
* we will give the controller one frame to stop.
*/
- if (!auto_stop && !(inw(uhci->io_addr + USBSTS) & USBSTS_HCH)) {
+ if (!auto_stop && !(uhci_readw(uhci, USBSTS) & USBSTS_HCH)) {
uhci->rh_state = UHCI_RH_SUSPENDING;
spin_unlock_irq(&uhci->lock);
msleep(1);
@@ -338,7 +358,7 @@ __acquires(uhci->lock)
if (uhci->dead)
return;
}
- if (!(inw(uhci->io_addr + USBSTS) & USBSTS_HCH))
+ if (!(uhci_readw(uhci, USBSTS) & USBSTS_HCH))
dev_warn(uhci_dev(uhci), "Controller not stopped yet!\n");
uhci_get_current_frame_number(uhci);
@@ -346,10 +366,12 @@ __acquires(uhci->lock)
uhci->rh_state = new_state;
uhci->is_stopped = UHCI_IS_STOPPED;
- /* If interrupts don't work and remote wakeup is enabled then
- * the suspended root hub needs to be polled.
+ /*
+ * If remote wakeup is enabled but either EGSM or RD interrupts
+ * doesn't work, then we won't get an interrupt when a wakeup event
+ * occurs. Thus the suspended root hub needs to be polled.
*/
- if (!int_enable && wakeup_enable)
+ if (wakeup_enable && (!int_enable || !egsm_enable))
set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
else
clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
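The rewritten comments in this hunk spell out why EGSM and Resume-Detect are now enabled or disabled together. Condensed into straight-line form (illustration only; variable names as in suspend_rh(), poll_rh is a made-up name for the final set_bit/clear_bit decision):

egsm_enable   = USBCMD_EGSM;
int_enable    = USBINTR_RESUME;
wakeup_enable = 1;

if (auto_stop && !device_may_wakeup(&rhdev->dev))
	egsm_enable = int_enable = 0;		/* auto-stop with wakeup off: just poll */
else if (!auto_stop && !rhdev->do_remote_wakeup)
	wakeup_enable = 0;			/* bus suspend, root hub wakeup disabled */

if (!wakeup_enable || global_suspend_mode_is_broken(uhci) ||
    resume_detect_interrupts_are_broken(uhci))
	egsm_enable = int_enable = 0;		/* EGSM and RD are only useful together */

poll_rh = wakeup_enable && (!int_enable || !egsm_enable);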
@@ -360,15 +382,14 @@ __acquires(uhci->lock)
static void start_rh(struct uhci_hcd *uhci)
{
- uhci_to_hcd(uhci)->state = HC_STATE_RUNNING;
uhci->is_stopped = 0;
/* Mark it configured and running with a 64-byte max packet.
* All interrupts are enabled, even though RESUME won't do anything.
*/
- outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, uhci->io_addr + USBCMD);
- outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
- uhci->io_addr + USBINTR);
+ uhci_writew(uhci, USBCMD_RS | USBCMD_CF | USBCMD_MAXP, USBCMD);
+ uhci_writew(uhci, USBINTR_TIMEOUT | USBINTR_RESUME |
+ USBINTR_IOC | USBINTR_SP, USBINTR);
mb();
uhci->rh_state = UHCI_RH_RUNNING;
set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
@@ -391,9 +412,9 @@ __acquires(uhci->lock)
unsigned egsm;
/* Keep EGSM on if it was set before */
- egsm = inw(uhci->io_addr + USBCMD) & USBCMD_EGSM;
+ egsm = uhci_readw(uhci, USBCMD) & USBCMD_EGSM;
uhci->rh_state = UHCI_RH_RESUMING;
- outw(USBCMD_FGR | USBCMD_CF | egsm, uhci->io_addr + USBCMD);
+ uhci_writew(uhci, USBCMD_FGR | USBCMD_CF | egsm, USBCMD);
spin_unlock_irq(&uhci->lock);
msleep(20);
spin_lock_irq(&uhci->lock);
@@ -401,10 +422,10 @@ __acquires(uhci->lock)
return;
/* End Global Resume and wait for EOP to be sent */
- outw(USBCMD_CF, uhci->io_addr + USBCMD);
+ uhci_writew(uhci, USBCMD_CF, USBCMD);
mb();
udelay(4);
- if (inw(uhci->io_addr + USBCMD) & USBCMD_FGR)
+ if (uhci_readw(uhci, USBCMD) & USBCMD_FGR)
dev_warn(uhci_dev(uhci), "FGR not stopped yet!\n");
}
@@ -424,45 +445,48 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
* interrupt cause. Contrary to the UHCI specification, the
* "HC Halted" status bit is persistent: it is RO, not R/WC.
*/
- status = inw(uhci->io_addr + USBSTS);
+ status = uhci_readw(uhci, USBSTS);
if (!(status & ~USBSTS_HCH)) /* shared interrupt, not mine */
return IRQ_NONE;
- outw(status, uhci->io_addr + USBSTS); /* Clear it */
+ uhci_writew(uhci, status, USBSTS); /* Clear it */
+
+ spin_lock(&uhci->lock);
+ if (unlikely(!uhci->is_initialized)) /* not yet configured */
+ goto done;
if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
if (status & USBSTS_HSE)
- dev_err(uhci_dev(uhci), "host system error, "
- "PCI problems?\n");
+ dev_err(uhci_dev(uhci),
+ "host system error, PCI problems?\n");
if (status & USBSTS_HCPE)
- dev_err(uhci_dev(uhci), "host controller process "
- "error, something bad happened!\n");
+ dev_err(uhci_dev(uhci),
+ "host controller process error, something bad happened!\n");
if (status & USBSTS_HCH) {
- spin_lock(&uhci->lock);
if (uhci->rh_state >= UHCI_RH_RUNNING) {
dev_err(uhci_dev(uhci),
- "host controller halted, "
- "very bad!\n");
+ "host controller halted, very bad!\n");
if (debug > 1 && errbuf) {
/* Print the schedule for debugging */
- uhci_sprint_schedule(uhci,
- errbuf, ERRBUF_LEN);
+ uhci_sprint_schedule(uhci, errbuf,
+ ERRBUF_LEN - EXTRA_SPACE);
lprintk(errbuf);
}
uhci_hc_died(uhci);
+ usb_hc_died(hcd);
/* Force a callback in case there are
* pending unlinks */
mod_timer(&hcd->rh_timer, jiffies);
}
- spin_unlock(&uhci->lock);
}
}
- if (status & USBSTS_RD)
+ if (status & USBSTS_RD) {
+ spin_unlock(&uhci->lock);
usb_hcd_poll_rh_status(hcd);
- else {
- spin_lock(&uhci->lock);
+ } else {
uhci_scan_schedule(uhci);
+ done:
spin_unlock(&uhci->lock);
}
@@ -471,7 +495,7 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
/*
* Store the current frame number in uhci->frame_number if the controller
- * is runnning. Expand from 11 bits (of which we use only 10) to a
+ * is running. Expand from 11 bits (of which we use only 10) to a
* full-sized integer.
*
* Like many other parts of the driver, this code relies on being polled
@@ -482,7 +506,7 @@ static void uhci_get_current_frame_number(struct uhci_hcd *uhci)
if (!uhci->is_stopped) {
unsigned delta;
- delta = (inw(uhci->io_addr + USBFRNUM) - uhci->frame_number) &
+ delta = (uhci_readw(uhci, USBFRNUM) - uhci->frame_number) &
(UHCI_NUMFRAMES - 1);
uhci->frame_number += delta;
}
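
As the comment above notes, the hardware counter is only 11 bits wide, so the software copy is advanced by the wrapped difference each time it is sampled; this only works if the code is polled at least once per 1024 frames. A self-contained sketch of that delta expansion (editorial illustration; only UHCI_NUMFRAMES is taken from the driver):

    #include <stdio.h>

    #define UHCI_NUMFRAMES 1024

    static unsigned long expand_frame(unsigned long soft, unsigned hw)
    {
            /* Wrapped distance from the last sampled value. */
            unsigned delta = (hw - soft) & (UHCI_NUMFRAMES - 1);

            return soft + delta;    /* monotonically increasing full-width count */
    }

    int main(void)
    {
            unsigned long frame = 1020;

            frame = expand_frame(frame, 3);   /* hardware wrapped past 1023 */
            printf("%lu\n", frame);           /* prints 1027 */
            return 0;
    }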
@@ -495,13 +519,12 @@ static void release_uhci(struct uhci_hcd *uhci)
{
int i;
- if (DEBUG_CONFIGURED) {
- spin_lock_irq(&uhci->lock);
- uhci->is_initialized = 0;
- spin_unlock_irq(&uhci->lock);
- debugfs_remove(uhci->dentry);
- }
+ spin_lock_irq(&uhci->lock);
+ uhci->is_initialized = 0;
+ spin_unlock_irq(&uhci->lock);
+
+ debugfs_remove(uhci->dentry);
for (i = 0; i < UHCI_NUM_SKELQH; i++)
uhci_free_qh(uhci, uhci->skelqh[i]);
@@ -519,61 +542,6 @@ static void release_uhci(struct uhci_hcd *uhci)
uhci->frame, uhci->frame_dma_handle);
}
-static int uhci_init(struct usb_hcd *hcd)
-{
- struct uhci_hcd *uhci = hcd_to_uhci(hcd);
- unsigned io_size = (unsigned) hcd->rsrc_len;
- int port;
-
- uhci->io_addr = (unsigned long) hcd->rsrc_start;
-
- /* The UHCI spec says devices must have 2 ports, and goes on to say
- * they may have more but gives no way to determine how many there
- * are. However according to the UHCI spec, Bit 7 of the port
- * status and control register is always set to 1. So we try to
- * use this to our advantage. Another common failure mode when
- * a nonexistent register is addressed is to return all ones, so
- * we test for that also.
- */
- for (port = 0; port < (io_size - USBPORTSC1) / 2; port++) {
- unsigned int portstatus;
-
- portstatus = inw(uhci->io_addr + USBPORTSC1 + (port * 2));
- if (!(portstatus & 0x0080) || portstatus == 0xffff)
- break;
- }
- if (debug)
- dev_info(uhci_dev(uhci), "detected %d ports\n", port);
-
- /* Anything greater than 7 is weird so we'll ignore it. */
- if (port > UHCI_RH_MAXCHILD) {
- dev_info(uhci_dev(uhci), "port count misdetected? "
- "forcing to 2 ports\n");
- port = 2;
- }
- uhci->rh_numports = port;
-
- /* Kick BIOS off this hardware and reset if the controller
- * isn't already safely quiescent.
- */
- check_and_reset_hc(uhci);
- return 0;
-}
-
-/* Make sure the controller is quiescent and that we're not using it
- * any more. This is mainly for the benefit of programs which, like kexec,
- * expect the hardware to be idle: not doing DMA or generating IRQs.
- *
- * This routine may be called in a damaged or failing kernel. Hence we
- * do not acquire the spinlock before shutting down the controller.
- */
-static void uhci_shutdown(struct pci_dev *pdev)
-{
- struct usb_hcd *hcd = (struct usb_hcd *) pci_get_drvdata(pdev);
-
- uhci_hc_died(hcd_to_uhci(hcd));
-}
-
/*
* Allocate a frame list, and then setup the skeleton
*
@@ -600,6 +568,9 @@ static int uhci_start(struct usb_hcd *hcd)
struct dentry __maybe_unused *dentry;
hcd->uses_new_polling = 1;
+ /* Accept arbitrarily long scatter-gather lists */
+ if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+ hcd->self.sg_tablesize = ~0;
spin_lock_init(&uhci->lock);
setup_timer(&uhci->fsbr_timer, uhci_fsbr_timeout,
@@ -622,8 +593,8 @@ static int uhci_start(struct usb_hcd *hcd)
UHCI_NUMFRAMES * sizeof(*uhci->frame),
&uhci->frame_dma_handle, 0);
if (!uhci->frame) {
- dev_err(uhci_dev(uhci), "unable to allocate "
- "consistent memory for frame list\n");
+ dev_err(uhci_dev(uhci),
+ "unable to allocate consistent memory for frame list\n");
goto err_alloc_frame;
}
memset(uhci->frame, 0, UHCI_NUMFRAMES * sizeof(*uhci->frame));
@@ -631,8 +602,8 @@ static int uhci_start(struct usb_hcd *hcd)
uhci->frame_cpu = kcalloc(UHCI_NUMFRAMES, sizeof(*uhci->frame_cpu),
GFP_KERNEL);
if (!uhci->frame_cpu) {
- dev_err(uhci_dev(uhci), "unable to allocate "
- "memory for frame pointers\n");
+ dev_err(uhci_dev(uhci),
+ "unable to allocate memory for frame pointers\n");
goto err_alloc_frame_cpu;
}
@@ -668,16 +639,16 @@ static int uhci_start(struct usb_hcd *hcd)
* 8 Interrupt queues; link all higher int queues to int1 = async
*/
for (i = SKEL_ISO + 1; i < SKEL_ASYNC; ++i)
- uhci->skelqh[i]->link = LINK_TO_QH(uhci->skel_async_qh);
- uhci->skel_async_qh->link = UHCI_PTR_TERM;
- uhci->skel_term_qh->link = LINK_TO_QH(uhci->skel_term_qh);
+ uhci->skelqh[i]->link = LINK_TO_QH(uhci, uhci->skel_async_qh);
+ uhci->skel_async_qh->link = UHCI_PTR_TERM(uhci);
+ uhci->skel_term_qh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
/* This dummy TD is to work around a bug in Intel PIIX controllers */
- uhci_fill_td(uhci->term_td, 0, uhci_explen(0) |
+ uhci_fill_td(uhci, uhci->term_td, 0, uhci_explen(0) |
(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
- uhci->term_td->link = UHCI_PTR_TERM;
+ uhci->term_td->link = UHCI_PTR_TERM(uhci);
uhci->skel_async_qh->element = uhci->skel_term_qh->element =
- LINK_TO_TD(uhci->term_td);
+ LINK_TO_TD(uhci, uhci->term_td);
/*
* Fill the frame list: make all entries point to the proper
@@ -695,9 +666,9 @@ static int uhci_start(struct usb_hcd *hcd)
*/
mb();
+ spin_lock_irq(&uhci->lock);
configure_hc(uhci);
uhci->is_initialized = 1;
- spin_lock_irq(&uhci->lock);
start_rh(uhci);
spin_unlock_irq(&uhci->lock);
return 0;
@@ -767,8 +738,8 @@ static int uhci_rh_suspend(struct usb_hcd *hcd)
*/
else if (hcd->self.root_hub->do_remote_wakeup &&
uhci->resuming_ports) {
- dev_dbg(uhci_dev(uhci), "suspend failed because a port "
- "is resuming\n");
+ dev_dbg(uhci_dev(uhci),
+ "suspend failed because a port is resuming\n");
rc = -EBUSY;
} else
suspend_rh(uhci, UHCI_RH_SUSPENDED);
@@ -790,86 +761,6 @@ static int uhci_rh_resume(struct usb_hcd *hcd)
return rc;
}
-static int uhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
-{
- struct uhci_hcd *uhci = hcd_to_uhci(hcd);
- struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci));
- int rc = 0;
-
- dev_dbg(uhci_dev(uhci), "%s\n", __func__);
-
- spin_lock_irq(&uhci->lock);
- if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
- goto done_okay; /* Already suspended or dead */
-
- if (uhci->rh_state > UHCI_RH_SUSPENDED) {
- dev_warn(uhci_dev(uhci), "Root hub isn't suspended!\n");
- rc = -EBUSY;
- goto done;
- };
-
- /* All PCI host controllers are required to disable IRQ generation
- * at the source, so we must turn off PIRQ.
- */
- pci_write_config_word(pdev, USBLEGSUP, 0);
- clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-
- /* Enable platform-specific non-PME# wakeup */
- if (do_wakeup) {
- if (pdev->vendor == PCI_VENDOR_ID_INTEL)
- pci_write_config_byte(pdev, USBRES_INTEL,
- USBPORT1EN | USBPORT2EN);
- }
-
-done_okay:
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-done:
- spin_unlock_irq(&uhci->lock);
- return rc;
-}
-
-static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
-{
- struct uhci_hcd *uhci = hcd_to_uhci(hcd);
-
- dev_dbg(uhci_dev(uhci), "%s\n", __func__);
-
- /* Since we aren't in D3 any more, it's safe to set this flag
- * even if the controller was dead.
- */
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
- spin_lock_irq(&uhci->lock);
-
- /* Make sure resume from hibernation re-enumerates everything */
- if (hibernated)
- uhci_hc_died(uhci);
-
- /* The firmware or a boot kernel may have changed the controller
- * settings during a system wakeup. Check it and reconfigure
- * to avoid problems.
- */
- check_and_reset_hc(uhci);
-
- /* If the controller was dead before, it's back alive now */
- configure_hc(uhci);
-
- /* Tell the core if the controller had to be reset */
- if (uhci->rh_state == UHCI_RH_RESET)
- usb_root_hub_lost_power(hcd->self.root_hub);
-
- spin_unlock_irq(&uhci->lock);
-
- /* If interrupts don't work and remote wakeup is enabled then
- * the suspended root hub needs to be polled.
- */
- if (!uhci->RD_enable && hcd->self.root_hub->do_remote_wakeup)
- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-
- /* Does the root hub have a port wakeup pending? */
- usb_hcd_poll_rh_status(hcd);
- return 0;
-}
#endif
/* Wait until a particular device/endpoint's QH is idle, and free it */
@@ -907,67 +798,67 @@ static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
/* Minimize latency by avoiding the spinlock */
frame_number = uhci->frame_number;
barrier();
- delta = (inw(uhci->io_addr + USBFRNUM) - frame_number) &
+ delta = (uhci_readw(uhci, USBFRNUM) - frame_number) &
(UHCI_NUMFRAMES - 1);
return frame_number + delta;
}
-static const char hcd_name[] = "uhci_hcd";
-
-static const struct hc_driver uhci_driver = {
- .description = hcd_name,
- .product_desc = "UHCI Host Controller",
- .hcd_priv_size = sizeof(struct uhci_hcd),
-
- /* Generic hardware linkage */
- .irq = uhci_irq,
- .flags = HCD_USB11,
+/* Determines number of ports on controller */
+static int uhci_count_ports(struct usb_hcd *hcd)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ unsigned io_size = (unsigned) hcd->rsrc_len;
+ int port;
- /* Basic lifecycle operations */
- .reset = uhci_init,
- .start = uhci_start,
-#ifdef CONFIG_PM
- .pci_suspend = uhci_pci_suspend,
- .pci_resume = uhci_pci_resume,
- .bus_suspend = uhci_rh_suspend,
- .bus_resume = uhci_rh_resume,
-#endif
- .stop = uhci_stop,
+ /* The UHCI spec says devices must have 2 ports, and goes on to say
+ * they may have more but gives no way to determine how many there
+ * are. However according to the UHCI spec, Bit 7 of the port
+ * status and control register is always set to 1. So we try to
+ * use this to our advantage. Another common failure mode when
+ * a nonexistent register is addressed is to return all ones, so
+ * we test for that also.
+ */
+ for (port = 0; port < (io_size - USBPORTSC1) / 2; port++) {
+ unsigned int portstatus;
- .urb_enqueue = uhci_urb_enqueue,
- .urb_dequeue = uhci_urb_dequeue,
+ portstatus = uhci_readw(uhci, USBPORTSC1 + (port * 2));
+ if (!(portstatus & 0x0080) || portstatus == 0xffff)
+ break;
+ }
+ if (debug)
+ dev_info(uhci_dev(uhci), "detected %d ports\n", port);
- .endpoint_disable = uhci_hcd_endpoint_disable,
- .get_frame_number = uhci_hcd_get_frame_number,
+ /* Anything greater than 7 is weird so we'll ignore it. */
+ if (port > UHCI_RH_MAXCHILD) {
+ dev_info(uhci_dev(uhci),
+ "port count misdetected? forcing to 2 ports\n");
+ port = 2;
+ }
- .hub_status_data = uhci_hub_status_data,
- .hub_control = uhci_hub_control,
-};
+ return port;
+}
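
The loop above leans on two properties called out in the comment: bit 7 of a real port-status register always reads as 1, and a nonexistent register commonly reads back as all ones. The same heuristic against a mock register bank (editorial illustration; all names are invented):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_SLOTS 8

    /* Mock PORTSC bank: two real ports, then 0xffff for absent registers. */
    static const uint16_t mock_portsc[MAX_SLOTS] = {
            0x0080, 0x0080, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    };

    static int count_ports(const uint16_t *portsc, int max_slots)
    {
            int port;

            for (port = 0; port < max_slots; port++) {
                    uint16_t status = portsc[port];

                    /* Bit 7 must read 1; all-ones means "no register here". */
                    if (!(status & 0x0080) || status == 0xffff)
                            break;
            }
            /* More than 7 ports would be a misdetection; fall back to 2. */
            if (port > 7)
                    port = 2;
            return port;
    }

    int main(void)
    {
            printf("detected %d ports\n", count_ports(mock_portsc, MAX_SLOTS));
            return 0;
    }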
-static const struct pci_device_id uhci_pci_ids[] = { {
- /* handle any USB UHCI controller */
- PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
- .driver_data = (unsigned long) &uhci_driver,
- }, { /* end: all zeroes */ }
-};
+static const char hcd_name[] = "uhci_hcd";
-MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
+#ifdef CONFIG_PCI
+#include "uhci-pci.c"
+#define PCI_DRIVER uhci_pci_driver
+#endif
-static struct pci_driver uhci_pci_driver = {
- .name = (char *)hcd_name,
- .id_table = uhci_pci_ids,
+#ifdef CONFIG_SPARC_LEON
+#include "uhci-grlib.c"
+#define PLATFORM_DRIVER uhci_grlib_driver
+#endif
- .probe = usb_hcd_pci_probe,
- .remove = usb_hcd_pci_remove,
- .shutdown = uhci_shutdown,
+#ifdef CONFIG_USB_UHCI_PLATFORM
+#include "uhci-platform.c"
+#define PLATFORM_DRIVER uhci_platform_driver
+#endif
-#ifdef CONFIG_PM_SLEEP
- .driver = {
- .pm = &usb_hcd_pci_pm_ops
- },
+#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER)
+#error "missing bus glue for uhci-hcd"
#endif
-};
-
+
static int __init uhci_hcd_init(void)
{
int retval = -ENOMEM;
@@ -979,36 +870,52 @@ static int __init uhci_hcd_init(void)
ignore_oc ? ", overcurrent ignored" : "");
set_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
- if (DEBUG_CONFIGURED) {
- errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
- if (!errbuf)
- goto errbuf_failed;
- uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
- if (!uhci_debugfs_root)
- goto debug_failed;
- }
+#ifdef CONFIG_DYNAMIC_DEBUG
+ errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
+ if (!errbuf)
+ goto errbuf_failed;
+ uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
+ if (!uhci_debugfs_root)
+ goto debug_failed;
+#endif
uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
sizeof(struct urb_priv), 0, 0, NULL);
if (!uhci_up_cachep)
goto up_failed;
- retval = pci_register_driver(&uhci_pci_driver);
- if (retval)
- goto init_failed;
+#ifdef PLATFORM_DRIVER
+ retval = platform_driver_register(&PLATFORM_DRIVER);
+ if (retval < 0)
+ goto clean0;
+#endif
+
+#ifdef PCI_DRIVER
+ retval = pci_register_driver(&PCI_DRIVER);
+ if (retval < 0)
+ goto clean1;
+#endif
return 0;
-init_failed:
+#ifdef PCI_DRIVER
+clean1:
+#endif
+#ifdef PLATFORM_DRIVER
+ platform_driver_unregister(&PLATFORM_DRIVER);
+clean0:
+#endif
kmem_cache_destroy(uhci_up_cachep);
up_failed:
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
debugfs_remove(uhci_debugfs_root);
debug_failed:
kfree(errbuf);
errbuf_failed:
+#endif
clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
return retval;
@@ -1016,10 +923,17 @@ errbuf_failed:
static void __exit uhci_hcd_cleanup(void)
{
- pci_unregister_driver(&uhci_pci_driver);
+#ifdef PLATFORM_DRIVER
+ platform_driver_unregister(&PLATFORM_DRIVER);
+#endif
+#ifdef PCI_DRIVER
+ pci_unregister_driver(&PCI_DRIVER);
+#endif
kmem_cache_destroy(uhci_up_cachep);
debugfs_remove(uhci_debugfs_root);
+#ifdef CONFIG_DYNAMIC_DEBUG
kfree(errbuf);
+#endif
clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
}
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 49bf2790f9c..6f986d82472 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -78,11 +78,11 @@
#define USBPORT1EN 0x01
#define USBPORT2EN 0x02
-#define UHCI_PTR_BITS cpu_to_le32(0x000F)
-#define UHCI_PTR_TERM cpu_to_le32(0x0001)
-#define UHCI_PTR_QH cpu_to_le32(0x0002)
-#define UHCI_PTR_DEPTH cpu_to_le32(0x0004)
-#define UHCI_PTR_BREADTH cpu_to_le32(0x0000)
+#define UHCI_PTR_BITS(uhci) cpu_to_hc32((uhci), 0x000F)
+#define UHCI_PTR_TERM(uhci) cpu_to_hc32((uhci), 0x0001)
+#define UHCI_PTR_QH(uhci) cpu_to_hc32((uhci), 0x0002)
+#define UHCI_PTR_DEPTH(uhci) cpu_to_hc32((uhci), 0x0004)
+#define UHCI_PTR_BREADTH(uhci) cpu_to_hc32((uhci), 0x0000)
#define UHCI_NUMFRAMES 1024 /* in the frame list [array] */
#define UHCI_MAX_SOF_NUMBER 2047 /* in an SOF packet */
@@ -99,6 +99,22 @@
/*
+ * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
+ * __leXX (normally) or __beXX (given UHCI_BIG_ENDIAN_DESC), depending on
+ * the host controller implementation.
+ *
+ * To facilitate the strongest possible byte-order checking from "sparse"
+ * and so on, we use __leXX unless that's not practical.
+ */
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_DESC
+typedef __u32 __bitwise __hc32;
+typedef __u16 __bitwise __hc16;
+#else
+#define __hc32 __le32
+#define __hc16 __le16
+#endif
+
+/*
* Queue Headers
*/
@@ -130,8 +146,8 @@
struct uhci_qh {
/* Hardware fields */
- __le32 link; /* Next QH in the schedule */
- __le32 element; /* Queue element (TD) pointer */
+ __hc32 link; /* Next QH in the schedule */
+ __hc32 element; /* Queue element (TD) pointer */
/* Software fields */
dma_addr_t dma_handle;
@@ -168,14 +184,10 @@ struct uhci_qh {
* We need a special accessor for the element pointer because it is
* subject to asynchronous updates by the controller.
*/
-static inline __le32 qh_element(struct uhci_qh *qh) {
- __le32 element = qh->element;
+#define qh_element(qh) ACCESS_ONCE((qh)->element)
- barrier();
- return element;
-}
-
-#define LINK_TO_QH(qh) (UHCI_PTR_QH | cpu_to_le32((qh)->dma_handle))
+#define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \
+ cpu_to_hc32((uhci), (qh)->dma_handle))
/*
@@ -200,10 +212,6 @@ static inline __le32 qh_element(struct uhci_qh *qh) {
#define TD_CTRL_BITSTUFF (1 << 17) /* Bit Stuff Error */
#define TD_CTRL_ACTLEN_MASK 0x7FF /* actual length, encoded as n - 1 */
-#define TD_CTRL_ANY_ERROR (TD_CTRL_STALLED | TD_CTRL_DBUFERR | \
- TD_CTRL_BABBLE | TD_CTRL_CRCTIME | \
- TD_CTRL_BITSTUFF)
-
#define uhci_maxerr(err) ((err) << TD_CTRL_C_ERR_SHIFT)
#define uhci_status_bits(ctrl_sts) ((ctrl_sts) & 0xF60000)
#define uhci_actual_length(ctrl_sts) (((ctrl_sts) + 1) & \
@@ -212,7 +220,7 @@ static inline __le32 qh_element(struct uhci_qh *qh) {
/*
* for TD <info>: (a.k.a. Token)
*/
-#define td_token(td) le32_to_cpu((td)->token)
+#define td_token(uhci, td) hc32_to_cpu((uhci), (td)->token)
#define TD_TOKEN_DEVADDR_SHIFT 8
#define TD_TOKEN_TOGGLE_SHIFT 19
#define TD_TOKEN_TOGGLE (1 << 19)
@@ -245,10 +253,10 @@ static inline __le32 qh_element(struct uhci_qh *qh) {
*/
struct uhci_td {
/* Hardware fields */
- __le32 link;
- __le32 status;
- __le32 token;
- __le32 buffer;
+ __hc32 link;
+ __hc32 status;
+ __hc32 token;
+ __hc32 buffer;
/* Software fields */
dma_addr_t dma_handle;
@@ -263,14 +271,10 @@ struct uhci_td {
* We need a special accessor for the control/status word because it is
* subject to asynchronous updates by the controller.
*/
-static inline u32 td_status(struct uhci_td *td) {
- __le32 status = td->status;
-
- barrier();
- return le32_to_cpu(status);
-}
+#define td_status(uhci, td) hc32_to_cpu((uhci), \
+ ACCESS_ONCE((td)->status))
-#define LINK_TO_TD(td) (cpu_to_le32((td)->dma_handle))
+#define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle))
/*
@@ -380,6 +384,9 @@ struct uhci_hcd {
/* Grabbed from PCI */
unsigned long io_addr;
+ /* Used when registers are memory mapped */
+ void __iomem *regs;
+
struct dma_pool *qh_pool;
struct dma_pool *td_pool;
@@ -390,7 +397,7 @@ struct uhci_hcd {
spinlock_t lock;
dma_addr_t frame_dma_handle; /* Hardware frame list */
- __le32 *frame;
+ __hc32 *frame;
void **frame_cpu; /* CPU's frame list */
enum uhci_rh_state rh_state;
@@ -415,6 +422,12 @@ struct uhci_hcd {
struct timer_list fsbr_timer; /* For turning off FBSR */
+ /* Silicon quirks */
+ unsigned int oc_low:1; /* OverCurrent bit active low */
+ unsigned int wait_for_hp:1; /* Wait for HP port reset */
+ unsigned int big_endian_mmio:1; /* Big endian registers */
+ unsigned int big_endian_desc:1; /* Big endian descriptors */
+
/* Support for port suspend/resume/reset */
unsigned long port_c_suspend; /* Bit-arrays of ports */
unsigned long resuming_ports;
@@ -429,6 +442,16 @@ struct uhci_hcd {
int total_load; /* Sum of array values */
short load[MAX_PHASE]; /* Periodic allocations */
+
+ /* Reset host controller */
+ void (*reset_hc) (struct uhci_hcd *uhci);
+ int (*check_and_reset_hc) (struct uhci_hcd *uhci);
+ /* configure_hc should perform arch specific settings, if needed */
+ void (*configure_hc) (struct uhci_hcd *uhci);
+ /* Check for broken resume detect interrupts */
+ int (*resume_detect_interrupts_are_broken) (struct uhci_hcd *uhci);
+ /* Check for broken global suspend */
+ int (*global_suspend_mode_is_broken) (struct uhci_hcd *uhci);
};
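
The new function pointers above let each bus glue (PCI, GRLIB, generic platform) supply its own reset and quirk checks, with NULL meaning "nothing special to do". A minimal sketch of how such optional hooks are typically dispatched by common code (editorial illustration; fake_uhci and the mock glue are invented names, not driver code):

    #include <stddef.h>
    #include <stdio.h>

    struct fake_uhci {
            void (*configure_hc)(struct fake_uhci *uhci);
            int (*global_suspend_mode_is_broken)(struct fake_uhci *uhci);
    };

    /* Common code: run the bus-specific step only if the glue provides one. */
    static void configure_hc(struct fake_uhci *uhci)
    {
            /* ...shared register setup would happen here... */
            if (uhci->configure_hc)
                    uhci->configure_hc(uhci);
    }

    static int global_suspend_mode_is_broken(struct fake_uhci *uhci)
    {
            /* No hook means "not broken" by default. */
            return uhci->global_suspend_mode_is_broken ?
                    uhci->global_suspend_mode_is_broken(uhci) : 0;
    }

    static void pci_configure(struct fake_uhci *uhci)
    {
            printf("PCI-specific PIRQ/wakeup configuration\n");
    }

    int main(void)
    {
            struct fake_uhci pci_glue = { .configure_hc = pci_configure };
            struct fake_uhci platform_glue = { 0 };

            configure_hc(&pci_glue);        /* runs the PCI hook */
            configure_hc(&platform_glue);   /* silently skips */
            printf("EGSM broken? %d\n",
                   global_suspend_mode_is_broken(&platform_glue));
            return 0;
    }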
/* Convert between a usb_hcd pointer and the corresponding uhci_hcd */
@@ -467,4 +490,171 @@ struct urb_priv {
#define PCI_VENDOR_ID_GENESYS 0x17a0
#define PCI_DEVICE_ID_GL880S_UHCI 0x8083
+/*
+ * Functions used to access controller registers. The UHCI spec says that host
+ * controller I/O registers are mapped into PCI I/O space. For non-PCI hosts
+ * we use memory mapped registers.
+ */
+
+#ifndef CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC
+/* Support PCI only */
+static inline u32 uhci_readl(const struct uhci_hcd *uhci, int reg)
+{
+ return inl(uhci->io_addr + reg);
+}
+
+static inline void uhci_writel(const struct uhci_hcd *uhci, u32 val, int reg)
+{
+ outl(val, uhci->io_addr + reg);
+}
+
+static inline u16 uhci_readw(const struct uhci_hcd *uhci, int reg)
+{
+ return inw(uhci->io_addr + reg);
+}
+
+static inline void uhci_writew(const struct uhci_hcd *uhci, u16 val, int reg)
+{
+ outw(val, uhci->io_addr + reg);
+}
+
+static inline u8 uhci_readb(const struct uhci_hcd *uhci, int reg)
+{
+ return inb(uhci->io_addr + reg);
+}
+
+static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg)
+{
+ outb(val, uhci->io_addr + reg);
+}
+
+#else
+/* Support non-PCI host controllers */
+#ifdef CONFIG_PCI
+/* Support PCI and non-PCI host controllers */
+#define uhci_has_pci_registers(u) ((u)->io_addr != 0)
+#else
+/* Support non-PCI host controllers only */
+#define uhci_has_pci_registers(u) 0
+#endif
+
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+/* Support (non-PCI) big endian host controllers */
+#define uhci_big_endian_mmio(u) ((u)->big_endian_mmio)
+#else
+#define uhci_big_endian_mmio(u) 0
+#endif
+
+static inline u32 uhci_readl(const struct uhci_hcd *uhci, int reg)
+{
+ if (uhci_has_pci_registers(uhci))
+ return inl(uhci->io_addr + reg);
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+ else if (uhci_big_endian_mmio(uhci))
+ return readl_be(uhci->regs + reg);
+#endif
+ else
+ return readl(uhci->regs + reg);
+}
+
+static inline void uhci_writel(const struct uhci_hcd *uhci, u32 val, int reg)
+{
+ if (uhci_has_pci_registers(uhci))
+ outl(val, uhci->io_addr + reg);
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+ else if (uhci_big_endian_mmio(uhci))
+ writel_be(val, uhci->regs + reg);
+#endif
+ else
+ writel(val, uhci->regs + reg);
+}
+
+static inline u16 uhci_readw(const struct uhci_hcd *uhci, int reg)
+{
+ if (uhci_has_pci_registers(uhci))
+ return inw(uhci->io_addr + reg);
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+ else if (uhci_big_endian_mmio(uhci))
+ return readw_be(uhci->regs + reg);
+#endif
+ else
+ return readw(uhci->regs + reg);
+}
+
+static inline void uhci_writew(const struct uhci_hcd *uhci, u16 val, int reg)
+{
+ if (uhci_has_pci_registers(uhci))
+ outw(val, uhci->io_addr + reg);
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+ else if (uhci_big_endian_mmio(uhci))
+ writew_be(val, uhci->regs + reg);
+#endif
+ else
+ writew(val, uhci->regs + reg);
+}
+
+static inline u8 uhci_readb(const struct uhci_hcd *uhci, int reg)
+{
+ if (uhci_has_pci_registers(uhci))
+ return inb(uhci->io_addr + reg);
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+ else if (uhci_big_endian_mmio(uhci))
+ return readb_be(uhci->regs + reg);
+#endif
+ else
+ return readb(uhci->regs + reg);
+}
+
+static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg)
+{
+ if (uhci_has_pci_registers(uhci))
+ outb(val, uhci->io_addr + reg);
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+ else if (uhci_big_endian_mmio(uhci))
+ writeb_be(val, uhci->regs + reg);
+#endif
+ else
+ writeb(val, uhci->regs + reg);
+}
+#endif /* CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC */
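
The accessors above choose between x86 port I/O (a PCI controller leaves io_addr nonzero) and memory-mapped I/O, optionally byte-swapped. A reduced userspace model of that dispatch, with mock reads standing in for inw()/readw() (editorial illustration only; not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    struct fake_uhci {
            unsigned long io_addr;   /* nonzero => PCI port I/O */
            const uint16_t *regs;    /* otherwise memory-mapped registers */
    };

    static uint16_t mock_inw(unsigned long port)
    {
            (void)port;
            return 0x0080;           /* pretend PORTSC read over port I/O */
    }

    static uint16_t mock_readw(const uint16_t *addr)
    {
            return *addr;            /* MMIO read */
    }

    static uint16_t fake_readw(const struct fake_uhci *uhci, int reg)
    {
            if (uhci->io_addr)
                    return mock_inw(uhci->io_addr + reg);
            return mock_readw(uhci->regs + reg / 2);
    }

    int main(void)
    {
            static const uint16_t mmio_regs[8] = { [1] = 0x0085 };
            struct fake_uhci pci_hc  = { .io_addr = 0xc020 };
            struct fake_uhci mmio_hc = { .regs = mmio_regs };

            printf("PCI  read: %#06x\n", fake_readw(&pci_hc, 2));
            printf("MMIO read: %#06x\n", fake_readw(&mmio_hc, 2));
            return 0;
    }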
+
+/*
+ * The GRLIB GRUSBHC controller can use big endian format for its descriptors.
+ *
+ * UHCI controllers accessed through PCI work normally (little-endian
+ * everywhere), so we don't bother supporting a BE-only mode.
+ */
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_DESC
+#define uhci_big_endian_desc(u) ((u)->big_endian_desc)
+
+/* cpu to uhci */
+static inline __hc32 cpu_to_hc32(const struct uhci_hcd *uhci, const u32 x)
+{
+ return uhci_big_endian_desc(uhci)
+ ? (__force __hc32)cpu_to_be32(x)
+ : (__force __hc32)cpu_to_le32(x);
+}
+
+/* uhci to cpu */
+static inline u32 hc32_to_cpu(const struct uhci_hcd *uhci, const __hc32 x)
+{
+ return uhci_big_endian_desc(uhci)
+ ? be32_to_cpu((__force __be32)x)
+ : le32_to_cpu((__force __le32)x);
+}
+
+#else
+/* cpu to uhci */
+static inline __hc32 cpu_to_hc32(const struct uhci_hcd *uhci, const u32 x)
+{
+ return cpu_to_le32(x);
+}
+
+/* uhci to cpu */
+static inline u32 hc32_to_cpu(const struct uhci_hcd *uhci, const __hc32 x)
+{
+ return le32_to_cpu(x);
+}
+#endif
+
#endif
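
The cpu_to_hc32()/hc32_to_cpu() helpers added above select the descriptor byte order per controller at run time. A host-endianness-independent sketch of what that choice means for the bytes the controller actually sees in memory (editorial illustration; fake_uhci stands in for struct uhci_hcd):

    #include <stdint.h>
    #include <stdio.h>

    struct fake_uhci {
            int big_endian_desc;     /* GRLIB GRUSBHC may set this */
    };

    /* Store a CPU value into a 4-byte descriptor field in the controller's
     * byte order, independent of the host's own endianness. */
    static void hc32_store(const struct fake_uhci *uhci, uint8_t *desc, uint32_t v)
    {
            int i;

            for (i = 0; i < 4; i++) {
                    int shift = uhci->big_endian_desc ? 24 - 8 * i : 8 * i;

                    desc[i] = (v >> shift) & 0xff;
            }
    }

    static void dump(const char *tag, const uint8_t *desc)
    {
            printf("%s: %02x %02x %02x %02x\n", tag,
                   desc[0], desc[1], desc[2], desc[3]);
    }

    int main(void)
    {
            struct fake_uhci le_hc = { .big_endian_desc = 0 };
            struct fake_uhci be_hc = { .big_endian_desc = 1 };
            uint8_t link[4];

            hc32_store(&le_hc, link, 0x12345678);   /* normal PCI UHCI */
            dump("LE desc", link);                  /* 78 56 34 12 */

            hc32_store(&be_hc, link, 0x12345678);   /* big-endian GRUSBHC */
            dump("BE desc", link);                  /* 12 34 56 78 */
            return 0;
    }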
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 6d59c0f77f2..93e17b12fb3 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -21,8 +21,8 @@ static const __u8 root_hub_hub_des[] =
0x00, /* (per-port OC, no power switching) */
0x01, /* __u8 bPwrOn2pwrGood; 2ms */
0x00, /* __u8 bHubContrCurrent; 0 mA */
- 0x00, /* __u8 DeviceRemovable; *** 7 Ports max *** */
- 0xff /* __u8 PortPwrCtrlMask; *** 7 ports max *** */
+ 0x00, /* __u8 DeviceRemovable; *** 7 Ports max */
+ 0xff /* __u8 PortPwrCtrlMask; *** 7 ports max */
};
#define UHCI_RH_MAXCHILD 7
@@ -44,7 +44,7 @@ static int any_ports_active(struct uhci_hcd *uhci)
int port;
for (port = 0; port < uhci->rh_numports; ++port) {
- if ((inw(uhci->io_addr + USBPORTSC1 + port * 2) &
+ if ((uhci_readw(uhci, USBPORTSC1 + port * 2) &
(USBPORTSC_CCS | RWC_BITS)) ||
test_bit(port, &uhci->port_c_suspend))
return 1;
@@ -68,27 +68,25 @@ static inline int get_hub_status_data(struct uhci_hcd *uhci, char *buf)
*buf = 0;
for (port = 0; port < uhci->rh_numports; ++port) {
- if ((inw(uhci->io_addr + USBPORTSC1 + port * 2) & mask) ||
+ if ((uhci_readw(uhci, USBPORTSC1 + port * 2) & mask) ||
test_bit(port, &uhci->port_c_suspend))
*buf |= (1 << (port + 1));
}
return !!*buf;
}
-#define OK(x) len = (x); break
-
#define CLR_RH_PORTSTAT(x) \
- status = inw(port_addr); \
+ status = uhci_readw(uhci, port_addr); \
status &= ~(RWC_BITS|WZ_BITS); \
status &= ~(x); \
status |= RWC_BITS & (x); \
- outw(status, port_addr)
+ uhci_writew(uhci, status, port_addr)
#define SET_RH_PORTSTAT(x) \
- status = inw(port_addr); \
+ status = uhci_readw(uhci, port_addr); \
status |= (x); \
status &= ~(RWC_BITS|WZ_BITS); \
- outw(status, port_addr)
+ uhci_writew(uhci, status, port_addr)
/* UHCI controllers don't automatically stop resume signalling after 20 msec,
* so we have to poll and check timeouts in order to take care of it.
@@ -99,7 +97,7 @@ static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,
int status;
int i;
- if (inw(port_addr) & SUSPEND_BITS) {
+ if (uhci_readw(uhci, port_addr) & SUSPEND_BITS) {
CLR_RH_PORTSTAT(SUSPEND_BITS);
if (test_bit(port, &uhci->resuming_ports))
set_bit(port, &uhci->port_c_suspend);
@@ -110,23 +108,24 @@ static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,
* Experiments show that some controllers take longer, so
* we'll poll for completion. */
for (i = 0; i < 10; ++i) {
- if (!(inw(port_addr) & SUSPEND_BITS))
+ if (!(uhci_readw(uhci, port_addr) & SUSPEND_BITS))
break;
udelay(1);
}
}
clear_bit(port, &uhci->resuming_ports);
+ usb_hcd_end_port_resume(&uhci_to_hcd(uhci)->self, port);
}
/* Wait for the UHCI controller in HP's iLO2 server management chip.
* It can take up to 250 us to finish a reset and set the CSC bit.
*/
-static void wait_for_HP(unsigned long port_addr)
+static void wait_for_HP(struct uhci_hcd *uhci, unsigned long port_addr)
{
int i;
for (i = 10; i < 250; i += 10) {
- if (inw(port_addr) & USBPORTSC_CSC)
+ if (uhci_readw(uhci, port_addr) & USBPORTSC_CSC)
return;
udelay(10);
}
@@ -140,8 +139,8 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
int status;
for (port = 0; port < uhci->rh_numports; ++port) {
- port_addr = uhci->io_addr + USBPORTSC1 + 2 * port;
- status = inw(port_addr);
+ port_addr = USBPORTSC1 + 2 * port;
+ status = uhci_readw(uhci, port_addr);
if (unlikely(status & USBPORTSC_PR)) {
if (time_after_eq(jiffies, uhci->ports_timeout)) {
CLR_RH_PORTSTAT(USBPORTSC_PR);
@@ -149,9 +148,8 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
/* HP's server management chip requires
* a longer delay. */
- if (to_pci_dev(uhci_dev(uhci))->vendor ==
- PCI_VENDOR_ID_HP)
- wait_for_HP(port_addr);
+ if (uhci->wait_for_hp)
+ wait_for_HP(uhci, port_addr);
/* If the port was enabled before, turning
* reset on caused a port enable change.
@@ -168,6 +166,8 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
set_bit(port, &uhci->resuming_ports);
uhci->ports_timeout = jiffies +
msecs_to_jiffies(25);
+ usb_hcd_start_port_resume(
+ &uhci_to_hcd(uhci)->self, port);
/* Make sure we see the port again
* after the resuming period is over. */
@@ -197,11 +197,12 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
status = get_hub_status_data(uhci, buf);
switch (uhci->rh_state) {
- case UHCI_RH_SUSPENDING:
case UHCI_RH_SUSPENDED:
/* if port change, ask to be resumed */
- if (status || uhci->resuming_ports)
+ if (status || uhci->resuming_ports) {
+ status = 1;
usb_hcd_resume_root_hub(hcd);
+ }
break;
case UHCI_RH_AUTO_STOPPED:
@@ -222,7 +223,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
/* auto-stop if nothing connected for 1 second */
if (any_ports_active(uhci))
uhci->rh_state = UHCI_RH_RUNNING;
- else if (time_after_eq(jiffies, uhci->auto_stop_time))
+ else if (time_after_eq(jiffies, uhci->auto_stop_time) &&
+ !uhci->wait_for_hp)
suspend_rh(uhci, UHCI_RH_AUTO_STOPPED);
break;
@@ -240,9 +242,9 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
- int status, lstatus, retval = 0, len = 0;
+ int status, lstatus, retval = 0;
unsigned int port = wIndex - 1;
- unsigned long port_addr = uhci->io_addr + USBPORTSC1 + 2 * port;
+ unsigned long port_addr = USBPORTSC1 + 2 * port;
u16 wPortChange, wPortStatus;
unsigned long flags;
@@ -254,20 +256,20 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case GetHubStatus:
*(__le32 *)buf = cpu_to_le32(0);
- OK(4); /* hub power */
+ retval = 4; /* hub power */
+ break;
case GetPortStatus:
if (port >= uhci->rh_numports)
goto err;
uhci_check_ports(uhci);
- status = inw(port_addr);
+ status = uhci_readw(uhci, port_addr);
/* Intel controllers report the OverCurrent bit active on.
* VIA controllers report it active off, so we'll adjust the
* bit value. (It's not standardized in the UHCI spec.)
*/
- if (to_pci_dev(hcd->self.controller)->vendor ==
- PCI_VENDOR_ID_VIA)
+ if (uhci->oc_low)
status ^= USBPORTSC_OC;
/* UHCI doesn't support C_RESET (always false) */
@@ -308,13 +310,14 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
*(__le16 *)buf = cpu_to_le16(wPortStatus);
*(__le16 *)(buf + 2) = cpu_to_le16(wPortChange);
- OK(4);
+ retval = 4;
+ break;
case SetHubFeature: /* We don't implement these */
case ClearHubFeature:
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
- OK(0);
+ break;
default:
goto err;
}
@@ -326,7 +329,7 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
SET_RH_PORTSTAT(USBPORTSC_SUSP);
- OK(0);
+ break;
case USB_PORT_FEAT_RESET:
SET_RH_PORTSTAT(USBPORTSC_PR);
@@ -335,10 +338,10 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* USB v2.0 7.1.7.5 */
uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
- OK(0);
+ break;
case USB_PORT_FEAT_POWER:
/* UHCI has no power switching */
- OK(0);
+ break;
default:
goto err;
}
@@ -353,12 +356,12 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* Disable terminates Resume signalling */
uhci_finish_suspend(uhci, port, port_addr);
- OK(0);
+ break;
case USB_PORT_FEAT_C_ENABLE:
CLR_RH_PORTSTAT(USBPORTSC_PEC);
- OK(0);
+ break;
case USB_PORT_FEAT_SUSPEND:
- if (!(inw(port_addr) & USBPORTSC_SUSP)) {
+ if (!(uhci_readw(uhci, port_addr) & USBPORTSC_SUSP)) {
/* Make certain the port isn't suspended */
uhci_finish_suspend(uhci, port, port_addr);
@@ -370,7 +373,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* if the port is disabled. When this happens
* just skip the Resume signalling.
*/
- if (!(inw(port_addr) & USBPORTSC_RD))
+ if (!(uhci_readw(uhci, port_addr) &
+ USBPORTSC_RD))
uhci_finish_suspend(uhci, port,
port_addr);
else
@@ -378,32 +382,32 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
uhci->ports_timeout = jiffies +
msecs_to_jiffies(20);
}
- OK(0);
+ break;
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(port, &uhci->port_c_suspend);
- OK(0);
+ break;
case USB_PORT_FEAT_POWER:
/* UHCI has no power switching */
goto err;
case USB_PORT_FEAT_C_CONNECTION:
CLR_RH_PORTSTAT(USBPORTSC_CSC);
- OK(0);
+ break;
case USB_PORT_FEAT_C_OVER_CURRENT:
CLR_RH_PORTSTAT(USBPORTSC_OCC);
- OK(0);
+ break;
case USB_PORT_FEAT_C_RESET:
/* this driver won't report these */
- OK(0);
+ break;
default:
goto err;
}
break;
case GetHubDescriptor:
- len = min_t(unsigned int, sizeof(root_hub_hub_des), wLength);
- memcpy(buf, root_hub_hub_des, len);
- if (len > 2)
+ retval = min_t(unsigned int, sizeof(root_hub_hub_des), wLength);
+ memcpy(buf, root_hub_hub_des, retval);
+ if (retval > 2)
buf[2] = uhci->rh_numports;
- OK(len);
+ break;
default:
err:
retval = -EPIPE;
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
new file mode 100644
index 00000000000..940304c3322
--- /dev/null
+++ b/drivers/usb/host/uhci-pci.c
@@ -0,0 +1,306 @@
+/*
+ * UHCI HCD (Host Controller Driver) PCI Bus Glue.
+ *
+ * Extracted from uhci-hcd.c:
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
+ * (C) Copyright 1999 Randy Dunlap
+ * (C) Copyright 1999 Georg Acher, acher@in.tum.de
+ * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
+ * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
+ * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
+ * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
+ * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
+ * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
+ */
+
+#include "pci-quirks.h"
+
+/*
+ * Make sure the controller is completely inactive, unable to
+ * generate interrupts or do DMA.
+ */
+static void uhci_pci_reset_hc(struct uhci_hcd *uhci)
+{
+ uhci_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr);
+}
+
+/*
+ * Initialize a controller that was newly discovered or has just been
+ * resumed. In either case we can't be sure of its previous state.
+ *
+ * Returns: 1 if the controller was reset, 0 otherwise.
+ */
+static int uhci_pci_check_and_reset_hc(struct uhci_hcd *uhci)
+{
+ return uhci_check_and_reset_hc(to_pci_dev(uhci_dev(uhci)),
+ uhci->io_addr);
+}
+
+/*
+ * Store the basic register settings needed by the controller.
+ * This function is called at the end of configure_hc in uhci-hcd.c.
+ */
+static void uhci_pci_configure_hc(struct uhci_hcd *uhci)
+{
+ struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci));
+
+ /* Enable PIRQ */
+ pci_write_config_word(pdev, USBLEGSUP, USBLEGSUP_DEFAULT);
+
+ /* Disable platform-specific non-PME# wakeup */
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ pci_write_config_byte(pdev, USBRES_INTEL, 0);
+}
+
+static int uhci_pci_resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
+{
+ int port;
+
+ switch (to_pci_dev(uhci_dev(uhci))->vendor) {
+ default:
+ break;
+
+ case PCI_VENDOR_ID_GENESYS:
+ /* Genesys Logic's GL880S controllers don't generate
+ * resume-detect interrupts.
+ */
+ return 1;
+
+ case PCI_VENDOR_ID_INTEL:
+ /* Some of Intel's USB controllers have a bug that causes
+ * resume-detect interrupts if any port has an over-current
+ * condition. To make matters worse, some motherboards
+ * hardwire unused USB ports' over-current inputs active!
+ * To prevent problems, we will not enable resume-detect
+ * interrupts if any ports are OC.
+ */
+ for (port = 0; port < uhci->rh_numports; ++port) {
+ if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
+ USBPORTSC_OC)
+ return 1;
+ }
+ break;
+ }
+ return 0;
+}
+
+static int uhci_pci_global_suspend_mode_is_broken(struct uhci_hcd *uhci)
+{
+ int port;
+ const char *sys_info;
+ static const char bad_Asus_board[] = "A7V8X";
+
+ /* One of Asus's motherboards has a bug which causes it to
+ * wake up immediately from suspend-to-RAM if any of the ports
+ * are connected. In such cases we will not set EGSM.
+ */
+ sys_info = dmi_get_system_info(DMI_BOARD_NAME);
+ if (sys_info && !strcmp(sys_info, bad_Asus_board)) {
+ for (port = 0; port < uhci->rh_numports; ++port) {
+ if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
+ USBPORTSC_CCS)
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int uhci_pci_init(struct usb_hcd *hcd)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+ uhci->io_addr = (unsigned long) hcd->rsrc_start;
+
+ uhci->rh_numports = uhci_count_ports(hcd);
+
+ /* Intel controllers report the OverCurrent bit active on.
+ * VIA controllers report it active off, so we'll adjust the
+ * bit value. (It's not standardized in the UHCI spec.)
+ */
+ if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA)
+ uhci->oc_low = 1;
+
+ /* HP's server management chip requires a longer port reset delay. */
+ if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
+ uhci->wait_for_hp = 1;
+
+ /* Set up pointers to PCI-specific functions */
+ uhci->reset_hc = uhci_pci_reset_hc;
+ uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
+ uhci->configure_hc = uhci_pci_configure_hc;
+ uhci->resume_detect_interrupts_are_broken =
+ uhci_pci_resume_detect_interrupts_are_broken;
+ uhci->global_suspend_mode_is_broken =
+ uhci_pci_global_suspend_mode_is_broken;
+
+
+ /* Kick BIOS off this hardware and reset if the controller
+ * isn't already safely quiescent.
+ */
+ check_and_reset_hc(uhci);
+ return 0;
+}
+
+/* Make sure the controller is quiescent and that we're not using it
+ * any more. This is mainly for the benefit of programs which, like kexec,
+ * expect the hardware to be idle: not doing DMA or generating IRQs.
+ *
+ * This routine may be called in a damaged or failing kernel. Hence we
+ * do not acquire the spinlock before shutting down the controller.
+ */
+static void uhci_shutdown(struct pci_dev *pdev)
+{
+ struct usb_hcd *hcd = pci_get_drvdata(pdev);
+
+ uhci_hc_died(hcd_to_uhci(hcd));
+}
+
+#ifdef CONFIG_PM
+
+static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated);
+
+static int uhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci));
+ int rc = 0;
+
+ dev_dbg(uhci_dev(uhci), "%s\n", __func__);
+
+ spin_lock_irq(&uhci->lock);
+ if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
+ goto done_okay; /* Already suspended or dead */
+
+ /* All PCI host controllers are required to disable IRQ generation
+ * at the source, so we must turn off PIRQ.
+ */
+ pci_write_config_word(pdev, USBLEGSUP, 0);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+ /* Enable platform-specific non-PME# wakeup */
+ if (do_wakeup) {
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ pci_write_config_byte(pdev, USBRES_INTEL,
+ USBPORT1EN | USBPORT2EN);
+ }
+
+done_okay:
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ spin_unlock_irq(&uhci->lock);
+
+ synchronize_irq(hcd->irq);
+
+ /* Check for race with a wakeup request */
+ if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
+ uhci_pci_resume(hcd, false);
+ rc = -EBUSY;
+ }
+ return rc;
+}
+
+static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+ dev_dbg(uhci_dev(uhci), "%s\n", __func__);
+
+ /* Since we aren't in D3 any more, it's safe to set this flag
+ * even if the controller was dead.
+ */
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ spin_lock_irq(&uhci->lock);
+
+ /* Make sure resume from hibernation re-enumerates everything */
+ if (hibernated) {
+ uhci->reset_hc(uhci);
+ finish_reset(uhci);
+ }
+
+ /* The firmware may have changed the controller settings during
+ * a system wakeup. Check it and reconfigure to avoid problems.
+ */
+ else {
+ check_and_reset_hc(uhci);
+ }
+ configure_hc(uhci);
+
+ /* Tell the core if the controller had to be reset */
+ if (uhci->rh_state == UHCI_RH_RESET)
+ usb_root_hub_lost_power(hcd->self.root_hub);
+
+ spin_unlock_irq(&uhci->lock);
+
+ /* If interrupts don't work and remote wakeup is enabled then
+ * the suspended root hub needs to be polled.
+ */
+ if (!uhci->RD_enable && hcd->self.root_hub->do_remote_wakeup)
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+ /* Does the root hub have a port wakeup pending? */
+ usb_hcd_poll_rh_status(hcd);
+ return 0;
+}
+
+#endif
+
+static const struct hc_driver uhci_driver = {
+ .description = hcd_name,
+ .product_desc = "UHCI Host Controller",
+ .hcd_priv_size = sizeof(struct uhci_hcd),
+
+ /* Generic hardware linkage */
+ .irq = uhci_irq,
+ .flags = HCD_USB11,
+
+ /* Basic lifecycle operations */
+ .reset = uhci_pci_init,
+ .start = uhci_start,
+#ifdef CONFIG_PM
+ .pci_suspend = uhci_pci_suspend,
+ .pci_resume = uhci_pci_resume,
+ .bus_suspend = uhci_rh_suspend,
+ .bus_resume = uhci_rh_resume,
+#endif
+ .stop = uhci_stop,
+
+ .urb_enqueue = uhci_urb_enqueue,
+ .urb_dequeue = uhci_urb_dequeue,
+
+ .endpoint_disable = uhci_hcd_endpoint_disable,
+ .get_frame_number = uhci_hcd_get_frame_number,
+
+ .hub_status_data = uhci_hub_status_data,
+ .hub_control = uhci_hub_control,
+};
+
+static const struct pci_device_id uhci_pci_ids[] = { {
+ /* handle any USB UHCI controller */
+ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
+ .driver_data = (unsigned long) &uhci_driver,
+ }, { /* end: all zeroes */ }
+};
+
+MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
+
+static struct pci_driver uhci_pci_driver = {
+ .name = (char *)hcd_name,
+ .id_table = uhci_pci_ids,
+
+ .probe = usb_hcd_pci_probe,
+ .remove = usb_hcd_pci_remove,
+ .shutdown = uhci_shutdown,
+
+#ifdef CONFIG_PM
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
+#endif
+};
+
+MODULE_SOFTDEP("pre: ehci_pci");
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
new file mode 100644
index 00000000000..01833ab2b5c
--- /dev/null
+++ b/drivers/usb/host/uhci-platform.c
@@ -0,0 +1,165 @@
+/*
+ * Generic UHCI HCD (Host Controller Driver) for Platform Devices
+ *
+ * Copyright (c) 2011 Tony Prisk <linux@prisktech.co.nz>
+ *
+ * This file is based on uhci-grlib.c
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
+ */
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static int uhci_platform_init(struct usb_hcd *hcd)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+ uhci->rh_numports = uhci_count_ports(hcd);
+
+ /* Set up pointers to generic functions */
+ uhci->reset_hc = uhci_generic_reset_hc;
+ uhci->check_and_reset_hc = uhci_generic_check_and_reset_hc;
+
+ /* No special actions need to be taken for the functions below */
+ uhci->configure_hc = NULL;
+ uhci->resume_detect_interrupts_are_broken = NULL;
+ uhci->global_suspend_mode_is_broken = NULL;
+
+ /* Reset if the controller isn't already safely quiescent. */
+ check_and_reset_hc(uhci);
+ return 0;
+}
+
+static const struct hc_driver uhci_platform_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Generic UHCI Host Controller",
+ .hcd_priv_size = sizeof(struct uhci_hcd),
+
+ /* Generic hardware linkage */
+ .irq = uhci_irq,
+ .flags = HCD_MEMORY | HCD_USB11,
+
+ /* Basic lifecycle operations */
+ .reset = uhci_platform_init,
+ .start = uhci_start,
+#ifdef CONFIG_PM
+ .pci_suspend = NULL,
+ .pci_resume = NULL,
+ .bus_suspend = uhci_rh_suspend,
+ .bus_resume = uhci_rh_resume,
+#endif
+ .stop = uhci_stop,
+
+ .urb_enqueue = uhci_urb_enqueue,
+ .urb_dequeue = uhci_urb_dequeue,
+
+ .endpoint_disable = uhci_hcd_endpoint_disable,
+ .get_frame_number = uhci_hcd_get_frame_number,
+
+ .hub_status_data = uhci_hub_status_data,
+ .hub_control = uhci_hub_control,
+};
+
+static int uhci_hcd_platform_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct uhci_hcd *uhci;
+ struct resource *res;
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /*
+ * Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
+ pdev->name);
+ if (!hcd)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ pr_err("%s: request_mem_region failed\n", __func__);
+ ret = -EBUSY;
+ goto err_rmr;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ pr_err("%s: ioremap failed\n", __func__);
+ ret = -ENOMEM;
+ goto err_irq;
+ }
+ uhci = hcd_to_uhci(hcd);
+
+ uhci->regs = hcd->regs;
+
+ ret = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
+ if (ret)
+ goto err_uhci;
+
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
+
+err_uhci:
+ iounmap(hcd->regs);
+err_irq:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err_rmr:
+ usb_put_hcd(hcd);
+
+ return ret;
+}
+
+static int uhci_hcd_platform_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+/* Make sure the controller is quiescent and that we're not using it
+ * any more. This is mainly for the benefit of programs which, like kexec,
+ * expect the hardware to be idle: not doing DMA or generating IRQs.
+ *
+ * This routine may be called in a damaged or failing kernel. Hence we
+ * do not acquire the spinlock before shutting down the controller.
+ */
+static void uhci_hcd_platform_shutdown(struct platform_device *op)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(op);
+
+ uhci_hc_died(hcd_to_uhci(hcd));
+}
+
+static const struct of_device_id platform_uhci_ids[] = {
+ { .compatible = "generic-uhci", },
+ { .compatible = "platform-uhci", },
+ {}
+};
+
+static struct platform_driver uhci_platform_driver = {
+ .probe = uhci_hcd_platform_probe,
+ .remove = uhci_hcd_platform_remove,
+ .shutdown = uhci_hcd_platform_shutdown,
+ .driver = {
+ .name = "platform-uhci",
+ .owner = THIS_MODULE,
+ .of_match_table = platform_uhci_ids,
+ },
+};
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 2090b45eb60..da6f56d996c 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -29,12 +29,12 @@ static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
if (uhci->is_stopped)
mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
- uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
+ uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
}
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
- uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
+ uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC);
}
@@ -53,7 +53,7 @@ static void uhci_fsbr_on(struct uhci_hcd *uhci)
uhci->fsbr_is_on = 1;
lqh = list_entry(uhci->skel_async_qh->node.prev,
struct uhci_qh, node);
- lqh->link = LINK_TO_QH(uhci->skel_term_qh);
+ lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
}
static void uhci_fsbr_off(struct uhci_hcd *uhci)
@@ -65,7 +65,7 @@ static void uhci_fsbr_off(struct uhci_hcd *uhci)
uhci->fsbr_is_on = 0;
lqh = list_entry(uhci->skel_async_qh->node.prev,
struct uhci_qh, node);
- lqh->link = UHCI_PTR_TERM;
+ lqh->link = UHCI_PTR_TERM(uhci);
}
static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
@@ -131,12 +131,12 @@ static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
-static inline void uhci_fill_td(struct uhci_td *td, u32 status,
- u32 token, u32 buffer)
+static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td,
+ u32 status, u32 token, u32 buffer)
{
- td->status = cpu_to_le32(status);
- td->token = cpu_to_le32(token);
- td->buffer = cpu_to_le32(buffer);
+ td->status = cpu_to_hc32(uhci, status);
+ td->token = cpu_to_hc32(uhci, token);
+ td->buffer = cpu_to_hc32(uhci, buffer);
}
static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
@@ -170,11 +170,11 @@ static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
td->link = ltd->link;
wmb();
- ltd->link = LINK_TO_TD(td);
+ ltd->link = LINK_TO_TD(uhci, td);
} else {
td->link = uhci->frame[framenum];
wmb();
- uhci->frame[framenum] = LINK_TO_TD(td);
+ uhci->frame[framenum] = LINK_TO_TD(uhci, td);
uhci->frame_cpu[framenum] = td;
}
}
@@ -195,8 +195,10 @@ static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
} else {
struct uhci_td *ntd;
- ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
- uhci->frame[td->frame] = LINK_TO_TD(ntd);
+ ntd = list_entry(td->fl_list.next,
+ struct uhci_td,
+ fl_list);
+ uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd);
uhci->frame_cpu[td->frame] = ntd;
}
} else {
@@ -253,8 +255,8 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
memset(qh, 0, sizeof(*qh));
qh->dma_handle = dma_handle;
- qh->element = UHCI_PTR_TERM;
- qh->link = UHCI_PTR_TERM;
+ qh->element = UHCI_PTR_TERM(uhci);
+ qh->link = UHCI_PTR_TERM(uhci);
INIT_LIST_HEAD(&qh->queue);
INIT_LIST_HEAD(&qh->node);
@@ -278,7 +280,7 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
qh->load = usb_calc_bus_time(udev->speed,
usb_endpoint_dir_in(&hep->desc),
qh->type == USB_ENDPOINT_XFER_ISOC,
- le16_to_cpu(hep->desc.wMaxPacketSize))
+ usb_endpoint_maxp(&hep->desc))
/ 1000 + 1;
} else { /* Skeleton QH */
@@ -346,9 +348,9 @@ static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
 /* If the QH element pointer is UHCI_PTR_TERM then the currently
* executing URB has already been unlinked, so this one isn't it. */
- if (qh_element(qh) == UHCI_PTR_TERM)
+ if (qh_element(qh) == UHCI_PTR_TERM(uhci))
goto done;
- qh->element = UHCI_PTR_TERM;
+ qh->element = UHCI_PTR_TERM(uhci);
/* Control pipes don't have to worry about toggles */
if (qh->type == USB_ENDPOINT_XFER_CONTROL)
@@ -358,7 +360,7 @@ static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
WARN_ON(list_empty(&urbp->td_list));
td = list_entry(urbp->td_list.next, struct uhci_td, list);
qh->needs_fixup = 1;
- qh->initial_toggle = uhci_toggle(td_token(td));
+ qh->initial_toggle = uhci_toggle(td_token(uhci, td));
done:
return ret;
@@ -368,7 +370,8 @@ done:
* Fix up the data toggles for URBs in a queue, when one of them
* terminates early (short transfer, error, or dequeued).
*/
-static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
+static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh,
+ int skip_first)
{
struct urb_priv *urbp = NULL;
struct uhci_td *td;
@@ -382,7 +385,7 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
/* When starting with the first URB, if the QH element pointer is
* still valid then we know the URB's toggles are okay. */
- else if (qh_element(qh) != UHCI_PTR_TERM)
+ else if (qh_element(qh) != UHCI_PTR_TERM(uhci))
toggle = 2;
/* Fix up the toggle for the URBs in the queue. Normally this
@@ -394,15 +397,15 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
/* If the first TD has the right toggle value, we don't
* need to change any toggles in this URB */
td = list_entry(urbp->td_list.next, struct uhci_td, list);
- if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
+ if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) {
td = list_entry(urbp->td_list.prev, struct uhci_td,
list);
- toggle = uhci_toggle(td_token(td)) ^ 1;
+ toggle = uhci_toggle(td_token(uhci, td)) ^ 1;
/* Otherwise all the toggles in the URB have to be switched */
} else {
list_for_each_entry(td, &urbp->td_list, list) {
- td->token ^= cpu_to_le32(
+ td->token ^= cpu_to_hc32(uhci,
TD_TOKEN_TOGGLE);
toggle ^= 1;
}
@@ -439,7 +442,7 @@ static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
pqh = list_entry(qh->node.prev, struct uhci_qh, node);
qh->link = pqh->link;
wmb();
- pqh->link = LINK_TO_QH(qh);
+ pqh->link = LINK_TO_QH(uhci, qh);
}
/*
@@ -449,7 +452,7 @@ static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
struct uhci_qh *pqh;
- __le32 link_to_new_qh;
+ __hc32 link_to_new_qh;
/* Find the predecessor QH for our new one and insert it in the list.
* The list of QHs is expected to be short, so linear search won't
@@ -463,7 +466,7 @@ static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
/* Link it into the schedule */
qh->link = pqh->link;
wmb();
- link_to_new_qh = LINK_TO_QH(qh);
+ link_to_new_qh = LINK_TO_QH(uhci, qh);
pqh->link = link_to_new_qh;
/* If this is now the first FSBR QH, link the terminating skeleton
@@ -481,13 +484,13 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
/* Set the element pointer if it isn't set already.
* This isn't needed for Isochronous queues, but it doesn't hurt. */
- if (qh_element(qh) == UHCI_PTR_TERM) {
+ if (qh_element(qh) == UHCI_PTR_TERM(uhci)) {
struct urb_priv *urbp = list_entry(qh->queue.next,
struct urb_priv, node);
struct uhci_td *td = list_entry(urbp->td_list.next,
struct uhci_td, list);
- qh->element = LINK_TO_TD(td);
+ qh->element = LINK_TO_TD(uhci, td);
}
/* Treat the queue as if it has just advanced */
@@ -531,7 +534,7 @@ static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
struct uhci_qh *pqh;
- __le32 link_to_next_qh = qh->link;
+ __hc32 link_to_next_qh = qh->link;
pqh = list_entry(qh->node.prev, struct uhci_qh, node);
pqh->link = link_to_next_qh;
@@ -728,7 +731,7 @@ static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
urbp->urb = urb;
urb->hcpriv = urbp;
-
+
INIT_LIST_HEAD(&urbp->node);
INIT_LIST_HEAD(&urbp->td_list);
@@ -755,8 +758,8 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci,
/*
* Map status to standard result codes
*
- * <status> is (td_status(td) & 0xF60000), a.k.a.
- * uhci_status_bits(td_status(td)).
+ * <status> is (td_status(uhci, td) & 0xF60000), a.k.a.
+ * uhci_status_bits(td_status(uhci, td)).
* Note: <status> does not include the TD_CTRL_NAK bit.
* <dir_out> is True for output TDs and False for input TDs.
*/
@@ -789,10 +792,10 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
{
struct uhci_td *td;
unsigned long destination, status;
- int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
+ int maxsze = usb_endpoint_maxp(&qh->hep->desc);
int len = urb->transfer_buffer_length;
dma_addr_t data = urb->transfer_dma;
- __le32 *plink;
+ __hc32 *plink;
struct urb_priv *urbp = urb->hcpriv;
int skel;
@@ -809,7 +812,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
*/
td = qh->dummy_td;
uhci_add_td_to_urbp(td, urbp);
- uhci_fill_td(td, status, destination | uhci_explen(8),
+ uhci_fill_td(uhci, td, status, destination | uhci_explen(8),
urb->setup_dma);
plink = &td->link;
status |= TD_CTRL_ACTIVE;
@@ -842,14 +845,14 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
- *plink = LINK_TO_TD(td);
+ *plink = LINK_TO_TD(uhci, td);
/* Alternate Data0/1 (start with Data1) */
destination ^= TD_TOKEN_TOGGLE;
-
+
uhci_add_td_to_urbp(td, urbp);
- uhci_fill_td(td, status, destination | uhci_explen(pktsze),
- data);
+ uhci_fill_td(uhci, td, status,
+ destination | uhci_explen(pktsze), data);
plink = &td->link;
data += pktsze;
@@ -857,19 +860,19 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
}
/*
- * Build the final TD for control status
+ * Build the final TD for control status
*/
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
- *plink = LINK_TO_TD(td);
+ *plink = LINK_TO_TD(uhci, td);
/* Change direction for the status transaction */
destination ^= (USB_PID_IN ^ USB_PID_OUT);
destination |= TD_TOKEN_TOGGLE; /* End in Data1 */
uhci_add_td_to_urbp(td, urbp);
- uhci_fill_td(td, status | TD_CTRL_IOC,
+ uhci_fill_td(uhci, td, status | TD_CTRL_IOC,
destination | uhci_explen(0), 0);
plink = &td->link;
@@ -879,11 +882,11 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
- *plink = LINK_TO_TD(td);
+ *plink = LINK_TO_TD(uhci, td);
- uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
+ uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
wmb();
- qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE);
+ qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
qh->dummy_td = td;
/* Low-speed transfers get a different queue, and won't hog the bus.
@@ -915,11 +918,11 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
{
struct uhci_td *td;
unsigned long destination, status;
- int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
+ int maxsze = usb_endpoint_maxp(&qh->hep->desc);
int len = urb->transfer_buffer_length;
int this_sg_len;
dma_addr_t data;
- __le32 *plink;
+ __hc32 *plink;
struct urb_priv *urbp = urb->hcpriv;
unsigned int toggle;
struct scatterlist *sg;
@@ -940,7 +943,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
if (usb_pipein(urb->pipe))
status |= TD_CTRL_SPD;
- i = urb->num_sgs;
+ i = urb->num_mapped_sgs;
if (len > 0 && i > 0) {
sg = urb->sg;
data = sg_dma_address(sg);
@@ -972,10 +975,10 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
- *plink = LINK_TO_TD(td);
+ *plink = LINK_TO_TD(uhci, td);
}
uhci_add_td_to_urbp(td, urbp);
- uhci_fill_td(td, status,
+ uhci_fill_td(uhci, td, status,
destination | uhci_explen(pktsze) |
(toggle << TD_TOKEN_TOGGLE_SHIFT),
data);
@@ -1008,10 +1011,10 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
- *plink = LINK_TO_TD(td);
+ *plink = LINK_TO_TD(uhci, td);
uhci_add_td_to_urbp(td, urbp);
- uhci_fill_td(td, status,
+ uhci_fill_td(uhci, td, status,
destination | uhci_explen(0) |
(toggle << TD_TOKEN_TOGGLE_SHIFT),
data);
@@ -1026,7 +1029,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
* fast side but not enough to justify delaying an interrupt
* more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
* flag setting. */
- td->status |= cpu_to_le32(TD_CTRL_IOC);
+ td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
/*
* Build the new dummy TD and activate the old one
@@ -1034,11 +1037,11 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
- *plink = LINK_TO_TD(td);
+ *plink = LINK_TO_TD(uhci, td);
- uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
+ uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
wmb();
- qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE);
+ qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
qh->dummy_td = td;
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
@@ -1131,7 +1134,7 @@ static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
* the queue at the status stage transaction, which is
* the last TD. */
WARN_ON(list_empty(&urbp->td_list));
- qh->element = LINK_TO_TD(td);
+ qh->element = LINK_TO_TD(uhci, td);
tmp = td->list.prev;
ret = -EINPROGRESS;
@@ -1140,8 +1143,9 @@ static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
/* When a bulk/interrupt transfer is short, we have to
* fix up the toggles of the following URBs on the queue
* before restarting the queue at the next URB. */
- qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
- uhci_fixup_toggles(qh, 1);
+ qh->initial_toggle =
+ uhci_toggle(td_token(uhci, qh->post_td)) ^ 1;
+ uhci_fixup_toggles(uhci, qh, 1);
if (list_empty(&urbp->td_list))
td = qh->post_td;
@@ -1176,7 +1180,7 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
unsigned int ctrlstat;
int len;
- ctrlstat = td_status(td);
+ ctrlstat = td_status(uhci, td);
status = uhci_status_bits(ctrlstat);
if (status & TD_CTRL_ACTIVE)
return -EINPROGRESS;
@@ -1186,7 +1190,7 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
if (status) {
ret = uhci_map_status(status,
- uhci_packetout(td_token(td)));
+ uhci_packetout(td_token(uhci, td)));
if ((debug == 1 && ret != -EPIPE) || debug > 1) {
/* Some debugging code */
dev_dbg(&urb->dev->dev,
@@ -1196,13 +1200,13 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
if (debug > 1 && errbuf) {
/* Print the chain for debugging */
uhci_show_qh(uhci, urbp->qh, errbuf,
- ERRBUF_LEN, 0);
+ ERRBUF_LEN - EXTRA_SPACE, 0);
lprintk(errbuf);
}
}
/* Did we receive a short packet? */
- } else if (len < uhci_expected_length(td_token(td))) {
+ } else if (len < uhci_expected_length(td_token(uhci, td))) {
/* For control transfers, go to the status TD if
* this isn't already the last data TD */
@@ -1234,10 +1238,10 @@ err:
if (ret < 0) {
/* Note that the queue has stopped and save
* the next toggle value */
- qh->element = UHCI_PTR_TERM;
+ qh->element = UHCI_PTR_TERM(uhci);
qh->is_stopped = 1;
qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
- qh->initial_toggle = uhci_toggle(td_token(td)) ^
+ qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^
(ret == -EREMOTEIO);
} else /* Short packet received */
@@ -1252,7 +1256,8 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
struct uhci_qh *qh)
{
struct uhci_td *td = NULL; /* Since urb->number_of_packets > 0 */
- int i, frame;
+ int i;
+ unsigned frame, next;
unsigned long destination, status;
struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
@@ -1261,37 +1266,29 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
urb->number_of_packets >= UHCI_NUMFRAMES)
return -EFBIG;
+ uhci_get_current_frame_number(uhci);
+
/* Check the period and figure out the starting frame number */
if (!qh->bandwidth_reserved) {
qh->period = urb->interval;
- if (urb->transfer_flags & URB_ISO_ASAP) {
- qh->phase = -1; /* Find the best phase */
- i = uhci_check_bandwidth(uhci, qh);
- if (i)
- return i;
-
- /* Allow a little time to allocate the TDs */
- uhci_get_current_frame_number(uhci);
- frame = uhci->frame_number + 10;
-
- /* Move forward to the first frame having the
- * correct phase */
- urb->start_frame = frame + ((qh->phase - frame) &
- (qh->period - 1));
- } else {
- i = urb->start_frame - uhci->last_iso_frame;
- if (i <= 0 || i >= UHCI_NUMFRAMES)
- return -EINVAL;
- qh->phase = urb->start_frame & (qh->period - 1);
- i = uhci_check_bandwidth(uhci, qh);
- if (i)
- return i;
- }
+ qh->phase = -1; /* Find the best phase */
+ i = uhci_check_bandwidth(uhci, qh);
+ if (i)
+ return i;
+
+ /* Allow a little time to allocate the TDs */
+ next = uhci->frame_number + 10;
+ frame = qh->phase;
+
+ /* Round up to the first available slot */
+ frame += (next - frame + qh->period - 1) & -qh->period;
} else if (qh->period != urb->interval) {
return -EINVAL; /* Can't change the period */
} else {
+ next = uhci->frame_number + 1;
+
/* Find the next unused frame */
if (list_empty(&qh->queue)) {
frame = qh->iso_frame;
@@ -1304,25 +1301,35 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
lurb->number_of_packets *
lurb->interval;
}
- if (urb->transfer_flags & URB_ISO_ASAP) {
- /* Skip some frames if necessary to insure
- * the start frame is in the future.
+
+ /* Fell behind? */
+ if (!uhci_frame_before_eq(next, frame)) {
+
+			/* URB_ISO_ASAP: Round up to the first available slot */
+ if (urb->transfer_flags & URB_ISO_ASAP)
+ frame += (next - frame + qh->period - 1) &
+ -qh->period;
+
+ /*
+ * Not ASAP: Use the next slot in the stream,
+ * no matter what.
*/
- uhci_get_current_frame_number(uhci);
- if (uhci_frame_before_eq(frame, uhci->frame_number)) {
- frame = uhci->frame_number + 1;
- frame += ((qh->phase - frame) &
- (qh->period - 1));
- }
- } /* Otherwise pick up where the last URB leaves off */
- urb->start_frame = frame;
+ else if (!uhci_frame_before_eq(next,
+ frame + (urb->number_of_packets - 1) *
+ qh->period))
+ dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
+ urb, frame,
+ (urb->number_of_packets - 1) *
+ qh->period,
+ next);
+ }
}
/* Make sure we won't have to go too far into the future */
if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
- urb->start_frame + urb->number_of_packets *
- urb->interval))
+ frame + urb->number_of_packets * urb->interval))
return -EFBIG;
+ urb->start_frame = frame;
status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
@@ -1333,14 +1340,14 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
return -ENOMEM;
uhci_add_td_to_urbp(td, urbp);
- uhci_fill_td(td, status, destination |
+ uhci_fill_td(uhci, td, status, destination |
uhci_explen(urb->iso_frame_desc[i].length),
urb->transfer_dma +
urb->iso_frame_desc[i].offset);
}
/* Set the interrupt-on-completion flag on the last packet. */
- td->status |= cpu_to_le32(TD_CTRL_IOC);
+ td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
/* Add the TDs to the frame list */
frame = urb->start_frame;
@@ -1376,7 +1383,7 @@ static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
uhci_remove_tds_from_frame(uhci, qh->iso_frame);
- ctrlstat = td_status(td);
+ ctrlstat = td_status(uhci, td);
if (ctrlstat & TD_CTRL_ACTIVE) {
status = -EXDEV; /* TD was added too late? */
} else {
@@ -1627,7 +1634,7 @@ restart:
* queue, the QH can now be re-activated. */
if (!list_empty(&qh->queue)) {
if (qh->needs_fixup)
- uhci_fixup_toggles(qh, 0);
+ uhci_fixup_toggles(uhci, qh, 0);
/* If the first URB on the queue wants FSBR but its time
* limit has expired, set the next TD to interrupt on
@@ -1637,7 +1644,7 @@ restart:
struct uhci_td *td = list_entry(urbp->td_list.next,
struct uhci_td, list);
- td->status |= __cpu_to_le32(TD_CTRL_IOC);
+ td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
}
uhci_activate_qh(uhci, qh);
@@ -1684,7 +1691,7 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
} else {
urbp = list_entry(qh->queue.next, struct urb_priv, node);
td = list_entry(urbp->td_list.next, struct uhci_td, list);
- status = td_status(td);
+ status = td_status(uhci, td);
if (!(status & TD_CTRL_ACTIVE)) {
/* We're okay, the queue has advanced */
@@ -1702,7 +1709,8 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {
/* Detect the Intel bug and work around it */
- if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) {
+ if (qh->post_td && qh_element(qh) ==
+ LINK_TO_TD(uhci, qh->post_td)) {
qh->element = qh->post_td->link;
qh->advance_jiffies = jiffies;
ret = 1;
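The new starting-frame computation in uhci_submit_isochronous() leans on one expression, frame += (next - frame + qh->period - 1) & -qh->period, which rounds the gap between the schedule position and the requested start up to a whole number of periods; it relies on qh->period being a power of two and on next not being behind frame. A small self-contained check of the same arithmetic, with illustrative numbers:

        #include <stdio.h>

        /* Round frame forward to the first slot >= next that keeps the
         * stream's phase, assuming period is a power of two; this mirrors
         * the expression used in uhci_submit_isochronous() above. */
        static unsigned int first_slot(unsigned int frame, unsigned int next,
                                       unsigned int period)
        {
                return frame + ((next - frame + period - 1) & -period);
        }

        int main(void)
        {
                /* frame 100, next 107, period 8: the first usable slot is 108 */
                printf("%u\n", first_slot(100, 107, 8));
                return 0;
        }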
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index 767af265e00..ba61dae9e4d 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
#include "../../wusbcore/wusbhc.h"
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index 72b6892fda6..d7b363a418d 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -17,6 +17,7 @@
*/
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/uwb/umc.h>
#include "../../wusbcore/wusbhc.h"
@@ -133,7 +134,7 @@ static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
default:
ret = asl_urb_enqueue(whc, urb, mem_flags);
break;
- };
+ }
return ret;
}
@@ -159,7 +160,7 @@ static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
default:
ret = asl_urb_dequeue(whc, urb, status);
break;
- };
+ }
return ret;
}
@@ -230,23 +231,21 @@ static struct hc_driver whc_hc_driver = {
.hub_status_data = wusbhc_rh_status_data,
.hub_control = wusbhc_rh_control,
- .bus_suspend = wusbhc_rh_suspend,
- .bus_resume = wusbhc_rh_resume,
.start_port_reset = wusbhc_rh_start_port_reset,
};
static int whc_probe(struct umc_dev *umc)
{
- int ret = -ENOMEM;
+ int ret;
struct usb_hcd *usb_hcd;
- struct wusbhc *wusbhc = NULL;
- struct whc *whc = NULL;
+ struct wusbhc *wusbhc;
+ struct whc *whc;
struct device *dev = &umc->dev;
usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci");
if (usb_hcd == NULL) {
dev_err(dev, "unable to create hcd\n");
- goto error;
+ return -ENOMEM;
}
usb_hcd->wireless = 1;
@@ -294,6 +293,7 @@ static int whc_probe(struct umc_dev *umc)
dev_err(dev, "cannot add HCD: %d\n", ret);
goto error_usb_add_hcd;
}
+ device_wakeup_enable(usb_hcd->self.controller);
ret = wusbhc_b_create(wusbhc);
if (ret) {
@@ -356,7 +356,7 @@ static void __exit whci_hc_driver_exit(void)
module_exit(whci_hc_driver_exit);
/* PCI device ID's that we handle (so it gets loaded) */
-static struct pci_device_id whci_hcd_id_table[] = {
+static struct pci_device_id __used whci_hcd_id_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
{ /* empty last entry */ }
};
diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c
index f7582e8e216..d3e13b640d4 100644
--- a/drivers/usb/host/whci/init.c
+++ b/drivers/usb/host/whci/init.c
@@ -178,7 +178,7 @@ void whc_clean_up(struct whc *whc)
if (whc->qset_pool)
dma_pool_destroy(whc->qset_pool);
- len = whc->umc->resource.end - whc->umc->resource.start + 1;
+ len = resource_size(&whc->umc->resource);
if (whc->base)
iounmap(whc->base);
if (whc->base_phys)
diff --git a/drivers/usb/host/whci/int.c b/drivers/usb/host/whci/int.c
index 6aae7002810..0c086b2790d 100644
--- a/drivers/usb/host/whci/int.c
+++ b/drivers/usb/host/whci/int.c
@@ -16,7 +16,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/uwb/umc.h>
#include "../../wusbcore/wusbhc.h"
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index dc0ab8382f5..dc31c425ce0 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
{
qset->td_start = qset->td_end = qset->ntds = 0;
- qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+ qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
qset->qh.err_count = 0;
qset->qh.scratch[0] = 0;
@@ -436,14 +436,14 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
int i;
int ntds = 0;
struct whc_std *std = NULL;
- struct whc_page_list_entry *entry;
+ struct whc_page_list_entry *new_pl_virt;
dma_addr_t prev_end = 0;
size_t pl_len;
int p = 0;
remaining = urb->transfer_buffer_length;
- for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+ for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
dma_addr_t dma_addr;
size_t dma_remaining;
dma_addr_t sp, ep;
@@ -508,12 +508,15 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
- std->pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
- if (std->pl_virt == NULL) {
+ new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
+ if (new_pl_virt == NULL) {
+ kfree(std->pl_virt);
+ std->pl_virt = NULL;
return -ENOMEM;
}
+ std->pl_virt = new_pl_virt;
- for (;p < std->num_pointers; p++, entry++) {
+ for (;p < std->num_pointers; p++) {
std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
}
@@ -561,7 +564,7 @@ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
remaining = urb->transfer_buffer_length;
- for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+ for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
size_t len;
size_t sg_remaining;
void *orig;
@@ -739,7 +742,7 @@ static int get_urb_status_from_qtd(struct urb *urb, u32 status)
* process_inactive_qtd - process an inactive (but not halted) qTD.
*
* Update the urb with the transfer bytes from the qTD, if the urb is
- * completely transfered or (in the case of an IN only) the LPF is
+ * completely transferred or (in the case of an IN only) the LPF is
* set, then the transfer is complete and the urb should be returned
* to the system.
*/
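The krealloc() change in qset_add_urb_sg() above is the usual fix for a realloc-style leak: writing the result straight back into the only copy of the pointer discards the old allocation when the call fails. A minimal userspace sketch of the same idiom, using realloc() in place of krealloc(); the function name is illustrative:

        #include <stdlib.h>

        /* Grow *bufp to new_len bytes; on failure release the old buffer
         * and clear the caller's pointer, as the hunk above now does. */
        int grow_buffer(void **bufp, size_t new_len)
        {
                void *new_buf = realloc(*bufp, new_len);

                if (new_buf == NULL) {
                        free(*bufp);
                        *bufp = NULL;
                        return -1;
                }
                *bufp = new_buf;
                return 0;
        }

Whether to free the old buffer on failure (as here) or keep it for the caller depends on who owns the allocation; the important part is not overwriting the only pointer with the allocator's return value before checking it.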
diff --git a/drivers/usb/host/whci/wusb.c b/drivers/usb/host/whci/wusb.c
index f24efdebad1..8d276268286 100644
--- a/drivers/usb/host/whci/wusb.c
+++ b/drivers/usb/host/whci/wusb.c
@@ -16,7 +16,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/uwb/umc.h>
#include "../../wusbcore/wusbhc.h"
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index fcbf4abbf38..eb009a457fb 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -32,7 +32,7 @@ void xhci_dbg_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
xhci->cap_regs);
- temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ temp = readl(&xhci->cap_regs->hc_capbase);
xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
&xhci->cap_regs->hc_capbase, temp);
xhci_dbg(xhci, "// CAPLENGTH: 0x%x\n",
@@ -44,13 +44,13 @@ void xhci_dbg_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
- temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+ temp = readl(&xhci->cap_regs->run_regs_off);
xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
&xhci->cap_regs->run_regs_off,
(unsigned int) temp & RTSOFF_MASK);
xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
- temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
+ temp = readl(&xhci->cap_regs->db_off);
xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
}
@@ -61,7 +61,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
- temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ temp = readl(&xhci->cap_regs->hc_capbase);
xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
(unsigned int) temp);
xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
@@ -69,7 +69,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
(unsigned int) HC_VERSION(temp));
- temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+ temp = readl(&xhci->cap_regs->hcs_params1);
xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
(unsigned int) temp);
xhci_dbg(xhci, " Max device slots: %u\n",
@@ -79,7 +79,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, " Max ports: %u\n",
(unsigned int) HCS_MAX_PORTS(temp));
- temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+ temp = readl(&xhci->cap_regs->hcs_params2);
xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
(unsigned int) temp);
xhci_dbg(xhci, " Isoc scheduling threshold: %u\n",
@@ -87,7 +87,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, " Maximum allowed segments in event ring: %u\n",
(unsigned int) HCS_ERST_MAX(temp));
- temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+ temp = readl(&xhci->cap_regs->hcs_params3);
xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
(unsigned int) temp);
xhci_dbg(xhci, " Worst case U1 device exit latency: %u\n",
@@ -95,14 +95,14 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, " Worst case U2 device exit latency: %u\n",
(unsigned int) HCS_U2_LATENCY(temp));
- temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ temp = readl(&xhci->cap_regs->hcc_params);
xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
xhci_dbg(xhci, " HC generates %s bit addresses\n",
HCC_64BIT_ADDR(temp) ? "64" : "32");
/* FIXME */
xhci_dbg(xhci, " FIXME: more HCCPARAMS debugging\n");
- temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+ temp = readl(&xhci->cap_regs->run_regs_off);
xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
}
@@ -110,7 +110,7 @@ static void xhci_print_command_reg(struct xhci_hcd *xhci)
{
u32 temp;
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp = readl(&xhci->op_regs->command);
xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
xhci_dbg(xhci, " HC is %s\n",
(temp & CMD_RUN) ? "running" : "being stopped");
@@ -119,7 +119,7 @@ static void xhci_print_command_reg(struct xhci_hcd *xhci)
xhci_dbg(xhci, " Event Interrupts %s\n",
(temp & CMD_EIE) ? "enabled " : "disabled");
xhci_dbg(xhci, " Host System Error Interrupts %s\n",
- (temp & CMD_EIE) ? "enabled " : "disabled");
+ (temp & CMD_HSEIE) ? "enabled " : "disabled");
xhci_dbg(xhci, " HC has %sfinished light reset\n",
(temp & CMD_LRESET) ? "not " : "");
}
@@ -128,7 +128,7 @@ static void xhci_print_status(struct xhci_hcd *xhci)
{
u32 temp;
- temp = xhci_readl(xhci, &xhci->op_regs->status);
+ temp = readl(&xhci->op_regs->status);
xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
xhci_dbg(xhci, " Event ring is %sempty\n",
(temp & STS_EINT) ? "not " : "");
@@ -147,7 +147,7 @@ static void xhci_print_op_regs(struct xhci_hcd *xhci)
static void xhci_print_ports(struct xhci_hcd *xhci)
{
- u32 __iomem *addr;
+ __le32 __iomem *addr;
int i, j;
int ports;
char *names[NUM_PORT_REGS] = {
@@ -163,20 +163,21 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
for (j = 0; j < NUM_PORT_REGS; ++j) {
xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
addr, names[j],
- (unsigned int) xhci_readl(xhci, addr));
+ (unsigned int) readl(addr));
addr++;
}
}
}
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
{
- void *addr;
+ struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
+ void __iomem *addr;
u32 temp;
u64 temp_64;
addr = &ir_set->irq_pending;
- temp = xhci_readl(xhci, addr);
+ temp = readl(addr);
if (temp == XHCI_INIT_VALUE)
return;
@@ -186,17 +187,17 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
(unsigned int)temp);
addr = &ir_set->irq_control;
- temp = xhci_readl(xhci, addr);
+ temp = readl(addr);
xhci_dbg(xhci, " %p: ir_set.control = 0x%x\n", addr,
(unsigned int)temp);
addr = &ir_set->erst_size;
- temp = xhci_readl(xhci, addr);
+ temp = readl(addr);
xhci_dbg(xhci, " %p: ir_set.erst_size = 0x%x\n", addr,
(unsigned int)temp);
addr = &ir_set->rsvd;
- temp = xhci_readl(xhci, addr);
+ temp = readl(addr);
if (temp != XHCI_INIT_VALUE)
xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
addr, (unsigned int)temp);
@@ -218,12 +219,12 @@ void xhci_print_run_regs(struct xhci_hcd *xhci)
int i;
xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
- temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+ temp = readl(&xhci->run_regs->microframe_index);
xhci_dbg(xhci, " %p: Microframe index = 0x%x\n",
&xhci->run_regs->microframe_index,
(unsigned int) temp);
for (i = 0; i < 7; ++i) {
- temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
+ temp = readl(&xhci->run_regs->rsvd[i]);
if (temp != XHCI_INIT_VALUE)
xhci_dbg(xhci, " WARN: %p: Rsvd[%i] = 0x%x\n",
&xhci->run_regs->rsvd[i],
@@ -252,27 +253,27 @@ void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
{
u64 address;
- u32 type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
+ u32 type = le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK;
switch (type) {
case TRB_TYPE(TRB_LINK):
xhci_dbg(xhci, "Link TRB:\n");
xhci_print_trb_offsets(xhci, trb);
- address = trb->link.segment_ptr;
+ address = le64_to_cpu(trb->link.segment_ptr);
xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
xhci_dbg(xhci, "Interrupter target = 0x%x\n",
- GET_INTR_TARGET(trb->link.intr_target));
+ GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target)));
xhci_dbg(xhci, "Cycle bit = %u\n",
- (unsigned int) (trb->link.control & TRB_CYCLE));
+ le32_to_cpu(trb->link.control) & TRB_CYCLE);
xhci_dbg(xhci, "Toggle cycle bit = %u\n",
- (unsigned int) (trb->link.control & LINK_TOGGLE));
+ le32_to_cpu(trb->link.control) & LINK_TOGGLE);
xhci_dbg(xhci, "No Snoop bit = %u\n",
- (unsigned int) (trb->link.control & TRB_NO_SNOOP));
+ le32_to_cpu(trb->link.control) & TRB_NO_SNOOP);
break;
case TRB_TYPE(TRB_TRANSFER):
- address = trb->trans_event.buffer;
+ address = le64_to_cpu(trb->trans_event.buffer);
/*
* FIXME: look at flags to figure out if it's an address or if
* the data is directly in the buffer field.
@@ -280,11 +281,12 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
break;
case TRB_TYPE(TRB_COMPLETION):
- address = trb->event_cmd.cmd_trb;
+ address = le64_to_cpu(trb->event_cmd.cmd_trb);
xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
xhci_dbg(xhci, "Completion status = %u\n",
- (unsigned int) GET_COMP_CODE(trb->event_cmd.status));
- xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
+ GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
+ xhci_dbg(xhci, "Flags = 0x%x\n",
+ le32_to_cpu(trb->event_cmd.flags));
break;
default:
xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
@@ -310,16 +312,16 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
int i;
- u32 addr = (u32) seg->dma;
+ u64 addr = seg->dma;
union xhci_trb *trb = seg->trbs;
for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
trb = &seg->trbs[i];
- xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
- lower_32_bits(trb->link.segment_ptr),
- upper_32_bits(trb->link.segment_ptr),
- (unsigned int) trb->link.intr_target,
- (unsigned int) trb->link.control);
+ xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr,
+ lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+ upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+ le32_to_cpu(trb->link.intr_target),
+ le32_to_cpu(trb->link.control));
addr += sizeof(*trb);
}
}
@@ -390,18 +392,18 @@ void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
- u32 addr = (u32) erst->erst_dma_addr;
+ u64 addr = erst->erst_dma_addr;
int i;
struct xhci_erst_entry *entry;
for (i = 0; i < erst->num_entries; ++i) {
entry = &erst->entries[i];
- xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
- (unsigned int) addr,
- lower_32_bits(entry->seg_addr),
- upper_32_bits(entry->seg_addr),
- (unsigned int) entry->seg_size,
- (unsigned int) entry->rsvd);
+ xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n",
+ addr,
+ lower_32_bits(le64_to_cpu(entry->seg_addr)),
+ upper_32_bits(le64_to_cpu(entry->seg_addr)),
+ le32_to_cpu(entry->seg_size),
+ le32_to_cpu(entry->rsvd));
addr += sizeof(*entry);
}
}
@@ -435,21 +437,21 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
{
struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
- switch (GET_SLOT_STATE(slot_ctx->dev_state)) {
- case 0:
+ switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) {
+ case SLOT_STATE_ENABLED:
return "enabled/disabled";
- case 1:
+ case SLOT_STATE_DEFAULT:
return "default";
- case 2:
+ case SLOT_STATE_ADDRESSED:
return "addressed";
- case 3:
+ case SLOT_STATE_CONFIGURED:
return "configured";
default:
return "reserved";
}
}
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
/* Fields are 32 bits wide, DMA addresses are in bytes */
int field_size = 32 / 8;
@@ -488,7 +490,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
}
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
unsigned int last_ep)
{
@@ -501,11 +503,14 @@ void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
if (last_ep < 31)
last_ep_ctx = last_ep + 1;
for (i = 0; i < last_ep_ctx; ++i) {
+ unsigned int epaddr = xhci_get_endpoint_address(i);
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
dma_addr_t dma = ctx->dma +
((unsigned long)ep_ctx - (unsigned long)ctx->bytes);
- xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
+ xhci_dbg(xhci, "%s Endpoint %02d Context (ep_index %02d):\n",
+ usb_endpoint_out(epaddr) ? "OUT" : "IN",
+ epaddr & USB_ENDPOINT_NUMBER_MASK, i);
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
&ep_ctx->ep_info,
(unsigned long long)dma, ep_ctx->ep_info);
@@ -542,13 +547,17 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci,
int i;
/* Fields are 32 bits wide, DMA addresses are in bytes */
int field_size = 32 / 8;
- struct xhci_slot_ctx *slot_ctx;
dma_addr_t dma = ctx->dma;
int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
if (ctx->type == XHCI_CTX_TYPE_INPUT) {
struct xhci_input_control_ctx *ctrl_ctx =
xhci_get_input_control_ctx(xhci, ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "Could not get input context, bad type.\n");
+ return;
+ }
+
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
&ctrl_ctx->drop_flags, (unsigned long long)dma,
ctrl_ctx->drop_flags);
@@ -568,7 +577,20 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci,
dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
}
- slot_ctx = xhci_get_slot_ctx(xhci, ctx);
xhci_dbg_slot_ctx(xhci, ctx);
xhci_dbg_ep_ctx(xhci, ctx, last_ep);
}
+
+void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ xhci_dbg(xhci, "%pV\n", &vaf);
+ trace(&vaf);
+ va_end(args);
+}
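The xhci_dbg_trace() helper added above expands a format string once, via a struct va_format and the printk %pV specifier, and hands the same expansion to both the dbg log and the supplied trace callback. Callers pass the trace event's printing function as that callback, as the xhci-hub.c hunks further down do, for example:

        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                        "Compliance Mode Recovery Timer Deleted.");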
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 78c4edac1db..9fe3225e6c6 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -19,8 +19,8 @@
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* Up to 16 microframes to halt an HC - one microframe is 125 microsectonds */
-#define XHCI_MAX_HALT_USEC (16*125)
+/* Up to 16 ms to halt an HC */
+#define XHCI_MAX_HALT_USEC (16*1000)
/* HC not running - set to 1 when run/stop bit is cleared. */
#define XHCI_STS_HALT (1<<0)
@@ -62,8 +62,16 @@
/* USB Legacy Support Control and Status Register - section 7.1.2 */
/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
#define XHCI_LEGACY_CONTROL_OFFSET (0x04)
-/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
-#define XHCI_LEGACY_DISABLE_SMI ((0x3 << 1) + (0xff << 5) + (0x7 << 17))
+/* bits 1:3, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
+#define XHCI_LEGACY_DISABLE_SMI ((0x7 << 1) + (0xff << 5) + (0x7 << 17))
+#define XHCI_LEGACY_SMI_EVENTS (0x7 << 29)
+
+/* USB 2.0 xHCI 0.96 L1C capability - section 7.2.2.1.3.2 */
+#define XHCI_L1C (1 << 16)
+
+/* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */
+#define XHCI_HLC (1 << 19)
+#define XHCI_BLC (1 << 20)
/* command register values to disable interrupts and halt the HC */
/* start/stop HC execution - do not write unless HC is halted*/
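The widened SMI mask above is easiest to sanity-check numerically: (0x7 << 1) covers bits 1:3, (0xff << 5) covers bits 5:12, and (0x7 << 17) covers bits 17:19. A throwaway program to print the resulting masks (the values in the comments are what it prints):

        #include <stdio.h>

        int main(void)
        {
                unsigned int disable_smi = (0x7 << 1) + (0xff << 5) + (0x7 << 17);
                unsigned int smi_events  = 0x7u << 29;

                printf("XHCI_LEGACY_DISABLE_SMI = 0x%08x\n", disable_smi); /* 0x000e1fee */
                printf("XHCI_LEGACY_SMI_EVENTS  = 0x%08x\n", smi_events);  /* 0xe0000000 */
                return 0;
        }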
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index fef5a1f9d48..aa79e874904 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -20,49 +20,153 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+
+#include <linux/slab.h>
+#include <linux/device.h>
#include <asm/unaligned.h>
#include "xhci.h"
+#include "xhci-trace.h"
#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
PORT_RC | PORT_PLC | PORT_PE)
-static void xhci_hub_descriptor(struct xhci_hcd *xhci,
- struct usb_hub_descriptor *desc)
+/* USB 3.0 BOS descriptor and a capability descriptor, combined */
+static u8 usb_bos_descriptor [] = {
+ USB_DT_BOS_SIZE, /* __u8 bLength, 5 bytes */
+ USB_DT_BOS, /* __u8 bDescriptorType */
+ 0x0F, 0x00, /* __le16 wTotalLength, 15 bytes */
+ 0x1, /* __u8 bNumDeviceCaps */
+ /* First device capability */
+ USB_DT_USB_SS_CAP_SIZE, /* __u8 bLength, 10 bytes */
+ USB_DT_DEVICE_CAPABILITY, /* Device Capability */
+ USB_SS_CAP_TYPE, /* bDevCapabilityType, SUPERSPEED_USB */
+ 0x00, /* bmAttributes, LTM off by default */
+ USB_5GBPS_OPERATION, 0x00, /* wSpeedsSupported, 5Gbps only */
+ 0x03, /* bFunctionalitySupport,
+ USB 3.0 speed only */
+ 0x00, /* bU1DevExitLat, set later. */
+ 0x00, 0x00 /* __le16 bU2DevExitLat, set later. */
+};
+
+
+static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
+ struct usb_hub_descriptor *desc, int ports)
{
- int ports;
u16 temp;
- ports = HCS_MAX_PORTS(xhci->hcs_params1);
-
- /* USB 3.0 hubs have a different descriptor, but we fake this for now */
- desc->bDescriptorType = 0x29;
desc->bPwrOn2PwrGood = 10; /* xhci section 5.4.9 says 20ms max */
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
- temp = 1 + (ports / 8);
- desc->bDescLength = 7 + 2 * temp;
-
- /* Why does core/hcd.h define bitmap? It's just confusing. */
- memset(&desc->DeviceRemovable[0], 0, temp);
- memset(&desc->DeviceRemovable[temp], 0xff, temp);
-
- /* Ugh, these should be #defines, FIXME */
- /* Using table 11-13 in USB 2.0 spec. */
temp = 0;
- /* Bits 1:0 - support port power switching, or power always on */
+ /* Bits 1:0 - support per-port power switching, or power always on */
if (HCC_PPC(xhci->hcc_params))
- temp |= 0x0001;
+ temp |= HUB_CHAR_INDV_PORT_LPSM;
else
- temp |= 0x0002;
+ temp |= HUB_CHAR_NO_LPSM;
/* Bit 2 - root hubs are not part of a compound device */
/* Bits 4:3 - individual port over current protection */
- temp |= 0x0008;
+ temp |= HUB_CHAR_INDV_PORT_OCPM;
/* Bits 6:5 - no TTs in root ports */
/* Bit 7 - no port indicators */
- desc->wHubCharacteristics = (__force __u16) cpu_to_le16(temp);
+ desc->wHubCharacteristics = cpu_to_le16(temp);
+}
+
+/* Fill in the USB 2.0 roothub descriptor */
+static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ struct usb_hub_descriptor *desc)
+{
+ int ports;
+ u16 temp;
+ __u8 port_removable[(USB_MAXCHILDREN + 1 + 7) / 8];
+ u32 portsc;
+ unsigned int i;
+
+ ports = xhci->num_usb2_ports;
+
+ xhci_common_hub_descriptor(xhci, desc, ports);
+ desc->bDescriptorType = USB_DT_HUB;
+ temp = 1 + (ports / 8);
+ desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * temp;
+
+ /* The Device Removable bits are reported on a byte granularity.
+ * If the port doesn't exist within that byte, the bit is set to 0.
+ */
+ memset(port_removable, 0, sizeof(port_removable));
+ for (i = 0; i < ports; i++) {
+ portsc = readl(xhci->usb2_ports[i]);
+ /* If a device is removable, PORTSC reports a 0, same as in the
+ * hub descriptor DeviceRemovable bits.
+ */
+ if (portsc & PORT_DEV_REMOVE)
+ /* This math is hairy because bit 0 of DeviceRemovable
+ * is reserved, and bit 1 is for port 1, etc.
+ */
+ port_removable[(i + 1) / 8] |= 1 << ((i + 1) % 8);
+ }
+
+ /* ch11.h defines a hub descriptor that has room for USB_MAXCHILDREN
+ * ports on it. The USB 2.0 specification says that there are two
+ * variable length fields at the end of the hub descriptor:
+ * DeviceRemovable and PortPwrCtrlMask. But since we can have less than
+ * USB_MAXCHILDREN ports, we may need to use the DeviceRemovable array
+ * to set PortPwrCtrlMask bits. PortPwrCtrlMask must always be set to
+	 * 0xFF, so we initialize both arrays (DeviceRemovable and
+ * PortPwrCtrlMask) to 0xFF. Then we set the DeviceRemovable for each
+ * set of ports that actually exist.
+ */
+ memset(desc->u.hs.DeviceRemovable, 0xff,
+ sizeof(desc->u.hs.DeviceRemovable));
+ memset(desc->u.hs.PortPwrCtrlMask, 0xff,
+ sizeof(desc->u.hs.PortPwrCtrlMask));
+
+ for (i = 0; i < (ports + 1 + 7) / 8; i++)
+ memset(&desc->u.hs.DeviceRemovable[i], port_removable[i],
+ sizeof(__u8));
+}
+
+/* Fill in the USB 3.0 roothub descriptor */
+static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ struct usb_hub_descriptor *desc)
+{
+ int ports;
+ u16 port_removable;
+ u32 portsc;
+ unsigned int i;
+
+ ports = xhci->num_usb3_ports;
+ xhci_common_hub_descriptor(xhci, desc, ports);
+ desc->bDescriptorType = USB_DT_SS_HUB;
+ desc->bDescLength = USB_DT_SS_HUB_SIZE;
+
+ /* header decode latency should be zero for roothubs,
+ * see section 4.23.5.2.
+ */
+ desc->u.ss.bHubHdrDecLat = 0;
+ desc->u.ss.wHubDelay = 0;
+
+ port_removable = 0;
+ /* bit 0 is reserved, bit 1 is for port 1, etc. */
+ for (i = 0; i < ports; i++) {
+ portsc = readl(xhci->usb3_ports[i]);
+ if (portsc & PORT_DEV_REMOVE)
+ port_removable |= 1 << (i + 1);
+ }
+
+ desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable);
+}
+
+static void xhci_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ struct usb_hub_descriptor *desc)
+{
+
+ if (hcd->speed == HCD_USB3)
+ xhci_usb3_hub_descriptor(hcd, xhci, desc);
+ else
+ xhci_usb2_hub_descriptor(hcd, xhci, desc);
+
}
static unsigned int xhci_port_speed(unsigned int port_status)
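The DeviceRemovable handling in xhci_usb2_hub_descriptor() above packs one-based port numbers into a byte array because bit 0 of the descriptor field is reserved: port N lands in byte N / 8, bit N % 8, and a set bit marks the attached device as non-removable. A standalone check of that indexing, with two illustrative ports:

        #include <stdio.h>

        int main(void)
        {
                unsigned char port_removable[4] = { 0 };
                int ports[] = { 1, 9 };         /* one-based port numbers */
                int i;

                for (i = 0; i < 2; i++)
                        port_removable[ports[i] / 8] |= 1 << (ports[i] % 8);

                /* port 1 -> byte 0 bit 1, port 9 -> byte 1 bit 1 */
                printf("byte0=0x%02x byte1=0x%02x\n",
                       port_removable[0], port_removable[1]);
                return 0;
        }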
@@ -71,8 +175,6 @@ static unsigned int xhci_port_speed(unsigned int port_status)
return USB_PORT_STAT_LOW_SPEED;
if (DEV_HIGHSPEED(port_status))
return USB_PORT_STAT_HIGH_SPEED;
- if (DEV_SUPERSPEED(port_status))
- return USB_PORT_STAT_SUPER_SPEED;
/*
* FIXME: Yes, we should check for full speed, but the core uses that as
* a default in portspeed() in usb/core/hub.c (which is the only place
@@ -135,17 +237,22 @@ u32 xhci_port_state_to_neutral(u32 state)
/*
* find slot id based on port number.
+ * @port: The one-based port number from one of the two split roothubs.
*/
-int xhci_find_slot_id_by_port(struct xhci_hcd *xhci, u16 port)
+int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ u16 port)
{
int slot_id;
int i;
+ enum usb_device_speed speed;
slot_id = 0;
for (i = 0; i < MAX_HC_SLOTS; i++) {
if (!xhci->devs[i])
continue;
- if (xhci->devs[i]->port == port) {
+ speed = xhci->devs[i]->udev->speed;
+ if (((speed == USB_SPEED_SUPER) == (hcd->speed == HCD_USB3))
+ && xhci->devs[i]->fake_port == port) {
slot_id = i;
break;
}
@@ -165,7 +272,6 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
struct xhci_virt_device *virt_dev;
struct xhci_command *cmd;
unsigned long flags;
- int timeleft;
int ret;
int i;
@@ -179,34 +285,31 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
spin_lock_irqsave(&xhci->lock, flags);
for (i = LAST_EP_INDEX; i > 0; i--) {
- if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
- xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
+ if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
+ struct xhci_command *command;
+ command = xhci_alloc_command(xhci, false, false,
+ GFP_NOWAIT);
+ if (!command) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, cmd);
+ return -ENOMEM;
+
+ }
+ xhci_queue_stop_endpoint(xhci, command, slot_id, i,
+ suspend);
+ }
}
- cmd->command_trb = xhci->cmd_ring->enqueue;
- list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
- xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
+ xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for last stop endpoint command to finish */
- timeleft = wait_for_completion_interruptible_timeout(
- cmd->completion,
- USB_CTRL_SET_TIMEOUT);
- if (timeleft <= 0) {
- xhci_warn(xhci, "%s while waiting for stop endpoint command\n",
- timeleft == 0 ? "Timeout" : "Signal");
- spin_lock_irqsave(&xhci->lock, flags);
- /* The timeout might have raced with the event ring handler, so
- * only delete from the list if the item isn't poisoned.
- */
- if (cmd->cmd_list.next != LIST_POISON1)
- list_del(&cmd->cmd_list);
- spin_unlock_irqrestore(&xhci->lock, flags);
+ wait_for_completion(cmd->completion);
+
+ if (cmd->status == COMP_CMD_ABORT || cmd->status == COMP_CMD_STOP) {
+ xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
ret = -ETIME;
- goto command_cleanup;
}
-
-command_cleanup:
xhci_free_command(xhci, cmd);
return ret;
}
@@ -226,18 +329,25 @@ void xhci_ring_device(struct xhci_hcd *xhci, int slot_id)
return;
}
-static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
- u32 __iomem *addr, u32 port_status)
+static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ u16 wIndex, __le32 __iomem *addr, u32 port_status)
{
+ /* Don't allow the USB core to disable SuperSpeed ports. */
+ if (hcd->speed == HCD_USB3) {
+ xhci_dbg(xhci, "Ignoring request to disable "
+ "SuperSpeed port.\n");
+ return;
+ }
+
/* Write 1 to disable the port */
- xhci_writel(xhci, port_status | PORT_PE, addr);
- port_status = xhci_readl(xhci, addr);
+ writel(port_status | PORT_PE, addr);
+ port_status = readl(addr);
xhci_dbg(xhci, "disable port, actual port %d status = 0x%x\n",
wIndex, port_status);
}
static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
- u16 wIndex, u32 __iomem *addr, u32 port_status)
+ u16 wIndex, __le32 __iomem *addr, u32 port_status)
{
char *port_change_bit;
u32 status;
@@ -247,6 +357,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
status = PORT_RC;
port_change_bit = "reset";
break;
+ case USB_PORT_FEAT_C_BH_PORT_RESET:
+ status = PORT_WRC;
+ port_change_bit = "warm(BH) reset";
+ break;
case USB_PORT_FEAT_C_CONNECTION:
status = PORT_CSC;
port_change_bit = "connect";
@@ -263,29 +377,327 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
status = PORT_PLC;
port_change_bit = "suspend/resume";
break;
+ case USB_PORT_FEAT_C_PORT_LINK_STATE:
+ status = PORT_PLC;
+ port_change_bit = "link state";
+ break;
default:
/* Should never happen */
return;
}
/* Change bits are all write 1 to clear */
- xhci_writel(xhci, port_status | status, addr);
- port_status = xhci_readl(xhci, addr);
+ writel(port_status | status, addr);
+ port_status = readl(addr);
xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
port_change_bit, wIndex, port_status);
}
+static int xhci_get_ports(struct usb_hcd *hcd, __le32 __iomem ***port_array)
+{
+ int max_ports;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (hcd->speed == HCD_USB3) {
+ max_ports = xhci->num_usb3_ports;
+ *port_array = xhci->usb3_ports;
+ } else {
+ max_ports = xhci->num_usb2_ports;
+ *port_array = xhci->usb2_ports;
+ }
+
+ return max_ports;
+}
+
+void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
+ int port_id, u32 link_state)
+{
+ u32 temp;
+
+ temp = readl(port_array[port_id]);
+ temp = xhci_port_state_to_neutral(temp);
+ temp &= ~PORT_PLS_MASK;
+ temp |= PORT_LINK_STROBE | link_state;
+ writel(temp, port_array[port_id]);
+}
+
+static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
+ __le32 __iomem **port_array, int port_id, u16 wake_mask)
+{
+ u32 temp;
+
+ temp = readl(port_array[port_id]);
+ temp = xhci_port_state_to_neutral(temp);
+
+ if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_CONNECT)
+ temp |= PORT_WKCONN_E;
+ else
+ temp &= ~PORT_WKCONN_E;
+
+ if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT)
+ temp |= PORT_WKDISC_E;
+ else
+ temp &= ~PORT_WKDISC_E;
+
+ if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT)
+ temp |= PORT_WKOC_E;
+ else
+ temp &= ~PORT_WKOC_E;
+
+ writel(temp, port_array[port_id]);
+}
+
+/* Test and clear port RWC bit */
+void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
+ int port_id, u32 port_bit)
+{
+ u32 temp;
+
+ temp = readl(port_array[port_id]);
+ if (temp & port_bit) {
+ temp = xhci_port_state_to_neutral(temp);
+ temp |= port_bit;
+ writel(temp, port_array[port_id]);
+ }
+}
+
+/* Updates Link Status for USB 2.1 port */
+static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg)
+{
+ if ((status_reg & PORT_PLS_MASK) == XDEV_U2)
+ *status |= USB_PORT_STAT_L1;
+}
+
+/* Updates Link Status for SuperSpeed port */
+static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
+{
+ u32 pls = status_reg & PORT_PLS_MASK;
+
+	/* resume state is an xHCI internal state.
+ * Do not report it to usb core.
+ */
+ if (pls == XDEV_RESUME)
+ return;
+
+ /* When the CAS bit is set then warm reset
+ * should be performed on port
+ */
+ if (status_reg & PORT_CAS) {
+ /* The CAS bit can be set while the port is
+ * in any link state.
+ * Only roothubs have CAS bit, so we
+ * pretend to be in compliance mode
+ * unless we're already in compliance
+ * or the inactive state.
+ */
+ if (pls != USB_SS_PORT_LS_COMP_MOD &&
+ pls != USB_SS_PORT_LS_SS_INACTIVE) {
+ pls = USB_SS_PORT_LS_COMP_MOD;
+ }
+ /* Return also connection bit -
+ * hub state machine resets port
+ * when this bit is set.
+ */
+ pls |= USB_PORT_STAT_CONNECTION;
+ } else {
+ /*
+ * If CAS bit isn't set but the Port is already at
+ * Compliance Mode, fake a connection so the USB core
+ * notices the Compliance state and resets the port.
+ * This resolves an issue generated by the SN65LVPE502CP
+ * in which sometimes the port enters compliance mode
+ * caused by a delay on the host-device negotiation.
+ */
+ if (pls == USB_SS_PORT_LS_COMP_MOD)
+ pls |= USB_PORT_STAT_CONNECTION;
+ }
+
+ /* update status field */
+ *status |= pls;
+}
+
+/*
+ * Function for Compliance Mode Quirk.
+ *
+ * This function checks whether all xHC USB3 ports have entered U0; if so,
+ * the compliance mode timer is deleted. A port won't enter
+ * compliance mode if it has previously entered U0.
+ */
+static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status,
+ u16 wIndex)
+{
+ u32 all_ports_seen_u0 = ((1 << xhci->num_usb3_ports)-1);
+ bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0);
+
+ if (!(xhci->quirks & XHCI_COMP_MODE_QUIRK))
+ return;
+
+ if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) {
+ xhci->port_status_u0 |= 1 << wIndex;
+ if (xhci->port_status_u0 == all_ports_seen_u0) {
+ del_timer_sync(&xhci->comp_mode_recovery_timer);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "All USB3 ports have entered U0 already!");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Compliance Mode Recovery Timer Deleted.");
+ }
+ }
+}
+
+/*
+ * Converts a raw xHCI port status into the format that external USB 2.0 or USB
+ * 3.0 hubs use.
+ *
+ * Possible side effects:
+ * - Mark a port as being done with device resume,
+ * and ring the endpoint doorbells.
+ * - Stop the Synopsys redriver Compliance Mode polling.
+ * - Drop and reacquire the xHCI lock, in order to wait for port resume.
+ */
+static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ struct xhci_bus_state *bus_state,
+ __le32 __iomem **port_array,
+ u16 wIndex, u32 raw_port_status,
+ unsigned long flags)
+ __releases(&xhci->lock)
+ __acquires(&xhci->lock)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ u32 status = 0;
+ int slot_id;
+
+ /* wPortChange bits */
+ if (raw_port_status & PORT_CSC)
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
+ if (raw_port_status & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+ if ((raw_port_status & PORT_OCC))
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+ if ((raw_port_status & PORT_RC))
+ status |= USB_PORT_STAT_C_RESET << 16;
+ /* USB3.0 only */
+ if (hcd->speed == HCD_USB3) {
+ if ((raw_port_status & PORT_PLC))
+ status |= USB_PORT_STAT_C_LINK_STATE << 16;
+ if ((raw_port_status & PORT_WRC))
+ status |= USB_PORT_STAT_C_BH_RESET << 16;
+ }
+
+ if (hcd->speed != HCD_USB3) {
+ if ((raw_port_status & PORT_PLS_MASK) == XDEV_U3
+ && (raw_port_status & PORT_POWER))
+ status |= USB_PORT_STAT_SUSPEND;
+ }
+ if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME &&
+ !DEV_SUPERSPEED(raw_port_status)) {
+ if ((raw_port_status & PORT_RESET) ||
+ !(raw_port_status & PORT_PE))
+ return 0xffffffff;
+ if (time_after_eq(jiffies,
+ bus_state->resume_done[wIndex])) {
+ int time_left;
+
+ xhci_dbg(xhci, "Resume USB2 port %d\n",
+ wIndex + 1);
+ bus_state->resume_done[wIndex] = 0;
+ clear_bit(wIndex, &bus_state->resuming_ports);
+
+ set_bit(wIndex, &bus_state->rexit_ports);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ XDEV_U0);
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ time_left = wait_for_completion_timeout(
+ &bus_state->rexit_done[wIndex],
+ msecs_to_jiffies(
+ XHCI_MAX_REXIT_TIMEOUT));
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ if (time_left) {
+ slot_id = xhci_find_slot_id_by_port(hcd,
+ xhci, wIndex + 1);
+ if (!slot_id) {
+ xhci_dbg(xhci, "slot_id is zero\n");
+ return 0xffffffff;
+ }
+ xhci_ring_device(xhci, slot_id);
+ } else {
+ int port_status = readl(port_array[wIndex]);
+ xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
+ XHCI_MAX_REXIT_TIMEOUT,
+ port_status);
+ status |= USB_PORT_STAT_SUSPEND;
+ clear_bit(wIndex, &bus_state->rexit_ports);
+ }
+
+ bus_state->port_c_suspend |= 1 << wIndex;
+ bus_state->suspended_ports &= ~(1 << wIndex);
+ } else {
+ /*
+ * The resume has been signaling for less than
+ * 20ms. Report the port status as SUSPEND,
+ * let the usbcore check port status again
+ * and clear resume signaling later.
+ */
+ status |= USB_PORT_STAT_SUSPEND;
+ }
+ }
+ if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0
+ && (raw_port_status & PORT_POWER)
+ && (bus_state->suspended_ports & (1 << wIndex))) {
+ bus_state->suspended_ports &= ~(1 << wIndex);
+ if (hcd->speed != HCD_USB3)
+ bus_state->port_c_suspend |= 1 << wIndex;
+ }
+ if (raw_port_status & PORT_CONNECT) {
+ status |= USB_PORT_STAT_CONNECTION;
+ status |= xhci_port_speed(raw_port_status);
+ }
+ if (raw_port_status & PORT_PE)
+ status |= USB_PORT_STAT_ENABLE;
+ if (raw_port_status & PORT_OC)
+ status |= USB_PORT_STAT_OVERCURRENT;
+ if (raw_port_status & PORT_RESET)
+ status |= USB_PORT_STAT_RESET;
+ if (raw_port_status & PORT_POWER) {
+ if (hcd->speed == HCD_USB3)
+ status |= USB_SS_PORT_STAT_POWER;
+ else
+ status |= USB_PORT_STAT_POWER;
+ }
+ /* Update Port Link State */
+ if (hcd->speed == HCD_USB3) {
+ xhci_hub_report_usb3_link_state(&status, raw_port_status);
+ /*
+ * Verify if all USB3 Ports Have entered U0 already.
+ * Delete Compliance Mode Timer if so.
+ */
+ xhci_del_comp_mod_timer(xhci, raw_port_status, wIndex);
+ } else {
+ xhci_hub_report_usb2_link_state(&status, raw_port_status);
+ }
+ if (bus_state->port_c_suspend & (1 << wIndex))
+ status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+
+ return status;
+}
+
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- int ports;
+ int max_ports;
unsigned long flags;
- u32 temp, temp1, status;
+ u32 temp, status;
int retval = 0;
- u32 __iomem *addr;
+ __le32 __iomem **port_array;
int slot_id;
+ struct xhci_bus_state *bus_state;
+ u16 link_state = 0;
+ u16 wake_mask = 0;
+ u16 timeout = 0;
- ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ max_ports = xhci_get_ports(hcd, &port_array);
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
spin_lock_irqsave(&xhci->lock, flags);
switch (typeReq) {
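xhci_get_port_status() above folds the raw PORTSC value into the wPortStatus/wPortChange pair the hub core expects (change bits shifted into the upper 16 bits) and, for the USB3 roothub, feeds each port's link state to the compliance-mode quirk. That quirk's bookkeeping is just a per-port bitmap compared against a full mask; a minimal standalone rendering of the bookkeeping, assuming four USB3 ports:

        #include <stdio.h>

        int main(void)
        {
                unsigned int num_usb3_ports = 4;
                unsigned int all_ports_seen_u0 = (1u << num_usb3_ports) - 1; /* 0x0f */
                unsigned int port_status_u0 = 0;
                unsigned int port;

                /* Pretend each port reports U0 once; after the last one the
                 * recovery timer would be deleted. */
                for (port = 0; port < num_usb3_ports; port++) {
                        port_status_u0 |= 1u << port;
                        if (port_status_u0 == all_ports_seen_u0)
                                printf("all %u ports have shown U0\n", num_usb3_ports);
                }
                return 0;
        }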
@@ -294,100 +706,97 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
memset(buf, 0, 4);
break;
case GetHubDescriptor:
- xhci_hub_descriptor(xhci, (struct usb_hub_descriptor *) buf);
+ /* Check to make sure userspace is asking for the USB 3.0 hub
+ * descriptor for the USB 3.0 roothub. If not, we stall the
+ * endpoint, like external hubs do.
+ */
+ if (hcd->speed == HCD_USB3 &&
+ (wLength < USB_DT_SS_HUB_SIZE ||
+ wValue != (USB_DT_SS_HUB << 8))) {
+ xhci_dbg(xhci, "Wrong hub descriptor type for "
+ "USB 3.0 roothub.\n");
+ goto error;
+ }
+ xhci_hub_descriptor(hcd, xhci,
+ (struct usb_hub_descriptor *) buf);
break;
- case GetPortStatus:
- if (!wIndex || wIndex > ports)
+ case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
+ if ((wValue & 0xff00) != (USB_DT_BOS << 8))
goto error;
- wIndex--;
- status = 0;
- addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
- temp = xhci_readl(xhci, addr);
- xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", wIndex, temp);
-
- /* wPortChange bits */
- if (temp & PORT_CSC)
- status |= USB_PORT_STAT_C_CONNECTION << 16;
- if (temp & PORT_PEC)
- status |= USB_PORT_STAT_C_ENABLE << 16;
- if ((temp & PORT_OCC))
- status |= USB_PORT_STAT_C_OVERCURRENT << 16;
- /*
- * FIXME ignoring reset and USB 2.1/3.0 specific
- * changes
- */
- if ((temp & PORT_PLS_MASK) == XDEV_U3
- && (temp & PORT_POWER))
- status |= 1 << USB_PORT_FEAT_SUSPEND;
- if ((temp & PORT_PLS_MASK) == XDEV_RESUME) {
- if ((temp & PORT_RESET) || !(temp & PORT_PE))
- goto error;
- if (!DEV_SUPERSPEED(temp) && time_after_eq(jiffies,
- xhci->resume_done[wIndex])) {
- xhci_dbg(xhci, "Resume USB2 port %d\n",
- wIndex + 1);
- xhci->resume_done[wIndex] = 0;
- temp1 = xhci_port_state_to_neutral(temp);
- temp1 &= ~PORT_PLS_MASK;
- temp1 |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp1, addr);
- xhci_dbg(xhci, "set port %d resume\n",
- wIndex + 1);
- slot_id = xhci_find_slot_id_by_port(xhci,
- wIndex + 1);
- if (!slot_id) {
- xhci_dbg(xhci, "slot_id is zero\n");
- goto error;
- }
- xhci_ring_device(xhci, slot_id);
- xhci->port_c_suspend[wIndex >> 5] |=
- 1 << (wIndex & 31);
- xhci->suspended_ports[wIndex >> 5] &=
- ~(1 << (wIndex & 31));
- }
- }
- if ((temp & PORT_PLS_MASK) == XDEV_U0
- && (temp & PORT_POWER)
- && (xhci->suspended_ports[wIndex >> 5] &
- (1 << (wIndex & 31)))) {
- xhci->suspended_ports[wIndex >> 5] &=
- ~(1 << (wIndex & 31));
- xhci->port_c_suspend[wIndex >> 5] |=
- 1 << (wIndex & 31);
+ if (hcd->speed != HCD_USB3)
+ goto error;
+
+ /* Set the U1 and U2 exit latencies. */
+ memcpy(buf, &usb_bos_descriptor,
+ USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE);
+ if ((xhci->quirks & XHCI_LPM_SUPPORT)) {
+ temp = readl(&xhci->cap_regs->hcs_params3);
+ buf[12] = HCS_U1_LATENCY(temp);
+ put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
}
- if (temp & PORT_CONNECT) {
- status |= USB_PORT_STAT_CONNECTION;
- status |= xhci_port_speed(temp);
+
+ /* Indicate whether the host has LTM support. */
+ temp = readl(&xhci->cap_regs->hcc_params);
+ if (HCC_LTC(temp))
+ buf[8] |= USB_LTM_SUPPORT;
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
+ case GetPortStatus:
+ if (!wIndex || wIndex > max_ports)
+ goto error;
+ wIndex--;
+ temp = readl(port_array[wIndex]);
+ if (temp == 0xffffffff) {
+ retval = -ENODEV;
+ break;
}
- if (temp & PORT_PE)
- status |= USB_PORT_STAT_ENABLE;
- if (temp & PORT_OC)
- status |= USB_PORT_STAT_OVERCURRENT;
- if (temp & PORT_RESET)
- status |= USB_PORT_STAT_RESET;
- if (temp & PORT_POWER)
- status |= USB_PORT_STAT_POWER;
- if (xhci->port_c_suspend[wIndex >> 5] & (1 << (wIndex & 31)))
- status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ status = xhci_get_port_status(hcd, bus_state, port_array,
+ wIndex, temp, flags);
+ if (status == 0xffffffff)
+ goto error;
+
+ xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n",
+ wIndex, temp);
xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
+
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
break;
case SetPortFeature:
+ if (wValue == USB_PORT_FEAT_LINK_STATE)
+ link_state = (wIndex & 0xff00) >> 3;
+ if (wValue == USB_PORT_FEAT_REMOTE_WAKE_MASK)
+ wake_mask = wIndex & 0xff00;
+ /* The MSB of wIndex is the U1/U2 timeout */
+ timeout = (wIndex & 0xff00) >> 8;
wIndex &= 0xff;
- if (!wIndex || wIndex > ports)
+ if (!wIndex || wIndex > max_ports)
goto error;
wIndex--;
- addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
- temp = xhci_readl(xhci, addr);
+ temp = readl(port_array[wIndex]);
+ if (temp == 0xffffffff) {
+ retval = -ENODEV;
+ break;
+ }
temp = xhci_port_state_to_neutral(temp);
+ /* FIXME: What new port features do we need to support? */
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- temp = xhci_readl(xhci, addr);
+ temp = readl(port_array[wIndex]);
+ if ((temp & PORT_PLS_MASK) != XDEV_U0) {
+ /* Resume the port to U0 first */
+ xhci_set_link_state(xhci, port_array, wIndex,
+ XDEV_U0);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&xhci->lock, flags);
+ }
/* In spec software should not attempt to suspend
* a port unless the port reports that it is in the
* enabled (PED = ‘1’,PLS < ‘3’) state.
*/
+ temp = readl(port_array[wIndex]);
if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
|| (temp & PORT_PLS_MASK) >= XDEV_U3) {
xhci_warn(xhci, "USB core suspending device "
@@ -395,7 +804,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
- slot_id = xhci_find_slot_id_by_port(xhci, wIndex + 1);
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ wIndex + 1);
if (!slot_id) {
xhci_warn(xhci, "slot_id is zero\n");
goto error;
@@ -405,18 +815,76 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_stop_device(xhci, slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_U3;
- xhci_writel(xhci, temp, addr);
+ xhci_set_link_state(xhci, port_array, wIndex, XDEV_U3);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(10); /* wait device to enter */
spin_lock_irqsave(&xhci->lock, flags);
- temp = xhci_readl(xhci, addr);
- xhci->suspended_ports[wIndex >> 5] |=
- 1 << (wIndex & (31));
+ temp = readl(port_array[wIndex]);
+ bus_state->suspended_ports |= 1 << wIndex;
+ break;
+ case USB_PORT_FEAT_LINK_STATE:
+ temp = readl(port_array[wIndex]);
+
+ /* Disable port */
+ if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
+ xhci_dbg(xhci, "Disable port %d\n", wIndex);
+ temp = xhci_port_state_to_neutral(temp);
+ /*
+ * Clear all change bits, so that we get a new
+ * connection event.
+ */
+ temp |= PORT_CSC | PORT_PEC | PORT_WRC |
+ PORT_OCC | PORT_RC | PORT_PLC |
+ PORT_CEC;
+ writel(temp | PORT_PE, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
+ break;
+ }
+
+ /* Put link in RxDetect (enable port) */
+ if (link_state == USB_SS_PORT_LS_RX_DETECT) {
+ xhci_dbg(xhci, "Enable port %d\n", wIndex);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ link_state);
+ temp = readl(port_array[wIndex]);
+ break;
+ }
+
+ /* Software should not attempt to set
+ * port link state above '3' (U3) and the port
+ * must be enabled.
+ */
+ if ((temp & PORT_PE) == 0 ||
+ (link_state > USB_SS_PORT_LS_U3)) {
+ xhci_warn(xhci, "Cannot set link state.\n");
+ goto error;
+ }
+
+ if (link_state == USB_SS_PORT_LS_U3) {
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ wIndex + 1);
+ if (slot_id) {
+ /* unlock to execute stop endpoint
+ * commands */
+ spin_unlock_irqrestore(&xhci->lock,
+ flags);
+ xhci_stop_device(xhci, slot_id, 1);
+ spin_lock_irqsave(&xhci->lock, flags);
+ }
+ }
+
+ xhci_set_link_state(xhci, port_array, wIndex,
+ link_state);
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ msleep(20); /* wait device to enter */
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ temp = readl(port_array[wIndex]);
+ if (link_state == USB_SS_PORT_LS_U3)
+ bus_state->suspended_ports |= 1 << wIndex;
break;
case USB_PORT_FEAT_POWER:
/*
@@ -425,69 +893,96 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* However, khubd will ignore the roothub events until
* the roothub is registered.
*/
- xhci_writel(xhci, temp | PORT_POWER, addr);
+ writel(temp | PORT_POWER, port_array[wIndex]);
- temp = xhci_readl(xhci, addr);
+ temp = readl(port_array[wIndex]);
xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n", wIndex, temp);
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ temp = usb_acpi_power_manageable(hcd->self.root_hub,
+ wIndex);
+ if (temp)
+ usb_acpi_set_power_state(hcd->self.root_hub,
+ wIndex, true);
+ spin_lock_irqsave(&xhci->lock, flags);
break;
case USB_PORT_FEAT_RESET:
temp = (temp | PORT_RESET);
- xhci_writel(xhci, temp, addr);
+ writel(temp, port_array[wIndex]);
- temp = xhci_readl(xhci, addr);
+ temp = readl(port_array[wIndex]);
xhci_dbg(xhci, "set port reset, actual port %d status = 0x%x\n", wIndex, temp);
break;
+ case USB_PORT_FEAT_REMOTE_WAKE_MASK:
+ xhci_set_remote_wake_mask(xhci, port_array,
+ wIndex, wake_mask);
+ temp = readl(port_array[wIndex]);
+ xhci_dbg(xhci, "set port remote wake mask, "
+ "actual port %d status = 0x%x\n",
+ wIndex, temp);
+ break;
+ case USB_PORT_FEAT_BH_PORT_RESET:
+ temp |= PORT_WR;
+ writel(temp, port_array[wIndex]);
+
+ temp = readl(port_array[wIndex]);
+ break;
+ case USB_PORT_FEAT_U1_TIMEOUT:
+ if (hcd->speed != HCD_USB3)
+ goto error;
+ temp = readl(port_array[wIndex] + PORTPMSC);
+ temp &= ~PORT_U1_TIMEOUT_MASK;
+ temp |= PORT_U1_TIMEOUT(timeout);
+ writel(temp, port_array[wIndex] + PORTPMSC);
+ break;
+ case USB_PORT_FEAT_U2_TIMEOUT:
+ if (hcd->speed != HCD_USB3)
+ goto error;
+ temp = readl(port_array[wIndex] + PORTPMSC);
+ temp &= ~PORT_U2_TIMEOUT_MASK;
+ temp |= PORT_U2_TIMEOUT(timeout);
+ writel(temp, port_array[wIndex] + PORTPMSC);
+ break;
default:
goto error;
}
- temp = xhci_readl(xhci, addr); /* unblock any posted writes */
+ /* unblock any posted writes */
+ temp = readl(port_array[wIndex]);
break;
case ClearPortFeature:
- if (!wIndex || wIndex > ports)
+ if (!wIndex || wIndex > max_ports)
goto error;
wIndex--;
- addr = &xhci->op_regs->port_status_base +
- NUM_PORT_REGS*(wIndex & 0xff);
- temp = xhci_readl(xhci, addr);
+ temp = readl(port_array[wIndex]);
+ if (temp == 0xffffffff) {
+ retval = -ENODEV;
+ break;
+ }
+ /* FIXME: What new port features do we need to support? */
temp = xhci_port_state_to_neutral(temp);
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- temp = xhci_readl(xhci, addr);
+ temp = readl(port_array[wIndex]);
xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n");
xhci_dbg(xhci, "PORTSC %04x\n", temp);
if (temp & PORT_RESET)
goto error;
- if (temp & XDEV_U3) {
+ if ((temp & PORT_PLS_MASK) == XDEV_U3) {
if ((temp & PORT_PE) == 0)
goto error;
- if (DEV_SUPERSPEED(temp)) {
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, addr);
- xhci_readl(xhci, addr);
- } else {
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_RESUME;
- xhci_writel(xhci, temp, addr);
- spin_unlock_irqrestore(&xhci->lock,
- flags);
- msleep(20);
- spin_lock_irqsave(&xhci->lock, flags);
-
- temp = xhci_readl(xhci, addr);
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, addr);
- }
- xhci->port_c_suspend[wIndex >> 5] |=
- 1 << (wIndex & 31);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ XDEV_RESUME);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&xhci->lock, flags);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ XDEV_U0);
}
+ bus_state->port_c_suspend |= 1 << wIndex;
- slot_id = xhci_find_slot_id_by_port(xhci, wIndex + 1);
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ wIndex + 1);
if (!slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
goto error;
@@ -495,17 +990,30 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_ring_device(xhci, slot_id);
break;
case USB_PORT_FEAT_C_SUSPEND:
- xhci->port_c_suspend[wIndex >> 5] &=
- ~(1 << (wIndex & 31));
+ bus_state->port_c_suspend &= ~(1 << wIndex);
case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_BH_PORT_RESET:
case USB_PORT_FEAT_C_CONNECTION:
case USB_PORT_FEAT_C_OVER_CURRENT:
case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_PORT_LINK_STATE:
xhci_clear_port_change_bit(xhci, wValue, wIndex,
- addr, temp);
+ port_array[wIndex], temp);
break;
case USB_PORT_FEAT_ENABLE:
- xhci_disable_port(xhci, wIndex, addr, temp);
+ xhci_disable_port(hcd, xhci, wIndex,
+ port_array[wIndex], temp);
+ break;
+ case USB_PORT_FEAT_POWER:
+ writel(temp & ~PORT_POWER, port_array[wIndex]);
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ temp = usb_acpi_power_manageable(hcd->self.root_hub,
+ wIndex);
+ if (temp)
+ usb_acpi_set_power_state(hcd->self.root_hub,
+ wIndex, false);
+ spin_lock_irqsave(&xhci->lock, flags);
break;
default:
goto error;
@@ -535,31 +1043,47 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
u32 mask;
int i, retval;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- int ports;
- u32 __iomem *addr;
+ int max_ports;
+ __le32 __iomem **port_array;
+ struct xhci_bus_state *bus_state;
+ bool reset_change = false;
- ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ max_ports = xhci_get_ports(hcd, &port_array);
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
/* Initial status is no changes */
- retval = (ports + 8) / 8;
+ retval = (max_ports + 8) / 8;
memset(buf, 0, retval);
- status = 0;
- mask = PORT_CSC | PORT_PEC | PORT_OCC;
+ /*
+ * Inform the usbcore about resume-in-progress by returning
+ * a non-zero value even if there are no status changes.
+ */
+ status = bus_state->resuming_ports;
+
+ mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
spin_lock_irqsave(&xhci->lock, flags);
/* For each port, did anything change? If so, set that bit in buf. */
- for (i = 0; i < ports; i++) {
- addr = &xhci->op_regs->port_status_base +
- NUM_PORT_REGS*i;
- temp = xhci_readl(xhci, addr);
+ for (i = 0; i < max_ports; i++) {
+ temp = readl(port_array[i]);
+ if (temp == 0xffffffff) {
+ retval = -ENODEV;
+ break;
+ }
if ((temp & mask) != 0 ||
- (xhci->port_c_suspend[i >> 5] & 1 << (i & 31)) ||
- (xhci->resume_done[i] && time_after_eq(
- jiffies, xhci->resume_done[i]))) {
+ (bus_state->port_c_suspend & 1 << i) ||
+ (bus_state->resume_done[i] && time_after_eq(
+ jiffies, bus_state->resume_done[i]))) {
buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
status = 1;
}
+ if ((temp & PORT_RC))
+ reset_change = true;
+ }
+ if (!status && !reset_change) {
+ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
}
spin_unlock_irqrestore(&xhci->lock, flags);
return status ? retval : 0;
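
As a quick sanity check of the change bitmap built above (a worked example, not part of the patch): usbcore reserves bit 0 for the hub itself and uses bit N for port N, so a root hub with, say, 4 ports reports retval = (4 + 8) / 8 = 1 byte. A change on the third port corresponds to i == 2, and buf[(2 + 1) / 8] |= 1 << ((2 + 1) % 8) sets bit 3 of buf[0]; any non-zero status then makes the function return retval so khubd issues GetPortStatus for the flagged ports.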
@@ -570,42 +1094,39 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
int xhci_bus_suspend(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- int port;
+ int max_ports, port_index;
+ __le32 __iomem **port_array;
+ struct xhci_bus_state *bus_state;
unsigned long flags;
- xhci_dbg(xhci, "suspend root hub\n");
+ max_ports = xhci_get_ports(hcd, &port_array);
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
spin_lock_irqsave(&xhci->lock, flags);
if (hcd->self.root_hub->do_remote_wakeup) {
- port = HCS_MAX_PORTS(xhci->hcs_params1);
- while (port--) {
- if (xhci->resume_done[port] != 0) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "suspend failed because "
- "port %d is resuming\n",
- port + 1);
- return -EBUSY;
- }
+ if (bus_state->resuming_ports) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "suspend failed because "
+ "a port is resuming\n");
+ return -EBUSY;
}
}
- port = HCS_MAX_PORTS(xhci->hcs_params1);
- xhci->bus_suspended = 0;
- while (port--) {
+ port_index = max_ports;
+ bus_state->bus_suspended = 0;
+ while (port_index--) {
/* suspend the port if the port is not suspended */
- u32 __iomem *addr;
u32 t1, t2;
int slot_id;
- addr = &xhci->op_regs->port_status_base +
- NUM_PORT_REGS * (port & 0xff);
- t1 = xhci_readl(xhci, addr);
+ t1 = readl(port_array[port_index]);
t2 = xhci_port_state_to_neutral(t1);
if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
- xhci_dbg(xhci, "port %d not suspended\n", port);
- slot_id = xhci_find_slot_id_by_port(xhci, port + 1);
+ xhci_dbg(xhci, "port %d not suspended\n", port_index);
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ port_index + 1);
if (slot_id) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_stop_device(xhci, slot_id, 1);
@@ -613,9 +1134,15 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
}
t2 &= ~PORT_PLS_MASK;
t2 |= PORT_LINK_STROBE | XDEV_U3;
- set_bit(port, &xhci->bus_suspended);
+ set_bit(port_index, &bus_state->bus_suspended);
}
- if (hcd->self.root_hub->do_remote_wakeup) {
+ /* USB core sets remote wake mask for USB 3.0 hubs,
+ * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME
+ * is enabled, so also enable remote wake here.
+ */
+ if (hcd->self.root_hub->do_remote_wakeup
+ && device_may_wakeup(hcd->self.controller)) {
+
if (t1 & PORT_CONNECT) {
t2 |= PORT_WKOC_E | PORT_WKDISC_E;
t2 &= ~PORT_WKCONN_E;
@@ -628,22 +1155,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
t1 = xhci_port_state_to_neutral(t1);
if (t1 != t2)
- xhci_writel(xhci, t2, addr);
-
- if (DEV_HIGHSPEED(t1)) {
- /* enable remote wake up for USB 2.0 */
- u32 __iomem *addr;
- u32 tmp;
-
- addr = &xhci->op_regs->port_power_base +
- NUM_PORT_REGS * (port & 0xff);
- tmp = xhci_readl(xhci, addr);
- tmp |= PORT_RWE;
- xhci_writel(xhci, tmp, addr);
- }
+ writel(t2, port_array[port_index]);
}
hcd->state = HC_STATE_SUSPENDED;
- xhci->next_statechange = jiffies + msecs_to_jiffies(10);
+ bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
}
@@ -651,13 +1166,16 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
int xhci_bus_resume(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- int port;
+ int max_ports, port_index;
+ __le32 __iomem **port_array;
+ struct xhci_bus_state *bus_state;
u32 temp;
unsigned long flags;
- xhci_dbg(xhci, "resume root hub\n");
+ max_ports = xhci_get_ports(hcd, &port_array);
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
- if (time_before(jiffies, xhci->next_statechange))
+ if (time_before(jiffies, bus_state->next_statechange))
msleep(5);
spin_lock_irqsave(&xhci->lock, flags);
@@ -667,76 +1185,65 @@ int xhci_bus_resume(struct usb_hcd *hcd)
}
/* delay the irqs */
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp = readl(&xhci->op_regs->command);
temp &= ~CMD_EIE;
- xhci_writel(xhci, temp, &xhci->op_regs->command);
+ writel(temp, &xhci->op_regs->command);
- port = HCS_MAX_PORTS(xhci->hcs_params1);
- while (port--) {
+ port_index = max_ports;
+ while (port_index--) {
/* Check whether need resume ports. If needed
resume port and disable remote wakeup */
- u32 __iomem *addr;
u32 temp;
int slot_id;
- addr = &xhci->op_regs->port_status_base +
- NUM_PORT_REGS * (port & 0xff);
- temp = xhci_readl(xhci, addr);
+ temp = readl(port_array[port_index]);
if (DEV_SUPERSPEED(temp))
temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
else
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
- if (test_bit(port, &xhci->bus_suspended) &&
+ if (test_bit(port_index, &bus_state->bus_suspended) &&
(temp & PORT_PLS_MASK)) {
if (DEV_SUPERSPEED(temp)) {
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, addr);
+ xhci_set_link_state(xhci, port_array,
+ port_index, XDEV_U0);
} else {
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_RESUME;
- xhci_writel(xhci, temp, addr);
+ xhci_set_link_state(xhci, port_array,
+ port_index, XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(20);
spin_lock_irqsave(&xhci->lock, flags);
- temp = xhci_readl(xhci, addr);
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, addr);
+ xhci_set_link_state(xhci, port_array,
+ port_index, XDEV_U0);
}
- slot_id = xhci_find_slot_id_by_port(xhci, port + 1);
+ /* wait for the port to enter U0 and report port link
+ * state change.
+ */
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ /* Clear PLC */
+ xhci_test_and_clear_bit(xhci, port_array, port_index,
+ PORT_PLC);
+
+ slot_id = xhci_find_slot_id_by_port(hcd,
+ xhci, port_index + 1);
if (slot_id)
xhci_ring_device(xhci, slot_id);
} else
- xhci_writel(xhci, temp, addr);
-
- if (DEV_HIGHSPEED(temp)) {
- /* disable remote wake up for USB 2.0 */
- u32 __iomem *addr;
- u32 tmp;
-
- addr = &xhci->op_regs->port_power_base +
- NUM_PORT_REGS * (port & 0xff);
- tmp = xhci_readl(xhci, addr);
- tmp &= ~PORT_RWE;
- xhci_writel(xhci, tmp, addr);
- }
+ writel(temp, port_array[port_index]);
}
- (void) xhci_readl(xhci, &xhci->op_regs->command);
+ (void) readl(&xhci->op_regs->command);
- xhci->next_statechange = jiffies + msecs_to_jiffies(5);
- hcd->state = HC_STATE_RUNNING;
+ bus_state->next_statechange = jiffies + msecs_to_jiffies(5);
/* re-enable irqs */
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp = readl(&xhci->op_regs->command);
temp |= CMD_EIE;
- xhci_writel(xhci, temp, &xhci->op_regs->command);
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ writel(temp, &xhci->op_regs->command);
+ temp = readl(&xhci->op_regs->command);
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 202770676da..8056d90690e 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -24,8 +24,10 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
#include "xhci.h"
+#include "xhci-trace.h"
/*
* Allocates a generic ring segment from the ring pool, sets the dma address,
@@ -34,25 +36,29 @@
* Section 4.11.1.1:
* "All components of all Command and Transfer TRBs shall be initialized to '0'"
*/
-static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
+static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
+ unsigned int cycle_state, gfp_t flags)
{
struct xhci_segment *seg;
dma_addr_t dma;
+ int i;
seg = kzalloc(sizeof *seg, flags);
if (!seg)
return NULL;
- xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
if (!seg->trbs) {
kfree(seg);
return NULL;
}
- xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
- seg->trbs, (unsigned long long)dma);
- memset(seg->trbs, 0, SEGMENT_SIZE);
+ memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
+ /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
+ if (cycle_state == 0) {
+ for (i = 0; i < TRBS_PER_SEGMENT; i++)
+ seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+ }
seg->dma = dma;
seg->next = NULL;
@@ -61,18 +67,27 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
- if (!seg)
- return;
if (seg->trbs) {
- xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
- seg->trbs, (unsigned long long)seg->dma);
dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
seg->trbs = NULL;
}
- xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
kfree(seg);
}
+static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
+ struct xhci_segment *first)
+{
+ struct xhci_segment *seg;
+
+ seg = first->next;
+ while (seg != first) {
+ struct xhci_segment *next = seg->next;
+ xhci_segment_free(xhci, seg);
+ seg = next;
+ }
+ xhci_segment_free(xhci, first);
+}
+
/*
* Make the prev segment point to the next segment.
*
@@ -81,52 +96,199 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
* related flags, such as End TRB, Toggle Cycle, and no snoop.
*/
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
- struct xhci_segment *next, bool link_trbs)
+ struct xhci_segment *next, enum xhci_ring_type type)
{
u32 val;
if (!prev || !next)
return;
prev->next = next;
- if (link_trbs) {
- prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
+ if (type != TYPE_EVENT) {
+ prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
+ cpu_to_le64(next->dma);
/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
- val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
+ val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
val &= ~TRB_TYPE_BITMASK;
val |= TRB_TYPE(TRB_LINK);
/* Always set the chain bit with 0.95 hardware */
- if (xhci_link_trb_quirk(xhci))
+ /* Set chain bit for isoc rings on AMD 0.96 host */
+ if (xhci_link_trb_quirk(xhci) ||
+ (type == TYPE_ISOC &&
+ (xhci->quirks & XHCI_AMD_0x96_HOST)))
val |= TRB_CHAIN;
- prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
+ prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
}
- xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
- (unsigned long long)prev->dma,
- (unsigned long long)next->dma);
}
-/* XXX: Do we need the hcd structure in all these functions? */
-void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+/*
+ * Link the ring to the new segments.
+ * Set Toggle Cycle for the new ring if needed.
+ */
+static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ struct xhci_segment *first, struct xhci_segment *last,
+ unsigned int num_segs)
+{
+ struct xhci_segment *next;
+
+ if (!ring || !first || !last)
+ return;
+
+ next = ring->enq_seg->next;
+ xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
+ xhci_link_segments(xhci, last, next, ring->type);
+ ring->num_segs += num_segs;
+ ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
+
+ if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
+ ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
+ &= ~cpu_to_le32(LINK_TOGGLE);
+ last->trbs[TRBS_PER_SEGMENT-1].link.control
+ |= cpu_to_le32(LINK_TOGGLE);
+ ring->last_seg = last;
+ }
+}
+
+/*
+ * We need a radix tree for mapping physical addresses of TRBs to which stream
+ * ID they belong to. We need to do this because the host controller won't tell
+ * us which stream ring the TRB came from. We could store the stream ID in an
+ * event data TRB, but that doesn't help us for the cancellation case, since the
+ * endpoint may stop before it reaches that event data TRB.
+ *
+ * The radix tree maps the upper portion of the TRB DMA address to a ring
+ * segment that has the same upper portion of DMA addresses. For example, say I
+ * have segments of size 1KB, that are always 1KB aligned. A segment may
+ * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
+ * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
+ * pass the radix tree a key to get the right stream ID:
+ *
+ * 0x10c90fff >> 10 = 0x43243
+ * 0x10c912c0 >> 10 = 0x43244
+ * 0x10c91400 >> 10 = 0x43245
+ *
+ * Obviously, only those TRBs with DMA addresses that are within the segment
+ * will make the radix tree return the stream ID for that ring.
+ *
+ * Caveats for the radix tree:
+ *
+ * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
+ * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
+ * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
+ * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
+ * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
+ * extended systems (where the DMA address can be bigger than 32-bits),
+ * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
+ */
+static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
+ struct xhci_ring *ring,
+ struct xhci_segment *seg,
+ gfp_t mem_flags)
+{
+ unsigned long key;
+ int ret;
+
+ key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+ /* Skip any segments that were already added. */
+ if (radix_tree_lookup(trb_address_map, key))
+ return 0;
+
+ ret = radix_tree_maybe_preload(mem_flags);
+ if (ret)
+ return ret;
+ ret = radix_tree_insert(trb_address_map,
+ key, ring);
+ radix_tree_preload_end();
+ return ret;
+}
+
+static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
+ struct xhci_segment *seg)
+{
+ unsigned long key;
+
+ key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+ if (radix_tree_lookup(trb_address_map, key))
+ radix_tree_delete(trb_address_map, key);
+}
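
To make the key scheme concrete, here is a minimal, hypothetical lookup helper (a sketch only; it assumes the TRB_SEGMENT_SHIFT constant and radix tree types from xhci.h, and mirrors what xhci_dma_to_transfer_ring() does further down for stream endpoints):

static struct xhci_ring *trb_dma_to_ring(struct radix_tree_root *trb_address_map,
		dma_addr_t trb_dma)
{
	/* All TRBs in a segment share the same upper address bits, so
	 * dropping the in-segment offset yields that segment's key.
	 */
	unsigned long key = (unsigned long)(trb_dma >> TRB_SEGMENT_SHIFT);

	/* Returns the ring that owns the covering segment, or NULL if no
	 * segment containing trb_dma was ever inserted into the tree.
	 */
	return radix_tree_lookup(trb_address_map, key);
}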
+
+static int xhci_update_stream_segment_mapping(
+ struct radix_tree_root *trb_address_map,
+ struct xhci_ring *ring,
+ struct xhci_segment *first_seg,
+ struct xhci_segment *last_seg,
+ gfp_t mem_flags)
{
struct xhci_segment *seg;
- struct xhci_segment *first_seg;
+ struct xhci_segment *failed_seg;
+ int ret;
+
+ if (WARN_ON_ONCE(trb_address_map == NULL))
+ return 0;
- if (!ring || !ring->first_seg)
+ seg = first_seg;
+ do {
+ ret = xhci_insert_segment_mapping(trb_address_map,
+ ring, seg, mem_flags);
+ if (ret)
+ goto remove_streams;
+ if (seg == last_seg)
+ return 0;
+ seg = seg->next;
+ } while (seg != first_seg);
+
+ return 0;
+
+remove_streams:
+ failed_seg = seg;
+ seg = first_seg;
+ do {
+ xhci_remove_segment_mapping(trb_address_map, seg);
+ if (seg == failed_seg)
+ return ret;
+ seg = seg->next;
+ } while (seg != first_seg);
+
+ return ret;
+}
+
+static void xhci_remove_stream_mapping(struct xhci_ring *ring)
+{
+ struct xhci_segment *seg;
+
+ if (WARN_ON_ONCE(ring->trb_address_map == NULL))
return;
- first_seg = ring->first_seg;
- seg = first_seg->next;
- xhci_dbg(xhci, "Freeing ring at %p\n", ring);
- while (seg != first_seg) {
- struct xhci_segment *next = seg->next;
- xhci_segment_free(xhci, seg);
- seg = next;
+
+ seg = ring->first_seg;
+ do {
+ xhci_remove_segment_mapping(ring->trb_address_map, seg);
+ seg = seg->next;
+ } while (seg != ring->first_seg);
+}
+
+static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
+{
+ return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
+ ring->first_seg, ring->last_seg, mem_flags);
+}
+
+/* XXX: Do we need the hcd structure in all these functions? */
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ if (!ring)
+ return;
+
+ if (ring->first_seg) {
+ if (ring->type == TYPE_STREAM)
+ xhci_remove_stream_mapping(ring);
+ xhci_free_segments_for_ring(xhci, ring->first_seg);
}
- xhci_segment_free(xhci, first_seg);
- ring->first_seg = NULL;
+
kfree(ring);
}
-static void xhci_initialize_ring_info(struct xhci_ring *ring)
+static void xhci_initialize_ring_info(struct xhci_ring *ring,
+ unsigned int cycle_state)
{
/* The ring is empty, so the enqueue pointer == dequeue pointer */
ring->enqueue = ring->first_seg->trbs;
@@ -136,11 +298,58 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
/* The ring is initialized to 0. The producer must write 1 to the cycle
* bit to handover ownership of the TRB, so PCS = 1. The consumer must
* compare CCS to the cycle bit to check ownership, so CCS = 1.
+ *
+ * New rings are initialized with cycle state equal to 1; if we are
+ * handling ring expansion, set the cycle state equal to the old ring.
*/
- ring->cycle_state = 1;
+ ring->cycle_state = cycle_state;
/* Not necessary for new rings, but needed for re-initialized rings */
ring->enq_updates = 0;
ring->deq_updates = 0;
+
+ /*
+ * Each segment has a link TRB, and leave an extra TRB for SW
+ * accounting purpose
+ */
+ ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
+}
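
A worked example of the free-TRB accounting above (assuming TRBS_PER_SEGMENT is 64; the exact constant lives in xhci.h): a freshly initialized 2-segment ring starts with 2 * (64 - 1) - 1 = 125 free TRBs, since the last TRB of each segment is taken by its link TRB and one further slot is held back so that enqueue == dequeue can unambiguously mean "ring empty" rather than "ring full".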
+
+/* Allocate segments and link them for a ring */
+static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
+ struct xhci_segment **first, struct xhci_segment **last,
+ unsigned int num_segs, unsigned int cycle_state,
+ enum xhci_ring_type type, gfp_t flags)
+{
+ struct xhci_segment *prev;
+
+ prev = xhci_segment_alloc(xhci, cycle_state, flags);
+ if (!prev)
+ return -ENOMEM;
+ num_segs--;
+
+ *first = prev;
+ while (num_segs > 0) {
+ struct xhci_segment *next;
+
+ next = xhci_segment_alloc(xhci, cycle_state, flags);
+ if (!next) {
+ prev = *first;
+ while (prev) {
+ next = prev->next;
+ xhci_segment_free(xhci, prev);
+ prev = next;
+ }
+ return -ENOMEM;
+ }
+ xhci_link_segments(xhci, prev, next, type);
+
+ prev = next;
+ num_segs--;
+ }
+ xhci_link_segments(xhci, prev, *first, type);
+ *last = prev;
+
+ return 0;
}
/**
@@ -151,51 +360,38 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
* See section 4.9.1 and figures 15 and 16.
*/
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
- unsigned int num_segs, bool link_trbs, gfp_t flags)
+ unsigned int num_segs, unsigned int cycle_state,
+ enum xhci_ring_type type, gfp_t flags)
{
struct xhci_ring *ring;
- struct xhci_segment *prev;
+ int ret;
ring = kzalloc(sizeof *(ring), flags);
- xhci_dbg(xhci, "Allocating ring at %p\n", ring);
if (!ring)
return NULL;
+ ring->num_segs = num_segs;
INIT_LIST_HEAD(&ring->td_list);
+ ring->type = type;
if (num_segs == 0)
return ring;
- ring->first_seg = xhci_segment_alloc(xhci, flags);
- if (!ring->first_seg)
+ ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
+ &ring->last_seg, num_segs, cycle_state, type, flags);
+ if (ret)
goto fail;
- num_segs--;
-
- prev = ring->first_seg;
- while (num_segs > 0) {
- struct xhci_segment *next;
- next = xhci_segment_alloc(xhci, flags);
- if (!next)
- goto fail;
- xhci_link_segments(xhci, prev, next, link_trbs);
-
- prev = next;
- num_segs--;
- }
- xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
-
- if (link_trbs) {
+ /* Only event ring does not use link TRB */
+ if (type != TYPE_EVENT) {
/* See section 4.9.2.1 and 6.4.4.1 */
- prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
- xhci_dbg(xhci, "Wrote link toggle flag to"
- " segment %p (virtual), 0x%llx (DMA)\n",
- prev, (unsigned long long)prev->dma);
+ ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+ cpu_to_le32(LINK_TOGGLE);
}
- xhci_initialize_ring_info(ring);
+ xhci_initialize_ring_info(ring, cycle_state);
return ring;
fail:
- xhci_ring_free(xhci, ring);
+ kfree(ring);
return NULL;
}
@@ -207,14 +403,13 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
rings_cached = virt_dev->num_rings_cached;
if (rings_cached < XHCI_MAX_RINGS_CACHED) {
- virt_dev->num_rings_cached++;
- rings_cached = virt_dev->num_rings_cached;
virt_dev->ring_cache[rings_cached] =
virt_dev->eps[ep_index].ring;
+ virt_dev->num_rings_cached++;
xhci_dbg(xhci, "Cached old ring, "
"%d ring%s cached\n",
- rings_cached,
- (rings_cached > 1) ? "s" : "");
+ virt_dev->num_rings_cached,
+ (virt_dev->num_rings_cached > 1) ? "s" : "");
} else {
xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
xhci_dbg(xhci, "Ring cache full (%d rings), "
@@ -228,39 +423,105 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
* pointers to the beginning of the ring.
*/
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
- struct xhci_ring *ring)
+ struct xhci_ring *ring, unsigned int cycle_state,
+ enum xhci_ring_type type)
{
struct xhci_segment *seg = ring->first_seg;
+ int i;
+
do {
memset(seg->trbs, 0,
sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
+ if (cycle_state == 0) {
+ for (i = 0; i < TRBS_PER_SEGMENT; i++)
+ seg->trbs[i].link.control |=
+ cpu_to_le32(TRB_CYCLE);
+ }
/* All endpoint rings have link TRBs */
- xhci_link_segments(xhci, seg, seg->next, 1);
+ xhci_link_segments(xhci, seg, seg->next, type);
seg = seg->next;
} while (seg != ring->first_seg);
- xhci_initialize_ring_info(ring);
+ ring->type = type;
+ xhci_initialize_ring_info(ring, cycle_state);
/* td list should be empty since all URBs have been cancelled,
* but just in case...
*/
INIT_LIST_HEAD(&ring->td_list);
}
+/*
+ * Expand an existing ring.
+ * Look for a cached ring or allocate a new ring which has same segment numbers
+ * and link the two rings.
+ */
+int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ unsigned int num_trbs, gfp_t flags)
+{
+ struct xhci_segment *first;
+ struct xhci_segment *last;
+ unsigned int num_segs;
+ unsigned int num_segs_needed;
+ int ret;
+
+ num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
+ (TRBS_PER_SEGMENT - 1);
+
+ /* Allocate number of segments we needed, or double the ring size */
+ num_segs = ring->num_segs > num_segs_needed ?
+ ring->num_segs : num_segs_needed;
+
+ ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
+ num_segs, ring->cycle_state, ring->type, flags);
+ if (ret)
+ return -ENOMEM;
+
+ if (ring->type == TYPE_STREAM)
+ ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
+ ring, first, last, flags);
+ if (ret) {
+ struct xhci_segment *next;
+ do {
+ next = first->next;
+ xhci_segment_free(xhci, first);
+ if (first == last)
+ break;
+ first = next;
+ } while (true);
+ return ret;
+ }
+
+ xhci_link_rings(xhci, ring, first, last, num_segs);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
+ "ring expansion succeed, now has %d segments",
+ ring->num_segs);
+
+ return 0;
+}
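
To see the segment arithmetic in xhci_ring_expansion() with numbers (again assuming TRBS_PER_SEGMENT is 64): each segment contributes TRBS_PER_SEGMENT - 1 = 63 usable TRBs, so a request for 100 more TRBs needs (100 + 63 - 1) / 63 = 2 segments. Because num_segs is the larger of that figure and ring->num_segs, a ring that already has 4 segments still grows by 4, i.e. the ring at least doubles on every expansion.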
+
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags)
{
- struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
+ struct xhci_container_ctx *ctx;
+
+ if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
+ return NULL;
+
+ ctx = kzalloc(sizeof(*ctx), flags);
if (!ctx)
return NULL;
- BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
ctx->type = type;
ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
if (type == XHCI_CTX_TYPE_INPUT)
ctx->size += CTX_SIZE(xhci->hcc_params);
ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
+ if (!ctx->bytes) {
+ kfree(ctx);
+ return NULL;
+ }
memset(ctx->bytes, 0, ctx->size);
return ctx;
}
@@ -277,7 +538,9 @@ static void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
- BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
+ if (ctx->type != XHCI_CTX_TYPE_INPUT)
+ return NULL;
+
return (struct xhci_input_control_ctx *)ctx->bytes;
}
@@ -307,17 +570,17 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
/***************** Streams structures manipulation *************************/
-void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
- if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
- pci_free_consistent(pdev,
- sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+ if (size > MEDIUM_STREAM_ARRAY_SIZE)
+ dma_free_coherent(dev, size,
stream_ctx, dma);
- else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+ else if (size <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_free(xhci->small_streams_pool,
stream_ctx, dma);
else
@@ -335,17 +598,17 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
* The stream context array must be a power of 2, and can be as small as
* 64 bytes or as large as 1MB.
*/
-struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
- if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
- return pci_alloc_consistent(pdev,
- sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
- dma);
- else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+ if (size > MEDIUM_STREAM_ARRAY_SIZE)
+ return dma_alloc_coherent(dev, size,
+ dma, mem_flags);
+ else if (size <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_alloc(xhci->small_streams_pool,
mem_flags, dma);
else
@@ -359,21 +622,10 @@ struct xhci_ring *xhci_dma_to_transfer_ring(
{
if (ep->ep_state & EP_HAS_STREAMS)
return radix_tree_lookup(&ep->stream_info->trb_address_map,
- address >> SEGMENT_SHIFT);
+ address >> TRB_SEGMENT_SHIFT);
return ep->ring;
}
-/* Only use this when you know stream_info is valid */
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-static struct xhci_ring *dma_to_stream_ring(
- struct xhci_stream_info *stream_info,
- u64 address)
-{
- return radix_tree_lookup(&stream_info->trb_address_map,
- address >> SEGMENT_SHIFT);
-}
-#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
-
struct xhci_ring *xhci_stream_id_to_ring(
struct xhci_virt_device *dev,
unsigned int ep_index,
@@ -391,58 +643,6 @@ struct xhci_ring *xhci_stream_id_to_ring(
return ep->stream_info->stream_rings[stream_id];
}
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-static int xhci_test_radix_tree(struct xhci_hcd *xhci,
- unsigned int num_streams,
- struct xhci_stream_info *stream_info)
-{
- u32 cur_stream;
- struct xhci_ring *cur_ring;
- u64 addr;
-
- for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
- struct xhci_ring *mapped_ring;
- int trb_size = sizeof(union xhci_trb);
-
- cur_ring = stream_info->stream_rings[cur_stream];
- for (addr = cur_ring->first_seg->dma;
- addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
- addr += trb_size) {
- mapped_ring = dma_to_stream_ring(stream_info, addr);
- if (cur_ring != mapped_ring) {
- xhci_warn(xhci, "WARN: DMA address 0x%08llx "
- "didn't map to stream ID %u; "
- "mapped to ring %p\n",
- (unsigned long long) addr,
- cur_stream,
- mapped_ring);
- return -EINVAL;
- }
- }
- /* One TRB after the end of the ring segment shouldn't return a
- * pointer to the current ring (although it may be a part of a
- * different ring).
- */
- mapped_ring = dma_to_stream_ring(stream_info, addr);
- if (mapped_ring != cur_ring) {
- /* One TRB before should also fail */
- addr = cur_ring->first_seg->dma - trb_size;
- mapped_ring = dma_to_stream_ring(stream_info, addr);
- }
- if (mapped_ring == cur_ring) {
- xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
- "mapped to valid stream ID %u; "
- "mapped ring = %p\n",
- (unsigned long long) addr,
- cur_stream,
- mapped_ring);
- return -EINVAL;
- }
- }
- return 0;
-}
-#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
-
/*
* Change an endpoint's internal structure so it supports stream IDs. The
* number of requested streams includes stream 0, which cannot be used by device
@@ -451,36 +651,6 @@ static int xhci_test_radix_tree(struct xhci_hcd *xhci,
* The number of stream contexts in the stream context array may be bigger than
* the number of streams the driver wants to use. This is because the number of
* stream context array entries must be a power of two.
- *
- * We need a radix tree for mapping physical addresses of TRBs to which stream
- * ID they belong to. We need to do this because the host controller won't tell
- * us which stream ring the TRB came from. We could store the stream ID in an
- * event data TRB, but that doesn't help us for the cancellation case, since the
- * endpoint may stop before it reaches that event data TRB.
- *
- * The radix tree maps the upper portion of the TRB DMA address to a ring
- * segment that has the same upper portion of DMA addresses. For example, say I
- * have segments of size 1KB, that are always 64-byte aligned. A segment may
- * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
- * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
- * pass the radix tree a key to get the right stream ID:
- *
- * 0x10c90fff >> 10 = 0x43243
- * 0x10c912c0 >> 10 = 0x43244
- * 0x10c91400 >> 10 = 0x43245
- *
- * Obviously, only those TRBs with DMA addresses that are within the segment
- * will make the radix tree return the stream ID for that ring.
- *
- * Caveats for the radix tree:
- *
- * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
- * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
- * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
- * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
- * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
- * extended systems (where the DMA address can be bigger than 32-bits),
- * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
*/
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
@@ -489,7 +659,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
struct xhci_stream_info *stream_info;
u32 cur_stream;
struct xhci_ring *cur_ring;
- unsigned long key;
u64 addr;
int ret;
@@ -539,23 +708,22 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
*/
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
stream_info->stream_rings[cur_stream] =
- xhci_ring_alloc(xhci, 1, true, mem_flags);
+ xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
cur_ring = stream_info->stream_rings[cur_stream];
if (!cur_ring)
goto cleanup_rings;
cur_ring->stream_id = cur_stream;
+ cur_ring->trb_address_map = &stream_info->trb_address_map;
/* Set deq ptr, cycle bit, and stream context type */
addr = cur_ring->first_seg->dma |
SCT_FOR_CTX(SCT_PRI_TR) |
cur_ring->cycle_state;
- stream_info->stream_ctx_array[cur_stream].stream_ring = addr;
+ stream_info->stream_ctx_array[cur_stream].stream_ring =
+ cpu_to_le64(addr);
xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
cur_stream, (unsigned long long) addr);
- key = (unsigned long)
- (cur_ring->first_seg->dma >> SEGMENT_SHIFT);
- ret = radix_tree_insert(&stream_info->trb_address_map,
- key, cur_ring);
+ ret = xhci_update_stream_mapping(cur_ring, mem_flags);
if (ret) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
@@ -568,13 +736,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
* was any other way, the host controller would assume the ring is
* "empty" and wait forever for data to be queued to that stream ID).
*/
-#if XHCI_DEBUG
- /* Do a little test on the radix tree to make sure it returns the
- * correct values.
- */
- if (xhci_test_radix_tree(xhci, num_streams, stream_info))
- goto cleanup_rings;
-#endif
return stream_info;
@@ -582,9 +743,6 @@ cleanup_rings:
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
- addr = cur_ring->first_seg->dma;
- radix_tree_delete(&stream_info->trb_address_map,
- addr >> SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
@@ -612,12 +770,13 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
* fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
*/
max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
- xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Setting number of stream ctx array entries to %u",
1 << (max_primary_streams + 1));
- ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
- ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams);
- ep_ctx->ep_info |= EP_HAS_LSA;
- ep_ctx->deq = stream_info->ctx_array_dma;
+ ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
+ ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
+ | EP_HAS_LSA);
+ ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
/*
@@ -630,10 +789,9 @@ void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep)
{
dma_addr_t addr;
- ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
- ep_ctx->ep_info &= ~EP_HAS_LSA;
+ ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
- ep_ctx->deq = addr | ep->ring->cycle_state;
+ ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}
/* Frees all stream contexts associated with the endpoint,
@@ -645,7 +803,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
{
int cur_stream;
struct xhci_ring *cur_ring;
- dma_addr_t addr;
if (!stream_info)
return;
@@ -654,9 +811,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
- addr = cur_ring->first_seg->dma;
- radix_tree_delete(&stream_info->trb_address_map,
- addr >> SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
@@ -669,8 +823,7 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
stream_info->stream_ctx_array,
stream_info->ctx_array_dma);
- if (stream_info)
- kfree(stream_info->stream_rings);
+ kfree(stream_info->stream_rings);
kfree(stream_info);
}
@@ -686,11 +839,84 @@ static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
ep->xhci = xhci;
}
-/* All the xhci_tds in the ring's TD list should be freed at this point */
+static void xhci_free_tt_info(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int slot_id)
+{
+ struct list_head *tt_list_head;
+ struct xhci_tt_bw_info *tt_info, *next;
+ bool slot_found = false;
+
+ /* If the device never made it past the Set Address stage,
+ * it may not have the real_port set correctly.
+ */
+ if (virt_dev->real_port == 0 ||
+ virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
+ xhci_dbg(xhci, "Bad real port.\n");
+ return;
+ }
+
+ tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
+ list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+ /* Multi-TT hubs will have more than one entry */
+ if (tt_info->slot_id == slot_id) {
+ slot_found = true;
+ list_del(&tt_info->tt_list);
+ kfree(tt_info);
+ } else if (slot_found) {
+ break;
+ }
+ }
+}
+
+int xhci_alloc_tt_info(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags)
+{
+ struct xhci_tt_bw_info *tt_info;
+ unsigned int num_ports;
+ int i, j;
+
+ if (!tt->multi)
+ num_ports = 1;
+ else
+ num_ports = hdev->maxchild;
+
+ for (i = 0; i < num_ports; i++, tt_info++) {
+ struct xhci_interval_bw_table *bw_table;
+
+ tt_info = kzalloc(sizeof(*tt_info), mem_flags);
+ if (!tt_info)
+ goto free_tts;
+ INIT_LIST_HEAD(&tt_info->tt_list);
+ list_add(&tt_info->tt_list,
+ &xhci->rh_bw[virt_dev->real_port - 1].tts);
+ tt_info->slot_id = virt_dev->udev->slot_id;
+ if (tt->multi)
+ tt_info->ttport = i+1;
+ bw_table = &tt_info->bw_table;
+ for (j = 0; j < XHCI_MAX_INTERVAL; j++)
+ INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
+ }
+ return 0;
+
+free_tts:
+ xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
+ return -ENOMEM;
+}
+
+
+/* All the xhci_tds in the ring's TD list should be freed at this point.
+ * Should be called with xhci->lock held if there is any chance the TT lists
+ * will be manipulated by the configure endpoint, allocate device, or update
+ * hub functions while this function is removing the TT entries from the list.
+ */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *dev;
int i;
+ int old_active_eps = 0;
/* Slot ID 0 is reserved */
if (slot_id == 0 || !xhci->devs[slot_id])
@@ -701,13 +927,29 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (!dev)
return;
+ if (dev->tt_info)
+ old_active_eps = dev->tt_info->active_eps;
+
for (i = 0; i < 31; ++i) {
if (dev->eps[i].ring)
xhci_ring_free(xhci, dev->eps[i].ring);
if (dev->eps[i].stream_info)
xhci_free_stream_info(xhci,
dev->eps[i].stream_info);
+ /* Endpoints on the TT/root port lists should have been removed
+ * when usb_disable_device() was called for the device.
+ * We can't drop them anyway, because the udev might have gone
+ * away by this point, and we can't tell what speed it was.
+ */
+ if (!list_empty(&dev->eps[i].bw_endpoint_list))
+ xhci_warn(xhci, "Slot %u endpoint %u "
+ "not removed from BW list!\n",
+ slot_id, i);
}
+ /* If this is a hub, free the TT(s) from the TT list */
+ xhci_free_tt_info(xhci, dev, slot_id);
+ /* If necessary, update the number of active TTs on this root port */
+ xhci_update_tt_active_eps(xhci, dev, old_active_eps);
if (dev->ring_cache) {
for (i = 0; i < dev->num_rings_cached; i++)
@@ -761,10 +1003,11 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
for (i = 0; i < 31; i++) {
xhci_init_endpoint_timer(xhci, &dev->eps[i]);
INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
+ INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
}
/* Allocate endpoint 0 ring */
- dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+ dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
if (!dev->eps[0].ring)
goto fail;
@@ -777,15 +1020,14 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
dev->num_rings_cached = 0;
init_completion(&dev->cmd_completion);
- INIT_LIST_HEAD(&dev->cmd_list);
dev->udev = udev;
/* Point to output device context in dcbaa. */
- xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
+ xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
- slot_id,
- &xhci->dcbaa->dev_context_ptrs[slot_id],
- (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);
+ slot_id,
+ &xhci->dcbaa->dev_context_ptrs[slot_id],
+ le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
return 1;
fail:
@@ -810,8 +1052,38 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
* configured device has reset, so all control transfers should have
* been completed or cancelled before the reset.
*/
- ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
- ep0_ctx->deq |= ep_ring->cycle_state;
+ ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
+ ep_ring->enqueue)
+ | ep_ring->cycle_state);
+}
+
+/*
+ * The xHCI roothub may have ports of differing speeds in any order in the port
+ * status registers. xhci->port_array provides an array of the port speed for
+ * each offset into the port status registers.
+ *
+ * The xHCI hardware wants to know the roothub port number that the USB device
+ * is attached to (or the roothub port its ancestor hub is attached to). All we
+ * know is the index of that port under either the USB 2.0 or the USB 3.0
+ * roothub, but that doesn't give us the real index into the HW port status
+ * registers. Call xhci_find_raw_port_number() to get real index.
+ */
+static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
+ struct usb_device *udev)
+{
+ struct usb_device *top_dev;
+ struct usb_hcd *hcd;
+
+ if (udev->speed == USB_SPEED_SUPER)
+ hcd = xhci->shared_hcd;
+ else
+ hcd = xhci->main_hcd;
+
+ for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
+ top_dev = top_dev->parent)
+ /* Found device below root hub */;
+
+ return xhci_find_raw_port_number(hcd, top_dev->portnum);
}
/* Setup an xHCI virtual device for a Set Address command */
@@ -819,9 +1091,10 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
{
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep0_ctx;
- struct usb_device *top_dev;
struct xhci_slot_ctx *slot_ctx;
- struct xhci_input_control_ctx *ctrl_ctx;
+ u32 port_num;
+ u32 max_packets;
+ struct usb_device *top_dev;
dev = xhci->devs[udev->slot_id];
/* Slot ID 0 is reserved */
@@ -831,28 +1104,27 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
return -EINVAL;
}
ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
- ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
- /* 2) New slot context and endpoint 0 context are valid*/
- ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
-
/* 3) Only the control endpoint is valid - one endpoint context */
- slot_ctx->dev_info |= LAST_CTX(1);
-
- slot_ctx->dev_info |= (u32) udev->route;
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
switch (udev->speed) {
case USB_SPEED_SUPER:
- slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
+ max_packets = MAX_PACKET(512);
break;
case USB_SPEED_HIGH:
- slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
+ max_packets = MAX_PACKET(64);
break;
+ /* USB core guesses at a 64-byte max packet first for FS devices */
case USB_SPEED_FULL:
- slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
+ max_packets = MAX_PACKET(64);
break;
case USB_SPEED_LOW:
- slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
+ max_packets = MAX_PACKET(8);
break;
case USB_SPEED_WIRELESS:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -860,66 +1132,147 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
break;
default:
/* Speed was set earlier, this shouldn't happen. */
- BUG();
+ return -EINVAL;
}
/* Find the root hub port this device is under */
+ port_num = xhci_find_real_port_number(xhci, udev);
+ if (!port_num)
+ return -EINVAL;
+ slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
+ /* Set the port number in the virtual_device to the faked port number */
for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
top_dev = top_dev->parent)
/* Found device below root hub */;
- slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
- dev->port = top_dev->portnum;
- xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
-
- /* Is this a LS/FS device under a HS hub? */
- if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
- udev->tt) {
- slot_ctx->tt_info = udev->tt->hub->slot_id;
- slot_ctx->tt_info |= udev->ttport << 8;
+ dev->fake_port = top_dev->portnum;
+ dev->real_port = port_num;
+ xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
+ xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
+
+ /* Find the right bandwidth table that this device will be a part of.
+ * If this is a full speed device attached directly to a root port (or a
+ * decendent of one), it counts as a primary bandwidth domain, not a
+ * secondary bandwidth domain under a TT. An xhci_tt_info structure
+ * will never be created for the HS root hub.
+ */
+ if (!udev->tt || !udev->tt->hub->parent) {
+ dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
+ } else {
+ struct xhci_root_port_bw_info *rh_bw;
+ struct xhci_tt_bw_info *tt_bw;
+
+ rh_bw = &xhci->rh_bw[port_num - 1];
+ /* Find the right TT. */
+ list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
+ if (tt_bw->slot_id != udev->tt->hub->slot_id)
+ continue;
+
+ if (!dev->udev->tt->multi ||
+ (udev->tt->multi &&
+ tt_bw->ttport == dev->udev->ttport)) {
+ dev->bw_table = &tt_bw->bw_table;
+ dev->tt_info = tt_bw;
+ break;
+ }
+ }
+ if (!dev->tt_info)
+ xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
+ }
+
+ /* Is this a LS/FS device under an external HS hub? */
+ if (udev->tt && udev->tt->hub->parent) {
+ slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
+ (udev->ttport << 8));
if (udev->tt->multi)
- slot_ctx->dev_info |= DEV_MTT;
+ slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
}
xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
/* Step 4 - ring already allocated */
/* Step 5 */
- ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
- /*
- * XXX: Not sure about wireless USB devices.
- */
- switch (udev->speed) {
- case USB_SPEED_SUPER:
- ep0_ctx->ep_info2 |= MAX_PACKET(512);
- break;
- case USB_SPEED_HIGH:
- /* USB core guesses at a 64-byte max packet first for FS devices */
- case USB_SPEED_FULL:
- ep0_ctx->ep_info2 |= MAX_PACKET(64);
- break;
- case USB_SPEED_LOW:
- ep0_ctx->ep_info2 |= MAX_PACKET(8);
- break;
- case USB_SPEED_WIRELESS:
- xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
- return -EINVAL;
- break;
- default:
- /* New speed? */
- BUG();
- }
+ ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
+
/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
- ep0_ctx->ep_info2 |= MAX_BURST(0);
- ep0_ctx->ep_info2 |= ERROR_COUNT(3);
+ ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
+ max_packets);
- ep0_ctx->deq =
- dev->eps[0].ring->first_seg->dma;
- ep0_ctx->deq |= dev->eps[0].ring->cycle_state;
+ ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
+ dev->eps[0].ring->cycle_state);
/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
return 0;
}
+/*
+ * Convert interval expressed as 2^(bInterval - 1) == interval into
+ * straight exponent value 2^n == interval.
+ *
+ */
+static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int interval;
+
+ interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
+ if (interval != ep->desc.bInterval - 1)
+ dev_warn(&udev->dev,
+ "ep %#x - rounding interval to %d %sframes\n",
+ ep->desc.bEndpointAddress,
+ 1 << interval,
+ udev->speed == USB_SPEED_FULL ? "" : "micro");
+
+ if (udev->speed == USB_SPEED_FULL) {
+ /*
+ * Full speed isoc endpoints specify interval in frames,
+ * not microframes. We are using microframes everywhere,
+ * so adjust accordingly.
+ */
+ interval += 3; /* 1 frame = 2^3 uframes */
+ }
+
+ return interval;
+}
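
A worked example of the exponent decoding (illustrative, not from the patch itself): a high-speed interrupt endpoint with bInterval = 4 yields interval = clamp(4, 1, 16) - 1 = 3, i.e. 2^3 = 8 microframes = 1 ms, with no warning since nothing was rounded. For a full-speed isochronous endpoint the descriptor counts frames, so the same bInterval = 4 becomes 3 + 3 = 6, i.e. 2^6 = 64 microframes = 8 frames = 2^(bInterval - 1) frames, as expected.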
+
+/*
+ * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
+ * microframes, rounded down to nearest power of 2.
+ */
+static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
+ struct usb_host_endpoint *ep, unsigned int desc_interval,
+ unsigned int min_exponent, unsigned int max_exponent)
+{
+ unsigned int interval;
+
+ interval = fls(desc_interval) - 1;
+ interval = clamp_val(interval, min_exponent, max_exponent);
+ if ((1 << interval) != desc_interval)
+ dev_warn(&udev->dev,
+ "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
+ ep->desc.bEndpointAddress,
+ 1 << interval,
+ desc_interval);
+
+ return interval;
+}
+
+static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ if (ep->desc.bInterval == 0)
+ return 0;
+ return xhci_microframes_to_exponent(udev, ep,
+ ep->desc.bInterval, 0, 15);
+}
+
+
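+/*
+ * Convert a bInterval expressed in frames into an exponent of microframes;
+ * 1 frame = 8 microframes, and the result is clamped to the 2^3..2^10
+ * microframe range.
+ */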
+static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ return xhci_microframes_to_exponent(udev, ep,
+ ep->desc.bInterval * 8, 3, 10);
+}
+
/* Return the polling or NAK interval.
*
* The polling interval is expressed in "microframes". If xHCI's Interval field
@@ -928,7 +1281,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
* The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
* is set to 0.
*/
-static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
unsigned int interval = 0;
@@ -937,45 +1290,38 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
case USB_SPEED_HIGH:
/* Max NAK rate */
if (usb_endpoint_xfer_control(&ep->desc) ||
- usb_endpoint_xfer_bulk(&ep->desc))
- interval = ep->desc.bInterval;
+ usb_endpoint_xfer_bulk(&ep->desc)) {
+ interval = xhci_parse_microframe_interval(udev, ep);
+ break;
+ }
/* Fall through - SS and HS isoc/int have same decoding */
+
case USB_SPEED_SUPER:
if (usb_endpoint_xfer_int(&ep->desc) ||
- usb_endpoint_xfer_isoc(&ep->desc)) {
- if (ep->desc.bInterval == 0)
- interval = 0;
- else
- interval = ep->desc.bInterval - 1;
- if (interval > 15)
- interval = 15;
- if (interval != ep->desc.bInterval + 1)
- dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
- ep->desc.bEndpointAddress, 1 << interval);
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ interval = xhci_parse_exponent_interval(udev, ep);
}
break;
- /* Convert bInterval (in 1-255 frames) to microframes and round down to
- * nearest power of 2.
- */
+
case USB_SPEED_FULL:
+ if (usb_endpoint_xfer_isoc(&ep->desc)) {
+ interval = xhci_parse_exponent_interval(udev, ep);
+ break;
+ }
+ /*
+ * Fall through for interrupt endpoint interval decoding
+ * since it uses the same rules as low speed interrupt
+ * endpoints.
+ */
+
case USB_SPEED_LOW:
if (usb_endpoint_xfer_int(&ep->desc) ||
- usb_endpoint_xfer_isoc(&ep->desc)) {
- interval = fls(8*ep->desc.bInterval) - 1;
- if (interval > 10)
- interval = 10;
- if (interval < 3)
- interval = 3;
- if ((1 << interval) != 8*ep->desc.bInterval)
- dev_warn(&udev->dev,
- "ep %#x - rounding interval"
- " to %d microframes, "
- "ep desc says %d microframes\n",
- ep->desc.bEndpointAddress,
- 1 << interval,
- 8*ep->desc.bInterval);
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+
+ interval = xhci_parse_frame_interval(udev, ep);
}
break;
+
default:
BUG();
}
@@ -987,7 +1333,7 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
* transaction opportunities per microframe", but that goes in the Max Burst
* endpoint context field.
*/
-static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
+static u32 xhci_get_endpoint_mult(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
if (udev->speed != USB_SPEED_SUPER ||
@@ -996,7 +1342,7 @@ static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
return ep->ss_ep_comp.bmAttributes;
}
-static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
+static u32 xhci_get_endpoint_type(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
int in;
@@ -1021,7 +1367,7 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
else
type = EP_TYPE(INT_OUT_EP);
} else {
- BUG();
+ type = 0;
}
return type;
}
@@ -1030,7 +1376,7 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
* Basically, this is the maxpacket size, multiplied by the burst size
* and mult size.
*/
-static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
+static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
struct usb_device *udev,
struct usb_host_endpoint *ep)
{
@@ -1043,10 +1389,10 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
return 0;
if (udev->speed == USB_SPEED_SUPER)
- return ep->ss_ep_comp.wBytesPerInterval;
+ return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
- max_packet = ep->desc.wMaxPacketSize & 0x3ff;
- max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+ max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+ max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
/* A 0 in max burst means 1 transfer per ESIT */
return max_packet * (max_burst + 1);
}
@@ -1065,24 +1411,22 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
unsigned int max_packet;
unsigned int max_burst;
+ enum xhci_ring_type type;
u32 max_esit_payload;
+ u32 endpoint_type;
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+ endpoint_type = xhci_get_endpoint_type(udev, ep);
+ if (!endpoint_type)
+ return -EINVAL;
+ ep_ctx->ep_info2 = cpu_to_le32(endpoint_type);
+
+ type = usb_endpoint_type(&ep->desc);
/* Set up the endpoint ring */
- /*
- * Isochronous endpoint ring needs bigger size because one isoc URB
- * carries multiple packets and it will insert multiple tds to the
- * ring.
- * This should be replaced with dynamic ring resizing in the future.
- */
- if (usb_endpoint_xfer_isoc(&ep->desc))
- virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 8, true, mem_flags);
- else
- virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 1, true, mem_flags);
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
if (!virt_dev->eps[ep_index].new_ring) {
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
@@ -1091,58 +1435,57 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
virt_dev->ring_cache[virt_dev->num_rings_cached];
virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
virt_dev->num_rings_cached--;
- xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+ xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+ 1, type);
}
virt_dev->eps[ep_index].skip = false;
ep_ring = virt_dev->eps[ep_index].new_ring;
- ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
+ ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
- ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
- ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
+ ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
+ | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
/* FIXME dig Mult and streams info out of ep companion desc */
/* Allow 3 retries for everything but isoc;
- * error count = 0 means infinite retries.
+ * CErr shall be set to 0 for Isoch endpoints.
*/
if (!usb_endpoint_xfer_isoc(&ep->desc))
- ep_ctx->ep_info2 = ERROR_COUNT(3);
+ ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(3));
else
- ep_ctx->ep_info2 = ERROR_COUNT(1);
-
- ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
+ ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(0));
/* Set the max packet size and max burst */
+ max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+ max_burst = 0;
switch (udev->speed) {
case USB_SPEED_SUPER:
- max_packet = ep->desc.wMaxPacketSize;
- ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
/* dig out max burst from ep companion desc */
- max_packet = ep->ss_ep_comp.bMaxBurst;
- if (!max_packet)
- xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
- ep_ctx->ep_info2 |= MAX_BURST(max_packet);
+ max_burst = ep->ss_ep_comp.bMaxBurst;
break;
case USB_SPEED_HIGH:
+ /* Some devices get this wrong */
+ if (usb_endpoint_xfer_bulk(&ep->desc))
+ max_packet = 512;
/* bits 11:12 specify the number of additional transaction
* opportunities per microframe (USB 2.0, section 9.6.6)
*/
if (usb_endpoint_xfer_isoc(&ep->desc) ||
usb_endpoint_xfer_int(&ep->desc)) {
- max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
- ep_ctx->ep_info2 |= MAX_BURST(max_burst);
+ max_burst = (usb_endpoint_maxp(&ep->desc)
+ & 0x1800) >> 11;
}
- /* Fall through */
+ break;
case USB_SPEED_FULL:
case USB_SPEED_LOW:
- max_packet = ep->desc.wMaxPacketSize & 0x3ff;
- ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
break;
default:
BUG();
}
+ ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
+ MAX_BURST(max_burst));
max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
- ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
+ ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
/*
* XXX no idea how to calculate the average TRB buffer length for bulk
@@ -1158,8 +1501,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
* including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
* use Event Data TRBs, and we don't chain in a link TRB on short
* transfers, we're basically dividing by 1.
+ *
+ * xHCI 1.0 specification indicates that the Average TRB Length should
+ * be set to 8 for control endpoints.
*/
- ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
+ if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
+ ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
+ else
+ ep_ctx->tx_info |=
+ cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
/* FIXME Debug endpoint context */
return 0;
@@ -1184,6 +1534,70 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
*/
}
+void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
+{
+ bw_info->ep_interval = 0;
+ bw_info->mult = 0;
+ bw_info->num_packets = 0;
+ bw_info->max_packet_size = 0;
+ bw_info->type = 0;
+ bw_info->max_esit_payload = 0;
+}
+
+void xhci_update_bw_info(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_input_control_ctx *ctrl_ctx,
+ struct xhci_virt_device *virt_dev)
+{
+ struct xhci_bw_info *bw_info;
+ struct xhci_ep_ctx *ep_ctx;
+ unsigned int ep_type;
+ int i;
+
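+ /* Endpoint 0 is the default control endpoint and never carries
+ * periodic bandwidth, so start at index 1.
+ */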
+ for (i = 1; i < 31; ++i) {
+ bw_info = &virt_dev->eps[i].bw_info;
+
+ /* We can't tell what endpoint type is being dropped, but
+ * unconditionally clearing the bandwidth info for non-periodic
+ * endpoints should be harmless because the info will never be
+ * set in the first place.
+ */
+ if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
+ /* Dropped endpoint */
+ xhci_clear_endpoint_bw_info(bw_info);
+ continue;
+ }
+
+ if (EP_IS_ADDED(ctrl_ctx, i)) {
+ ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
+ ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
+
+ /* Ignore non-periodic endpoints */
+ if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
+ ep_type != ISOC_IN_EP &&
+ ep_type != INT_IN_EP)
+ continue;
+
+ /* Added or changed endpoint */
+ bw_info->ep_interval = CTX_TO_EP_INTERVAL(
+ le32_to_cpu(ep_ctx->ep_info));
+ /* Number of packets and mult are zero-based in the
+ * input context, but we want one-based for the
+ * interval table.
+ */
+ bw_info->mult = CTX_TO_EP_MULT(
+ le32_to_cpu(ep_ctx->ep_info)) + 1;
+ bw_info->num_packets = CTX_TO_MAX_BURST(
+ le32_to_cpu(ep_ctx->ep_info2)) + 1;
+ bw_info->max_packet_size = MAX_PACKET_DECODED(
+ le32_to_cpu(ep_ctx->ep_info2));
+ bw_info->type = ep_type;
+ bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
+ le32_to_cpu(ep_ctx->tx_info));
+ }
+ }
+}
+
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
* Useful when you want to change one particular aspect of the endpoint and then
* issue a configure endpoint command.
@@ -1233,7 +1647,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
struct device *dev = xhci_to_hcd(xhci)->self.controller;
int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
- xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Allocating %d scratchpad buffers", num_sp);
if (!num_sp)
return 0;
@@ -1242,10 +1657,9 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
if (!xhci->scratchpad)
goto fail_sp;
- xhci->scratchpad->sp_array =
- pci_alloc_consistent(to_pci_dev(dev),
+ xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
num_sp * sizeof(u64),
- &xhci->scratchpad->sp_dma);
+ &xhci->scratchpad->sp_dma, flags);
if (!xhci->scratchpad->sp_array)
goto fail_sp2;
@@ -1259,11 +1673,11 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
if (!xhci->scratchpad->sp_dma_buffers)
goto fail_sp4;
- xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
+ xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
for (i = 0; i < num_sp; i++) {
dma_addr_t dma;
- void *buf = pci_alloc_consistent(to_pci_dev(dev),
- xhci->page_size, &dma);
+ void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
+ flags);
if (!buf)
goto fail_sp5;
@@ -1276,7 +1690,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
fail_sp5:
for (i = i - 1; i >= 0; i--) {
- pci_free_consistent(to_pci_dev(dev), xhci->page_size,
+ dma_free_coherent(dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_dma_buffers[i]);
}
@@ -1286,7 +1700,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
kfree(xhci->scratchpad->sp_buffers);
fail_sp3:
- pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
+ dma_free_coherent(dev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
@@ -1302,7 +1716,7 @@ static void scratchpad_free(struct xhci_hcd *xhci)
{
int num_sp;
int i;
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
if (!xhci->scratchpad)
return;
@@ -1310,13 +1724,13 @@ static void scratchpad_free(struct xhci_hcd *xhci)
num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
for (i = 0; i < num_sp; i++) {
- pci_free_consistent(pdev, xhci->page_size,
+ dma_free_coherent(dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_dma_buffers[i]);
}
kfree(xhci->scratchpad->sp_dma_buffers);
kfree(xhci->scratchpad->sp_buffers);
- pci_free_consistent(pdev, num_sp * sizeof(u64),
+ dma_free_coherent(dev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
kfree(xhci->scratchpad);
@@ -1361,18 +1775,10 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
- int last;
-
- if (!urb_priv)
- return;
-
- last = urb_priv->length - 1;
- if (last >= 0) {
- int i;
- for (i = 0; i <= last; i++)
- kfree(urb_priv->td[i]);
+ if (urb_priv) {
+ kfree(urb_priv->td[0]);
+ kfree(urb_priv);
}
- kfree(urb_priv);
}
void xhci_free_command(struct xhci_hcd *xhci,
@@ -1386,32 +1792,41 @@ void xhci_free_command(struct xhci_hcd *xhci,
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
int size;
- int i;
+ int i, j, num_ports;
+
+ del_timer_sync(&xhci->cmd_timer);
/* Free the Event Ring Segment Table and the actual Event Ring */
- if (xhci->ir_set) {
- xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
- xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
- xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
- }
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
if (xhci->erst.entries)
- pci_free_consistent(pdev, size,
+ dma_free_coherent(dev, size,
xhci->erst.entries, xhci->erst.erst_dma_addr);
xhci->erst.entries = NULL;
- xhci_dbg(xhci, "Freed ERST\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
if (xhci->event_ring)
xhci_ring_free(xhci, xhci->event_ring);
xhci->event_ring = NULL;
- xhci_dbg(xhci, "Freed event ring\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
- xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
+ if (xhci->lpm_command)
+ xhci_free_command(xhci, xhci->lpm_command);
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
- xhci_dbg(xhci, "Freed command ring\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
+ xhci_cleanup_command_queue(xhci);
+
+ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
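+ /* Empty the per-port interval bandwidth endpoint lists */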
+ for (i = 0; i < num_ports; i++) {
+ struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+ for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+ struct list_head *ep = &bwt->interval_bw[j].endpoints;
+ while (!list_empty(ep))
+ list_del_init(ep->next);
+ }
+ }
for (i = 1; i < MAX_HC_SLOTS; ++i)
xhci_free_virt_device(xhci, i);
@@ -1419,33 +1834,58 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
if (xhci->segment_pool)
dma_pool_destroy(xhci->segment_pool);
xhci->segment_pool = NULL;
- xhci_dbg(xhci, "Freed segment pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
if (xhci->device_pool)
dma_pool_destroy(xhci->device_pool);
xhci->device_pool = NULL;
- xhci_dbg(xhci, "Freed device context pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
if (xhci->small_streams_pool)
dma_pool_destroy(xhci->small_streams_pool);
xhci->small_streams_pool = NULL;
- xhci_dbg(xhci, "Freed small stream array pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Freed small stream array pool");
if (xhci->medium_streams_pool)
dma_pool_destroy(xhci->medium_streams_pool);
xhci->medium_streams_pool = NULL;
- xhci_dbg(xhci, "Freed medium stream array pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Freed medium stream array pool");
- xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
if (xhci->dcbaa)
- pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
+ dma_free_coherent(dev, sizeof(*xhci->dcbaa),
xhci->dcbaa, xhci->dcbaa->dma);
xhci->dcbaa = NULL;
scratchpad_free(xhci);
+
+ if (!xhci->rh_bw)
+ goto no_bw;
+
+ for (i = 0; i < num_ports; i++) {
+ struct xhci_tt_bw_info *tt, *n;
+ list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
+ list_del(&tt->tt_list);
+ kfree(tt);
+ }
+ }
+
+no_bw:
+ xhci->cmd_ring_reserved_trbs = 0;
+ xhci->num_usb2_ports = 0;
+ xhci->num_usb3_ports = 0;
+ xhci->num_active_eps = 0;
+ kfree(xhci->usb2_ports);
+ kfree(xhci->usb3_ports);
+ kfree(xhci->port_array);
+ kfree(xhci->rh_bw);
+ kfree(xhci->ext_caps);
+
xhci->page_size = 0;
xhci->page_shift = 0;
- xhci->bus_suspended = 0;
+ xhci->bus_state[0].bus_suspended = 0;
+ xhci->bus_state[1].bus_suspended = 0;
}
static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
@@ -1621,12 +2061,253 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
* there might be more events to service.
*/
temp &= ~ERST_EHB;
- xhci_dbg(xhci, "// Write event ring dequeue pointer, "
- "preserving EHB bit\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Write event ring dequeue pointer, "
+ "preserving EHB bit");
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
&xhci->ir_set->erst_dequeue);
}
+static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ __le32 __iomem *addr, u8 major_revision, int max_caps)
+{
+ u32 temp, port_offset, port_count;
+ int i;
+
+ if (major_revision > 0x03) {
+ xhci_warn(xhci, "Ignoring unknown port speed, "
+ "Ext Cap %p, revision = 0x%x\n",
+ addr, major_revision);
+ /* Ignoring port protocol we can't understand. FIXME */
+ return;
+ }
+
+ /* Port offset and count in the third dword, see section 7.2 */
+ temp = readl(addr + 2);
+ port_offset = XHCI_EXT_PORT_OFF(temp);
+ port_count = XHCI_EXT_PORT_COUNT(temp);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Ext Cap %p, port offset = %u, "
+ "count = %u, revision = 0x%x",
+ addr, port_offset, port_count, major_revision);
+ /* Port count includes the current port offset */
+ if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
+ /* WTF? "Valid values are ‘1’ to MaxPorts" */
+ return;
+
+ /* cache usb2 port capabilities */
+ if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
+ xhci->ext_caps[xhci->num_ext_caps++] = temp;
+
+ /* Check the host's USB2 LPM capability */
+ if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
+ (temp & XHCI_L1C)) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI 0.96: support USB2 software lpm");
+ xhci->sw_lpm_support = 1;
+ }
+
+ if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI 1.0: support USB2 software lpm");
+ xhci->sw_lpm_support = 1;
+ if (temp & XHCI_HLC) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI 1.0: support USB2 hardware lpm");
+ xhci->hw_lpm_support = 1;
+ }
+ }
+
+ port_offset--;
+ for (i = port_offset; i < (port_offset + port_count); i++) {
+ /* Duplicate entry. Ignore the port if the revisions differ. */
+ if (xhci->port_array[i] != 0) {
+ xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
+ " port %u\n", addr, i);
+ xhci_warn(xhci, "Port was marked as USB %u, "
+ "duplicated as USB %u\n",
+ xhci->port_array[i], major_revision);
+ /* Only adjust the roothub port counts if we haven't
+ * found a similar duplicate.
+ */
+ if (xhci->port_array[i] != major_revision &&
+ xhci->port_array[i] != DUPLICATE_ENTRY) {
+ if (xhci->port_array[i] == 0x03)
+ xhci->num_usb3_ports--;
+ else
+ xhci->num_usb2_ports--;
+ xhci->port_array[i] = DUPLICATE_ENTRY;
+ }
+ /* FIXME: Should we disable the port? */
+ continue;
+ }
+ xhci->port_array[i] = major_revision;
+ if (major_revision == 0x03)
+ xhci->num_usb3_ports++;
+ else
+ xhci->num_usb2_ports++;
+ }
+ /* FIXME: Should we disable ports not in the Extended Capabilities? */
+}
+
+/*
+ * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
+ * specify what speeds each port is supposed to be. We can't count on the port
+ * speed bits in the PORTSC register being correct until a device is connected,
+ * but we need to set up the two fake roothubs with the correct number of USB
+ * 3.0 and USB 2.0 ports at host controller initialization time.
+ */
+static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
+{
+ __le32 __iomem *addr, *tmp_addr;
+ u32 offset, tmp_offset;
+ unsigned int num_ports;
+ int i, j, port_index;
+ int cap_count = 0;
+
+ addr = &xhci->cap_regs->hcc_params;
+ offset = XHCI_HCC_EXT_CAPS(readl(addr));
+ if (offset == 0) {
+ xhci_err(xhci, "No Extended Capability registers, "
+ "unable to set up roothub.\n");
+ return -ENODEV;
+ }
+
+ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
+ if (!xhci->port_array)
+ return -ENOMEM;
+
+ xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
+ if (!xhci->rh_bw)
+ return -ENOMEM;
+ for (i = 0; i < num_ports; i++) {
+ struct xhci_interval_bw_table *bw_table;
+
+ INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
+ bw_table = &xhci->rh_bw[i].bw_table;
+ for (j = 0; j < XHCI_MAX_INTERVAL; j++)
+ INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
+ }
+
+ /*
+ * For whatever reason, the first capability offset is from the
+ * capability register base, not from the HCCPARAMS register.
+ * See section 5.3.6 for offset calculation.
+ */
+ addr = &xhci->cap_regs->hc_capbase + offset;
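+ /* The offset is counted in 32-bit words; addr is a __le32 pointer,
+ * so plain pointer arithmetic lands on the right register.
+ */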
+
+ tmp_addr = addr;
+ tmp_offset = offset;
+
+ /* count extended protocol capability entries for later caching */
+ do {
+ u32 cap_id;
+ cap_id = readl(tmp_addr);
+ if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
+ cap_count++;
+ tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id);
+ tmp_addr += tmp_offset;
+ } while (tmp_offset);
+
+ xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
+ if (!xhci->ext_caps)
+ return -ENOMEM;
+
+ while (1) {
+ u32 cap_id;
+
+ cap_id = readl(addr);
+ if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
+ xhci_add_in_port(xhci, num_ports, addr,
+ (u8) XHCI_EXT_PORT_MAJOR(cap_id),
+ cap_count);
+ offset = XHCI_EXT_CAPS_NEXT(cap_id);
+ if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
+ == num_ports)
+ break;
+ /*
+ * Once you're into the Extended Capabilities, the offset is
+ * always relative to the register holding the offset.
+ */
+ addr += offset;
+ }
+
+ if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
+ xhci_warn(xhci, "No ports on the roothubs?\n");
+ return -ENODEV;
+ }
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Found %u USB 2.0 ports and %u USB 3.0 ports.",
+ xhci->num_usb2_ports, xhci->num_usb3_ports);
+
+ /* Place limits on the number of roothub ports so that the hub
+ * descriptors aren't longer than the USB core will allocate.
+ */
+ if (xhci->num_usb3_ports > 15) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Limiting USB 3.0 roothub ports to 15.");
+ xhci->num_usb3_ports = 15;
+ }
+ if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Limiting USB 2.0 roothub ports to %u.",
+ USB_MAXCHILDREN);
+ xhci->num_usb2_ports = USB_MAXCHILDREN;
+ }
+
+ /*
+ * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
+ * Not sure how the USB core will handle a hub with no ports...
+ */
+ if (xhci->num_usb2_ports) {
+ xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
+ xhci->num_usb2_ports, flags);
+ if (!xhci->usb2_ports)
+ return -ENOMEM;
+
+ port_index = 0;
+ for (i = 0; i < num_ports; i++) {
+ if (xhci->port_array[i] == 0x03 ||
+ xhci->port_array[i] == 0 ||
+ xhci->port_array[i] == DUPLICATE_ENTRY)
+ continue;
+
+ xhci->usb2_ports[port_index] =
+ &xhci->op_regs->port_status_base +
+ NUM_PORT_REGS*i;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "USB 2.0 port at index %u, "
+ "addr = %p", i,
+ xhci->usb2_ports[port_index]);
+ port_index++;
+ if (port_index == xhci->num_usb2_ports)
+ break;
+ }
+ }
+ if (xhci->num_usb3_ports) {
+ xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
+ xhci->num_usb3_ports, flags);
+ if (!xhci->usb3_ports)
+ return -ENOMEM;
+
+ port_index = 0;
+ for (i = 0; i < num_ports; i++)
+ if (xhci->port_array[i] == 0x03) {
+ xhci->usb3_ports[port_index] =
+ &xhci->op_regs->port_status_base +
+ NUM_PORT_REGS*i;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "USB 3.0 port at index %u, "
+ "addr = %p", i,
+ xhci->usb3_ports[port_index]);
+ port_index++;
+ if (port_index == xhci->num_usb3_ports)
+ break;
+ }
+ }
+ return 0;
+}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
@@ -1635,60 +2316,67 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
unsigned int val, val2;
u64 val_64;
struct xhci_segment *seg;
- u32 page_size;
+ u32 page_size, temp;
int i;
- page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
- xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+ INIT_LIST_HEAD(&xhci->cmd_list);
+
+ page_size = readl(&xhci->op_regs->page_size);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Supported page size register = 0x%x", page_size);
for (i = 0; i < 16; i++) {
if ((0x1 & page_size) != 0)
break;
page_size = page_size >> 1;
}
if (i < 16)
- xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Supported page size of %iK", (1 << (i+12)) / 1024);
else
xhci_warn(xhci, "WARN: no supported page size\n");
/* Use 4K pages, since that's common and the minimum the HC supports */
xhci->page_shift = 12;
xhci->page_size = 1 << xhci->page_shift;
- xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "HCD page size set to %iK", xhci->page_size / 1024);
/*
* Program the Number of Device Slots Enabled field in the CONFIG
* register with the max value of slots the HC can handle.
*/
- val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
- xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
- (unsigned int) val);
- val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
+ val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// xHC can handle at most %d device slots.", val);
+ val2 = readl(&xhci->op_regs->config_reg);
val |= (val2 & ~HCS_SLOTS_MASK);
- xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
- (unsigned int) val);
- xhci_writel(xhci, val, &xhci->op_regs->config_reg);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Setting Max device slots reg = 0x%x.", val);
+ writel(val, &xhci->op_regs->config_reg);
/*
* Section 5.4.8 - doorbell array must be
* "physically contiguous and 64-byte (cache line) aligned".
*/
- xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
- sizeof(*xhci->dcbaa), &dma);
+ xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
+ GFP_KERNEL);
if (!xhci->dcbaa)
goto fail;
memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
xhci->dcbaa->dma = dma;
- xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Device context base array address = 0x%llx (DMA), %p (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
/*
* Initialize the ring segment pool. The ring must be a contiguous
* structure comprised of TRBs. The TRBs must be 16 byte aligned,
- * however, the command ring segment needs 64-byte aligned segments,
- * so we pick the greater alignment need.
+ * however, the command ring segment needs 64-byte aligned segments
+ * and our use of dma addresses in the trb_address_map radix tree needs
+ * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
- SEGMENT_SIZE, 64, xhci->page_size);
+ TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
/* See Table 46 and Note on Figure 55 */
xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
@@ -1706,18 +2394,19 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
dma_pool_create("xHCI 1KB stream ctx arrays",
dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
- * will be allocated with pci_alloc_consistent()
+ * will be allocated with dma_alloc_coherent()
*/
if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
goto fail;
/* Set up the command ring to have one segments for now. */
- xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
if (!xhci->cmd_ring)
goto fail;
- xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
- xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Allocated command ring at %p", xhci->cmd_ring);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
(unsigned long long)xhci->cmd_ring->first_seg->dma);
/* Set the address in the Command Ring Control register */
@@ -1725,42 +2414,58 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
- xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Setting command ring address to 0x%x", val);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
- val = xhci_readl(xhci, &xhci->cap_regs->db_off);
+ xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
+ if (!xhci->lpm_command)
+ goto fail;
+
+ /* Reserve one command ring TRB for disabling LPM.
+ * Since the USB core grabs the shared usb_bus bandwidth mutex before
+ * disabling LPM, we only need to reserve one TRB for all devices.
+ */
+ xhci->cmd_ring_reserved_trbs++;
+
+ val = readl(&xhci->cap_regs->db_off);
val &= DBOFF_MASK;
- xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
- " from cap regs base addr\n", val);
- xhci->dba = (void *) xhci->cap_regs + val;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Doorbell array is located at offset 0x%x"
+ " from cap regs base addr", val);
+ xhci->dba = (void __iomem *) xhci->cap_regs + val;
xhci_dbg_regs(xhci);
xhci_print_run_regs(xhci);
/* Set ir_set to interrupt register set 0 */
- xhci->ir_set = (void *) xhci->run_regs->ir_set;
+ xhci->ir_set = &xhci->run_regs->ir_set[0];
/*
* Event ring setup: Allocate a normal ring, but also setup
* the event ring segment table (ERST). Section 4.9.3.
*/
- xhci_dbg(xhci, "// Allocating event ring\n");
- xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
+ xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
+ flags);
if (!xhci->event_ring)
goto fail;
if (xhci_check_trb_in_td_math(xhci, flags) < 0)
goto fail;
- xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
- sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
+ xhci->erst.entries = dma_alloc_coherent(dev,
+ sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
+ GFP_KERNEL);
if (!xhci->erst.entries)
goto fail;
- xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Allocated event ring segment table at 0x%llx",
(unsigned long long)dma);
memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
xhci->erst.num_entries = ERST_NUM_SEGS;
xhci->erst.erst_dma_addr = dma;
- xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
xhci->erst.num_entries,
xhci->erst.entries,
(unsigned long long)xhci->erst.erst_dma_addr);
@@ -1768,23 +2473,26 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/* set ring base address and size for each segment table entry */
for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
struct xhci_erst_entry *entry = &xhci->erst.entries[val];
- entry->seg_addr = seg->dma;
- entry->seg_size = TRBS_PER_SEGMENT;
+ entry->seg_addr = cpu_to_le64(seg->dma);
+ entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
entry->rsvd = 0;
seg = seg->next;
}
/* set ERST count with the number of entries in the segment table */
- val = xhci_readl(xhci, &xhci->ir_set->erst_size);
+ val = readl(&xhci->ir_set->erst_size);
val &= ERST_SIZE_MASK;
val |= ERST_NUM_SEGS;
- xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Write ERST size = %i to ir_set 0 (some bits preserved)",
val);
- xhci_writel(xhci, val, &xhci->ir_set->erst_size);
+ writel(val, &xhci->ir_set->erst_size);
- xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Set ERST entries to point to event ring.");
/* set the segment table base address */
- xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Set ERST base address for ir_set 0 = 0x%llx",
(unsigned long long)xhci->erst.erst_dma_addr);
val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
@@ -1793,8 +2501,14 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
- xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Wrote ERST address to ir_set 0.");
+ xhci_print_ir_set(xhci, 0);
+
+ /* init command timeout timer */
+ init_timer(&xhci->cmd_timer);
+ xhci->cmd_timer.data = (unsigned long) xhci;
+ xhci->cmd_timer.function = xhci_handle_command_timeout;
/*
* XXX: Might need to set the Interrupter Moderation Register to
@@ -1804,16 +2518,33 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
init_completion(&xhci->addr_dev);
for (i = 0; i < MAX_HC_SLOTS; ++i)
xhci->devs[i] = NULL;
- for (i = 0; i < MAX_HC_PORTS; ++i)
- xhci->resume_done[i] = 0;
+ for (i = 0; i < USB_MAXCHILDREN; ++i) {
+ xhci->bus_state[0].resume_done[i] = 0;
+ xhci->bus_state[1].resume_done[i] = 0;
+ /* Only the USB 2.0 completions will ever be used. */
+ init_completion(&xhci->bus_state[1].rexit_done[i]);
+ }
if (scratchpad_alloc(xhci, flags))
goto fail;
+ if (xhci_setup_port_arrays(xhci, flags))
+ goto fail;
+
+ /* Enable USB 3.0 device notifications for function remote wake, which
+ * is necessary for allowing USB 3.0 devices to do remote wakeup from
+ * U3 (device suspend).
+ */
+ temp = readl(&xhci->op_regs->dev_notification);
+ temp &= ~DEV_NOTE_MASK;
+ temp |= DEV_NOTE_FWAKE;
+ writel(temp, &xhci->op_regs->dev_notification);
return 0;
fail:
xhci_warn(xhci, "Couldn't initialize memory\n");
+ xhci_halt(xhci);
+ xhci_reset(xhci);
xhci_mem_cleanup(xhci);
return -ENOMEM;
}
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
new file mode 100644
index 00000000000..1eefc988192
--- /dev/null
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2014 Marvell
+ * Author: Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/mbus.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "xhci-mvebu.h"
+
+#define USB3_MAX_WINDOWS 4
+#define USB3_WIN_CTRL(w) (0x0 + ((w) * 8))
+#define USB3_WIN_BASE(w) (0x4 + ((w) * 8))
+
+static void xhci_mvebu_mbus_config(void __iomem *base,
+ const struct mbus_dram_target_info *dram)
+{
+ int win;
+
+ /* Clear all existing windows */
+ for (win = 0; win < USB3_MAX_WINDOWS; win++) {
+ writel(0, base + USB3_WIN_CTRL(win));
+ writel(0, base + USB3_WIN_BASE(win));
+ }
+
+ /* Program each DRAM CS in a separate window */
+ for (win = 0; win < dram->num_cs; win++) {
+ const struct mbus_dram_window *cs = dram->cs + win;
+
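+ /* Control word: size mask | mbus attribute | target ID | window enable */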
+ writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) |
+ (dram->mbus_dram_target_id << 4) | 1,
+ base + USB3_WIN_CTRL(win));
+
+ writel((cs->base & 0xffff0000), base + USB3_WIN_BASE(win));
+ }
+}
+
+int xhci_mvebu_mbus_init_quirk(struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *base;
+ const struct mbus_dram_target_info *dram;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return -ENODEV;
+
+ /*
+ * We don't use devm_ioremap() because this mapping should
+ * only exist for the duration of this probe function.
+ */
+ base = ioremap(res->start, resource_size(res));
+ if (!base)
+ return -ENODEV;
+
+ dram = mv_mbus_dram_info();
+ xhci_mvebu_mbus_config(base, dram);
+
+ /*
+ * This memory area was only needed to configure the MBus
+ * windows, and is therefore no longer useful.
+ */
+ iounmap(base);
+
+ return 0;
+}
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
new file mode 100644
index 00000000000..7ede92aa41f
--- /dev/null
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 Marvell
+ *
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __LINUX_XHCI_MVEBU_H
+#define __LINUX_XHCI_MVEBU_H
+#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
+int xhci_mvebu_mbus_init_quirk(struct platform_device *pdev);
+#else
+static inline int xhci_mvebu_mbus_init_quirk(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+#endif /* __LINUX_XHCI_MVEBU_H */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index bb668a894ab..e20520f4275 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -21,12 +21,22 @@
*/
#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/module.h>
#include "xhci.h"
+#include "xhci-trace.h"
/* Device for a quirk */
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
+#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
+
+#define PCI_VENDOR_ID_ETRON 0x1b6f
+#define PCI_DEVICE_ID_ASROCK_P67 0x7023
+
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
static const char hcd_name[] = "xhci_hcd";
@@ -47,94 +57,257 @@ static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
return 0;
}
-/* called during probe() after chip reset completes */
-static int xhci_pci_setup(struct usb_hcd *hcd)
+static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- int retval;
- u32 temp;
-
- hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
-
- xhci->cap_regs = hcd->regs;
- xhci->op_regs = hcd->regs +
- HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
- xhci->run_regs = hcd->regs +
- (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
- /* Cache read-only capability registers */
- xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
- xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
- xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
- xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
- xhci->hci_version = HC_VERSION(xhci->hcc_params);
- xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
- xhci_print_registers(xhci);
+ struct pci_dev *pdev = to_pci_dev(dev);
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
- pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
- pdev->revision == 0x0) {
+ (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
+ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
+ if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
+ pdev->revision == 0x0) {
xhci->quirks |= XHCI_RESET_EP_QUIRK;
- xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
- " endpoint cmd after reset endpoint\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Fresco Logic xHC needs configure"
+ " endpoint cmd after reset endpoint");
+ }
+ if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
+ pdev->revision == 0x4) {
+ xhci->quirks |= XHCI_SLOW_SUSPEND;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Fresco Logic xHC revision %u"
+ "must be suspended extra slowly",
+ pdev->revision);
+ }
+ /* Fresco Logic confirms: all revisions of this chip do not
+ * support MSI, even though some of them claim to in their PCI
+ * capabilities.
+ */
+ xhci->quirks |= XHCI_BROKEN_MSI;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Fresco Logic revision %u "
+ "has broken MSI implementation",
+ pdev->revision);
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
+
if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST;
- /* Make sure the HC is halted. */
- retval = xhci_halt(xhci);
- if (retval)
- return retval;
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
+ xhci->quirks |= XHCI_AMD_0x96_HOST;
- xhci_dbg(xhci, "Resetting HCD\n");
- /* Reset the internal HC memory state and registers. */
- retval = xhci_reset(xhci);
- if (retval)
- return retval;
- xhci_dbg(xhci, "Reset complete\n");
-
- temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
- if (HCC_64BIT_ADDR(temp)) {
- xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
- dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
- } else {
- dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
+ /* AMD PLL quirk */
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
+ xhci->quirks |= XHCI_AMD_PLL_FIX;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ xhci->quirks |= XHCI_LPM_SUPPORT;
+ xhci->quirks |= XHCI_INTEL_HOST;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+ xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+ xhci->limit_active_eps = 64;
+ xhci->quirks |= XHCI_SW_BW_CHECKING;
+ /*
+ * PPT desktop boards DH77EB and DH77DF will power back on after
+ * a few seconds of being shutdown. The fix for this is to
+ * switch the ports from xHCI to EHCI on shutdown. We can't use
+ * DMI information to find those particular boards (since each
+ * vendor will change the board name), so we have to key off all
+ * PPT chipsets.
+ */
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ xhci->quirks |= XHCI_AVOID_BEI;
}
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
+ /* Workaround for occasional spurious wakeups from S5 (or
+ * any other sleep) on Haswell machines with LPT and LPT-LP
+ * with the new Intel BIOS
+ */
+ /* Limit the quirk to only known vendors, as this triggers
+ * yet another BIOS bug on some other machines
+ * https://bugzilla.kernel.org/show_bug.cgi?id=66171
+ */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
+ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0015)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ if (pdev->vendor == PCI_VENDOR_ID_VIA)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Resetting on resume");
+}
- xhci_dbg(xhci, "Calling HCD init\n");
- /* Initialize HCD and host controller data structures. */
- retval = xhci_init(hcd);
+/* called during probe() after chip reset completes */
+static int xhci_pci_setup(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci;
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ int retval;
+
+ retval = xhci_gen_setup(hcd, xhci_pci_quirks);
if (retval)
return retval;
- xhci_dbg(xhci, "Called HCD init\n");
+
+ xhci = hcd_to_xhci(hcd);
+ if (!usb_hcd_is_primary_hcd(hcd))
+ return 0;
pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
/* Find any debug ports */
- return xhci_pci_reinit(xhci, pdev);
+ retval = xhci_pci_reinit(xhci, pdev);
+ if (!retval)
+ return retval;
+
+ kfree(xhci);
+ return retval;
+}
+
+/*
+ * We need to register our own PCI probe function (instead of the USB core's
+ * function) in order to create a second roothub under xHCI.
+ */
+static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int retval;
+ struct xhci_hcd *xhci;
+ struct hc_driver *driver;
+ struct usb_hcd *hcd;
+
+ driver = (struct hc_driver *)id->driver_data;
+
+ /* Prevent runtime suspending between USB-2 and USB-3 initialization */
+ pm_runtime_get_noresume(&dev->dev);
+
+ /* Register the USB 2.0 roothub.
+ * FIXME: USB core must know to register the USB 2.0 roothub first.
+ * This is sort of silly, because we could just set the HCD driver flags
+ * to say USB 2.0, but I'm not sure what the implications would be in
+ * the other parts of the HCD code.
+ */
+ retval = usb_hcd_pci_probe(dev, id);
+
+ if (retval)
+ goto put_runtime_pm;
+
+ /* USB 2.0 roothub is stored in the PCI device now. */
+ hcd = dev_get_drvdata(&dev->dev);
+ xhci = hcd_to_xhci(hcd);
+ xhci->shared_hcd = usb_create_shared_hcd(driver, &dev->dev,
+ pci_name(dev), hcd);
+ if (!xhci->shared_hcd) {
+ retval = -ENOMEM;
+ goto dealloc_usb2_hcd;
+ }
+
+ /* Set the xHCI pointer before xhci_pci_setup() (aka hcd_driver.reset)
+ * is called by usb_add_hcd().
+ */
+ *((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
+
+ retval = usb_add_hcd(xhci->shared_hcd, dev->irq,
+ IRQF_SHARED);
+ if (retval)
+ goto put_usb3_hcd;
+ /* Roothub already marked as USB 3.0 speed */
+
+ if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ xhci->shared_hcd->can_do_streams = 1;
+
+ /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ pm_runtime_put_noidle(&dev->dev);
+
+ return 0;
+
+put_usb3_hcd:
+ usb_put_hcd(xhci->shared_hcd);
+dealloc_usb2_hcd:
+ usb_hcd_pci_remove(dev);
+put_runtime_pm:
+ pm_runtime_put_noidle(&dev->dev);
+ return retval;
+}
+
+static void xhci_pci_remove(struct pci_dev *dev)
+{
+ struct xhci_hcd *xhci;
+
+ xhci = hcd_to_xhci(pci_get_drvdata(dev));
+ if (xhci->shared_hcd) {
+ usb_remove_hcd(xhci->shared_hcd);
+ usb_put_hcd(xhci->shared_hcd);
+ }
+ usb_hcd_pci_remove(dev);
+
+ /* Workaround for spurious wakeups at shutdown with HSW */
+ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+ pci_set_power_state(dev, PCI_D3hot);
+
+ kfree(xhci);
}
#ifdef CONFIG_PM
static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- int retval = 0;
-
- if (hcd->state != HC_STATE_SUSPENDED)
- return -EINVAL;
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- retval = xhci_suspend(xhci);
+ /*
+ * Systems with the TI redriver that loses port status change events
+ * need to have the registers polled during D3, so avoid D3cold.
+ */
+ if (xhci_compliance_mode_recovery_timer_quirk_check())
+ pdev->no_d3cold = true;
- return retval;
+ return xhci_suspend(xhci);
}
static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval = 0;
+ /* The BIOS on systems with the Intel Panther Point chipset may or may
+ * not support xHCI natively. That means that during system resume, it
+ * may switch the ports back to EHCI so that users can use their
+ * keyboard to select a kernel from GRUB after resume from hibernate.
+ *
+ * The BIOS is supposed to remember whether the OS had xHCI ports
+ * enabled before resume, and switch the ports back to xHCI when the
+ * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+ * writers.
+ *
+ * Unconditionally switch the ports back to xHCI after a system resume.
+ * It should not matter whether the EHCI or xHCI controller is
+ * resumed first. It's enough to do the switchover in xHCI because
+ * USB core won't notice anything as the hub driver doesn't start
+ * running again until after all the devices (including both EHCI and
+ * xHCI host controllers) have been resumed.
+ */
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ usb_enable_intel_xhci_ports(pdev);
+
retval = xhci_resume(xhci, hibernated);
return retval;
}
@@ -143,13 +316,13 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
static const struct hc_driver xhci_pci_hc_driver = {
.description = hcd_name,
.product_desc = "xHCI Host Controller",
- .hcd_priv_size = sizeof(struct xhci_hcd),
+ .hcd_priv_size = sizeof(struct xhci_hcd *),
/*
* generic hardware linkage
*/
.irq = xhci_irq,
- .flags = HCD_MEMORY | HCD_USB3,
+ .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,
/*
* basic lifecycle operations
@@ -178,6 +351,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
.check_bandwidth = xhci_check_bandwidth,
.reset_bandwidth = xhci_reset_bandwidth,
.address_device = xhci_address_device,
+ .enable_device = xhci_enable_device,
.update_hub_device = xhci_update_hub_device,
.reset_device = xhci_discover_or_reset_device,
@@ -191,6 +365,14 @@ static const struct hc_driver xhci_pci_hc_driver = {
.hub_status_data = xhci_hub_status_data,
.bus_suspend = xhci_bus_suspend,
.bus_resume = xhci_bus_resume,
+ /*
+ * call back when device connected and addressed
+ */
+ .update_device = xhci_update_device,
+ .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
+ .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
+ .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
+ .find_raw_port_number = xhci_find_raw_port_number,
};
/*-------------------------------------------------------------------------*/
@@ -210,19 +392,19 @@ static struct pci_driver xhci_pci_driver = {
.name = (char *) hcd_name,
.id_table = pci_ids,
- .probe = usb_hcd_pci_probe,
- .remove = usb_hcd_pci_remove,
+ .probe = xhci_pci_probe,
+ .remove = xhci_pci_remove,
/* suspend and resume implemented later */
.shutdown = usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
.driver = {
.pm = &usb_hcd_pci_pm_ops
},
#endif
};
-int xhci_register_pci(void)
+int __init xhci_register_pci(void)
{
return pci_register_driver(&xhci_pci_driver);
}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
new file mode 100644
index 00000000000..29d8adb5c8d
--- /dev/null
+++ b/drivers/usb/host/xhci-plat.c
@@ -0,0 +1,297 @@
+/*
+ * xhci-plat.c - xHCI host controller driver platform Bus Glue.
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * A lot of code borrowed from the Linux xHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "xhci.h"
+#include "xhci-mvebu.h"
+
+static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
+{
+ /*
+ * As of now platform drivers don't provide MSI support so we ensure
+ * here that the generic code does not try to make a pci_dev from our
+ * dev struct in order to set up MSI.
+ */
+ xhci->quirks |= XHCI_PLAT;
+}
+
+/* called during probe() after chip reset completes */
+static int xhci_plat_setup(struct usb_hcd *hcd)
+{
+ return xhci_gen_setup(hcd, xhci_plat_quirks);
+}
+
+static int xhci_plat_start(struct usb_hcd *hcd)
+{
+ return xhci_run(hcd);
+}
+
+static const struct hc_driver xhci_plat_xhci_driver = {
+ .description = "xhci-hcd",
+ .product_desc = "xHCI Host Controller",
+ .hcd_priv_size = sizeof(struct xhci_hcd *),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = xhci_irq,
+ .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = xhci_plat_setup,
+ .start = xhci_plat_start,
+ .stop = xhci_stop,
+ .shutdown = xhci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = xhci_urb_enqueue,
+ .urb_dequeue = xhci_urb_dequeue,
+ .alloc_dev = xhci_alloc_dev,
+ .free_dev = xhci_free_dev,
+ .alloc_streams = xhci_alloc_streams,
+ .free_streams = xhci_free_streams,
+ .add_endpoint = xhci_add_endpoint,
+ .drop_endpoint = xhci_drop_endpoint,
+ .endpoint_reset = xhci_endpoint_reset,
+ .check_bandwidth = xhci_check_bandwidth,
+ .reset_bandwidth = xhci_reset_bandwidth,
+ .address_device = xhci_address_device,
+ .enable_device = xhci_enable_device,
+ .update_hub_device = xhci_update_hub_device,
+ .reset_device = xhci_discover_or_reset_device,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = xhci_get_frame,
+
+ /* Root hub support */
+ .hub_control = xhci_hub_control,
+ .hub_status_data = xhci_hub_status_data,
+ .bus_suspend = xhci_bus_suspend,
+ .bus_resume = xhci_bus_resume,
+};
+
+static int xhci_plat_probe(struct platform_device *pdev)
+{
+ const struct hc_driver *driver;
+ struct xhci_hcd *xhci;
+ struct resource *res;
+ struct usb_hcd *hcd;
+ struct clk *clk;
+ int ret;
+ int irq;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ driver = &xhci_plat_xhci_driver;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "marvell,armada-375-xhci") ||
+ of_device_is_compatible(pdev->dev.of_node,
+ "marvell,armada-380-xhci")) {
+ ret = xhci_mvebu_mbus_init_quirk(pdev);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialize dma_mask and coherent_dma_mask to 32-bits */
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ else
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+
+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+ driver->description)) {
+ dev_dbg(&pdev->dev, "controller already in use\n");
+ ret = -EBUSY;
+ goto put_hcd;
+ }
+
+ hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ dev_dbg(&pdev->dev, "error mapping memory\n");
+ ret = -EFAULT;
+ goto release_mem_region;
+ }
+
+ /*
+ * Not all platforms have a clk so it is not an error if the
+ * clock does not exist.
+ */
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto unmap_registers;
+ }
+
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret)
+ goto disable_clk;
+
+ device_wakeup_enable(hcd->self.controller);
+
+ /* USB 2.0 roothub is stored in the platform_device now. */
+ hcd = platform_get_drvdata(pdev);
+ xhci = hcd_to_xhci(hcd);
+ xhci->clk = clk;
+ xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
+ dev_name(&pdev->dev), hcd);
+ if (!xhci->shared_hcd) {
+ ret = -ENOMEM;
+ goto dealloc_usb2_hcd;
+ }
+
+ /*
+ * Set the xHCI pointer before xhci_plat_setup() (aka hcd_driver.reset)
+ * is called by usb_add_hcd().
+ */
+ *((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
+
+ if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ xhci->shared_hcd->can_do_streams = 1;
+
+ ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
+ if (ret)
+ goto put_usb3_hcd;
+
+ return 0;
+
+put_usb3_hcd:
+ usb_put_hcd(xhci->shared_hcd);
+
+dealloc_usb2_hcd:
+ usb_remove_hcd(hcd);
+
+disable_clk:
+ if (!IS_ERR(clk))
+ clk_disable_unprepare(clk);
+
+unmap_registers:
+ iounmap(hcd->regs);
+
+release_mem_region:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+
+put_hcd:
+ usb_put_hcd(hcd);
+
+ return ret;
+}
+
+static int xhci_plat_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct clk *clk = xhci->clk;
+
+ usb_remove_hcd(xhci->shared_hcd);
+ usb_put_hcd(xhci->shared_hcd);
+
+ usb_remove_hcd(hcd);
+ if (!IS_ERR(clk))
+ clk_disable_unprepare(clk);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+ kfree(xhci);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int xhci_plat_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ return xhci_suspend(xhci);
+}
+
+static int xhci_plat_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ return xhci_resume(xhci, 0);
+}
+
+static const struct dev_pm_ops xhci_plat_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
+};
+#define DEV_PM_OPS (&xhci_plat_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_OF
+static const struct of_device_id usb_xhci_of_match[] = {
+ { .compatible = "generic-xhci" },
+ { .compatible = "xhci-platform" },
+ { .compatible = "marvell,armada-375-xhci"},
+ { .compatible = "marvell,armada-380-xhci"},
+ { },
+};
+MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
+#endif
+
+static struct platform_driver usb_xhci_driver = {
+ .probe = xhci_plat_probe,
+ .remove = xhci_plat_remove,
+ .driver = {
+ .name = "xhci-hcd",
+ .pm = DEV_PM_OPS,
+ .of_match_table = of_match_ptr(usb_xhci_of_match),
+ },
+};
+MODULE_ALIAS("platform:xhci-hcd");
+
+int xhci_register_plat(void)
+{
+ return platform_driver_register(&usb_xhci_driver);
+}
+
+void xhci_unregister_plat(void)
+{
+ platform_driver_unregister(&usb_xhci_driver);
+}
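(For reference: xhci_register_plat() and xhci_unregister_plat() are not a module
init/exit pair on their own; they are expected to be called from the xHCI core's
module init and cleanup. A minimal sketch of that wiring follows; the surrounding
function names and the PCI counterparts are assumptions based on the usual xhci.c
conventions, not part of this patch.)

	/* Sketch only: assumes xhci_register_pci()/xhci_unregister_pci() exist
	 * next to the platform entry points defined above. */
	static int __init xhci_hcd_init(void)
	{
		int retval;

		retval = xhci_register_pci();	/* PCI xHCI controllers */
		if (retval < 0)
			return retval;

		retval = xhci_register_plat();	/* memory-mapped platform controllers */
		if (retval < 0) {
			xhci_unregister_pci();
			return retval;
		}
		return 0;
	}
	module_init(xhci_hcd_init);

	static void __exit xhci_hcd_cleanup(void)
	{
		xhci_unregister_plat();
		xhci_unregister_pci();
	}
	module_exit(xhci_hcd_cleanup);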
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9f3115e729b..749fc68eb5c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -67,10 +67,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"
-
-static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
- struct xhci_virt_device *virt_dev,
- struct xhci_event_cmd *event);
+#include "xhci-trace.h"
/*
* Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
@@ -93,33 +90,33 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
/* Does this link TRB point to the first segment in a ring,
* or was the previous TRB the last TRB on the last segment in the ERST?
*/
-static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
+static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *seg, union xhci_trb *trb)
{
if (ring == xhci->event_ring)
return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
(seg->next == xhci->event_ring->first_seg);
else
- return trb->link.control & LINK_TOGGLE;
+ return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}
/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
* segment? I.e. would the updated event TRB pointer step off the end of the
* event seg?
*/
-static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *seg, union xhci_trb *trb)
{
if (ring == xhci->event_ring)
return trb == &seg->trbs[TRBS_PER_SEGMENT];
else
- return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
+ return TRB_TYPE_LINK_LE32(trb->link.control);
}
-static inline int enqueue_is_link_trb(struct xhci_ring *ring)
+static int enqueue_is_link_trb(struct xhci_ring *ring)
{
struct xhci_link_trb *link = &ring->enqueue->link;
- return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
+ return TRB_TYPE_LINK_LE32(link->control);
}
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
@@ -143,34 +140,36 @@ static void next_trb(struct xhci_hcd *xhci,
* See Cycle bit rules. SW is the consumer for the event ring only.
* Don't make a ring full of link TRBs. That would be dumb and this would loop.
*/
-static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
- union xhci_trb *next = ++(ring->dequeue);
- unsigned long long addr;
-
ring->deq_updates++;
- /* Update the dequeue pointer further if that was a link TRB or we're at
- * the end of an event ring segment (which doesn't have link TRBS)
+
+ /*
+ * If this is not event ring, and the dequeue pointer
+ * is not on a link TRB, there is one more usable TRB
*/
- while (last_trb(xhci, ring, ring->deq_seg, next)) {
- if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
- ring->cycle_state = (ring->cycle_state ? 0 : 1);
- if (!in_interrupt())
- xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
- ring,
- (unsigned int) ring->cycle_state);
+ if (ring->type != TYPE_EVENT &&
+ !last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
+ ring->num_trbs_free++;
+
+ do {
+ /*
+ * Update the dequeue pointer further if that was a link TRB or
+ * we're at the end of an event ring segment (which doesn't have
+ * link TRBS)
+ */
+ if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
+ if (ring->type == TYPE_EVENT &&
+ last_trb_on_last_seg(xhci, ring,
+ ring->deq_seg, ring->dequeue)) {
+ ring->cycle_state ^= 1;
+ }
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ } else {
+ ring->dequeue++;
}
- ring->deq_seg = ring->deq_seg->next;
- ring->dequeue = ring->deq_seg->trbs;
- next = ring->dequeue;
- }
- addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
- if (ring == xhci->event_ring)
- xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
- else if (ring == xhci->cmd_ring)
- xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
- else
- xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
+ } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
}
/*
@@ -191,13 +190,16 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
* prepare_transfer()?
*/
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
- bool consumer, bool more_trbs_coming)
+ bool more_trbs_coming)
{
u32 chain;
union xhci_trb *next;
- unsigned long long addr;
- chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
+ chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
+ /* If this is not event ring, there is one less usable TRB */
+ if (ring->type != TYPE_EVENT &&
+ !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
+ ring->num_trbs_free--;
next = ++(ring->enqueue);
ring->enq_updates++;
@@ -205,116 +207,110 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
* the end of an event ring segment (which doesn't have link TRBS)
*/
while (last_trb(xhci, ring, ring->enq_seg, next)) {
- if (!consumer) {
- if (ring != xhci->event_ring) {
- /*
- * If the caller doesn't plan on enqueueing more
- * TDs before ringing the doorbell, then we
- * don't want to give the link TRB to the
- * hardware just yet. We'll give the link TRB
- * back in prepare_ring() just before we enqueue
- * the TD at the top of the ring.
- */
- if (!chain && !more_trbs_coming)
- break;
+ if (ring->type != TYPE_EVENT) {
+ /*
+ * If the caller doesn't plan on enqueueing more
+ * TDs before ringing the doorbell, then we
+ * don't want to give the link TRB to the
+ * hardware just yet. We'll give the link TRB
+ * back in prepare_ring() just before we enqueue
+ * the TD at the top of the ring.
+ */
+ if (!chain && !more_trbs_coming)
+ break;
- /* If we're not dealing with 0.95 hardware,
- * carry over the chain bit of the previous TRB
- * (which may mean the chain bit is cleared).
- */
- if (!xhci_link_trb_quirk(xhci)) {
- next->link.control &= ~TRB_CHAIN;
- next->link.control |= chain;
- }
- /* Give this link TRB to the hardware */
- wmb();
- next->link.control ^= TRB_CYCLE;
+ /* If we're not dealing with 0.95 hardware or
+ * isoc rings on AMD 0.96 host,
+ * carry over the chain bit of the previous TRB
+ * (which may mean the chain bit is cleared).
+ */
+ if (!(ring->type == TYPE_ISOC &&
+ (xhci->quirks & XHCI_AMD_0x96_HOST))
+ && !xhci_link_trb_quirk(xhci)) {
+ next->link.control &=
+ cpu_to_le32(~TRB_CHAIN);
+ next->link.control |=
+ cpu_to_le32(chain);
}
+ /* Give this link TRB to the hardware */
+ wmb();
+ next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
- if (!in_interrupt())
- xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
- ring,
- (unsigned int) ring->cycle_state);
}
}
ring->enq_seg = ring->enq_seg->next;
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
}
- addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
- if (ring == xhci->event_ring)
- xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
- else if (ring == xhci->cmd_ring)
- xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
- else
- xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
}
/*
- * Check to see if there's room to enqueue num_trbs on the ring. See rules
- * above.
- * FIXME: this would be simpler and faster if we just kept track of the number
- * of free TRBs in a ring.
+ * Check to see if there's room to enqueue num_trbs on the ring and make sure
+ * enqueue pointer will not advance into dequeue segment. See rules above.
*/
-static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs)
{
- int i;
- union xhci_trb *enq = ring->enqueue;
- struct xhci_segment *enq_seg = ring->enq_seg;
- struct xhci_segment *cur_seg;
- unsigned int left_on_ring;
-
- /* If we are currently pointing to a link TRB, advance the
- * enqueue pointer before checking for space */
- while (last_trb(xhci, ring, enq_seg, enq)) {
- enq_seg = enq_seg->next;
- enq = enq_seg->trbs;
- }
-
- /* Check if ring is empty */
- if (enq == ring->dequeue) {
- /* Can't use link trbs */
- left_on_ring = TRBS_PER_SEGMENT - 1;
- for (cur_seg = enq_seg->next; cur_seg != enq_seg;
- cur_seg = cur_seg->next)
- left_on_ring += TRBS_PER_SEGMENT - 1;
-
- /* Always need one TRB free in the ring. */
- left_on_ring -= 1;
- if (num_trbs > left_on_ring) {
- xhci_warn(xhci, "Not enough room on ring; "
- "need %u TRBs, %u TRBs left\n",
- num_trbs, left_on_ring);
- return 0;
- }
- return 1;
- }
- /* Make sure there's an extra empty TRB available */
- for (i = 0; i <= num_trbs; ++i) {
- if (enq == ring->dequeue)
+ int num_trbs_in_deq_seg;
+
+ if (ring->num_trbs_free < num_trbs)
+ return 0;
+
+ if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
+ num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
+ if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
return 0;
- enq++;
- while (last_trb(xhci, ring, enq_seg, enq)) {
- enq_seg = enq_seg->next;
- enq = enq_seg->trbs;
- }
}
+
return 1;
}
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
- u32 temp;
+ if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
+ return;
xhci_dbg(xhci, "// Ding dong!\n");
- temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
- xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+ writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
- xhci_readl(xhci, &xhci->dba->doorbell[0]);
+ readl(&xhci->dba->doorbell[0]);
+}
+
+static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
+{
+ u64 temp_64;
+ int ret;
+
+ xhci_dbg(xhci, "Abort command ring\n");
+
+ temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+ xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+ &xhci->op_regs->cmd_ring);
+
+ /* Section 4.6.1.2 of xHCI 1.0 spec says software should
+ * time the completion of all xHCI commands, including
+ * the Command Abort operation. If software doesn't see
+ * CRR negated in a timely manner (e.g. longer than 5
+ * seconds), then it should assume that there are
+ * larger problems with the xHC and assert HCRST.
+ */
+ ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
+ CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
+ if (ret < 0) {
+ xhci_err(xhci, "Stopped the command ring failed, "
+ "maybe the host is dead\n");
+ xhci->xhc_state |= XHCI_STATE_DYING;
+ xhci_quiesce(xhci);
+ xhci_halt(xhci);
+ return -ESHUTDOWN;
+ }
+
+ return 0;
}
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
@@ -322,26 +318,24 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
unsigned int ep_index,
unsigned int stream_id)
{
- struct xhci_virt_ep *ep;
- unsigned int ep_state;
- u32 field;
- __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+ __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+ struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+ unsigned int ep_state = ep->ep_state;
- ep = &xhci->devs[slot_id]->eps[ep_index];
- ep_state = ep->ep_state;
/* Don't ring the doorbell for this endpoint if there are pending
- * cancellations because the we don't want to interrupt processing.
+ * cancellations because we don't want to interrupt processing.
* We don't want to restart any stream rings if there's a set dequeue
* pointer command pending because the device can choose to start any
* stream once the endpoint is on the HW schedule.
* FIXME - check all the stream rings for pending cancellations.
*/
- if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
- && !(ep_state & EP_HALTED)) {
- field = xhci_readl(xhci, db_addr) & DB_MASK;
- field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
- xhci_writel(xhci, field, db_addr);
- }
+ if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
+ (ep_state & EP_HALTED))
+ return;
+ writel(DB_VALUE(ep_index, stream_id), db_addr);
+ /* The CPU has better things to do at this point than wait for a
+ * write-posting flush. It'll get there soon enough.
+ */
}
/* Ring the doorbell for any rings with pending URBs */
@@ -356,7 +350,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
/* A ring has pending URBs if its TD list is not empty */
if (!(ep->ep_state & EP_HAS_STREAMS)) {
- if (!(list_empty(&ep->ring->td_list)))
+ if (ep->ring && !(list_empty(&ep->ring->td_list)))
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
return;
}
@@ -385,10 +379,8 @@ static struct xhci_segment *find_trb_seg(
while (cur_seg->trbs > trb ||
&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
- if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
- TRB_TYPE(TRB_LINK) &&
- (generic_trb->field[3] & LINK_TOGGLE))
- *cycle_state = ~(*cycle_state) & 0x1;
+ if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
+ *cycle_state ^= 0x1;
cur_seg = cur_seg->next;
if (cur_seg == start_seg)
/* Looped over the entire list. Oops! */
@@ -454,6 +446,10 @@ static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
* any link TRBs with the toggle cycle bit set.
* - Finally we move the dequeue state one TRB further, toggling the cycle bit
* if we've moved it past a link TRB with the toggle cycle bit set.
+ *
+ * Some of the uses of xhci_generic_trb are grotty, but if they're done
+ * with correct __le32 accesses they should work fine. Only users of this are
+ * in here.
*/
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
@@ -461,10 +457,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_dequeue_state *state)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
+ struct xhci_virt_ep *ep = &dev->eps[ep_index];
struct xhci_ring *ep_ring;
struct xhci_generic_trb *trb;
- struct xhci_ep_ctx *ep_ctx;
dma_addr_t addr;
+ u64 hw_dequeue;
ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
ep_index, stream_id);
@@ -474,45 +471,80 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
stream_id);
return;
}
- state->new_cycle_state = 0;
- xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
- state->new_deq_seg = find_trb_seg(cur_td->start_seg,
- dev->eps[ep_index].stopped_trb,
- &state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
- xhci_dbg(xhci, "Finding endpoint context\n");
- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
- state->new_cycle_state = 0x1 & ep_ctx->deq;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Finding endpoint context");
+ /* 4.6.9 the css flag is written to the stream context for streams */
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ struct xhci_stream_ctx *ctx =
+ &ep->stream_info->stream_ctx_array[stream_id];
+ hw_dequeue = le64_to_cpu(ctx->stream_ring);
+ } else {
+ struct xhci_ep_ctx *ep_ctx
+ = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+ hw_dequeue = le64_to_cpu(ep_ctx->deq);
+ }
+
+ /* Find virtual address and segment of hardware dequeue pointer */
+ state->new_deq_seg = ep_ring->deq_seg;
+ state->new_deq_ptr = ep_ring->dequeue;
+ while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
+ != (dma_addr_t)(hw_dequeue & ~0xf)) {
+ next_trb(xhci, ep_ring, &state->new_deq_seg,
+ &state->new_deq_ptr);
+ if (state->new_deq_ptr == ep_ring->dequeue) {
+ WARN_ON(1);
+ return;
+ }
+ }
+ /*
+ * Find cycle state for last_trb, starting at old cycle state of
+ * hw_dequeue. If there is only one segment in the ring, find_trb_seg() will
+ * return immediately and cannot toggle the cycle state if this search
+ * wraps around, so add one more toggle manually in that case.
+ */
+ state->new_cycle_state = hw_dequeue & 0x1;
+ if (ep_ring->first_seg == ep_ring->first_seg->next &&
+ cur_td->last_trb < state->new_deq_ptr)
+ state->new_cycle_state ^= 0x1;
state->new_deq_ptr = cur_td->last_trb;
- xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Finding segment containing last TRB in TD.");
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
- state->new_deq_ptr,
- &state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ state->new_deq_ptr, &state->new_cycle_state);
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
+ /* Increment to find next TRB after last_trb. Cycle if appropriate. */
trb = &state->new_deq_ptr->generic;
- if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
- (trb->field[3] & LINK_TOGGLE))
- state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
+ if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
+ (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
+ state->new_cycle_state ^= 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
/* Don't update the ring cycle state for the producer (us). */
- xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Cycle state = 0x%x", state->new_cycle_state);
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "New dequeue segment = %p (virtual)",
state->new_deq_seg);
addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
- xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "New dequeue pointer = 0x%llx (DMA)",
(unsigned long long) addr);
- xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
- ep_ring->dequeue = state->new_deq_ptr;
- ep_ring->deq_seg = state->new_deq_seg;
}
+/* flip_cycle means flip the cycle bit of all but the first and last TRB.
+ * (The last TRB actually points to the ring enqueue pointer, which is not part
+ * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
+ */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
- struct xhci_td *cur_td)
+ struct xhci_td *cur_td, bool flip_cycle)
{
struct xhci_segment *cur_seg;
union xhci_trb *cur_trb;
@@ -520,15 +552,22 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
true;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
- TRB_TYPE(TRB_LINK)) {
+ if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
/* Unchain any chained Link TRBs, but
* leave the pointers intact.
*/
- cur_trb->generic.field[3] &= ~TRB_CHAIN;
- xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
- xhci_dbg(xhci, "Address = %p (0x%llx dma); "
- "in seg %p (0x%llx dma)\n",
+ cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
+ /* Flip the cycle bit (link TRBs can't be the first
+ * or last TRB).
+ */
+ if (flip_cycle)
+ cur_trb->generic.field[3] ^=
+ cpu_to_le32(TRB_CYCLE);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Cancel (unchain) link TRB");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Address = %p (0x%llx dma); "
+ "in seg %p (0x%llx dma)",
cur_trb,
(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
cur_seg,
@@ -538,40 +577,47 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
cur_trb->generic.field[1] = 0;
cur_trb->generic.field[2] = 0;
/* Preserve only the cycle bit of this TRB */
- cur_trb->generic.field[3] &= TRB_CYCLE;
- cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
- xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
- "in seg %p (0x%llx dma)\n",
- cur_trb,
- (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
- cur_seg,
- (unsigned long long)cur_seg->dma);
+ cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+ /* Flip the cycle bit except on the first or last TRB */
+ if (flip_cycle && cur_trb != cur_td->first_trb &&
+ cur_trb != cur_td->last_trb)
+ cur_trb->generic.field[3] ^=
+ cpu_to_le32(TRB_CYCLE);
+ cur_trb->generic.field[3] |= cpu_to_le32(
+ TRB_TYPE(TRB_TR_NOOP));
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "TRB to noop at offset 0x%llx",
+ (unsigned long long)
+ xhci_trb_virt_to_dma(cur_seg, cur_trb));
}
if (cur_trb == cur_td->last_trb)
break;
}
}
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+static int queue_set_tr_deq(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, int slot_id,
unsigned int ep_index, unsigned int stream_id,
struct xhci_segment *deq_seg,
union xhci_trb *deq_ptr, u32 cycle_state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+ struct xhci_command *cmd,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id,
struct xhci_dequeue_state *deq_state)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
- xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
- "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+ "new deq ptr = %p (0x%llx dma), new cycle = %u",
deq_state->new_deq_seg,
(unsigned long long)deq_state->new_deq_seg->dma,
deq_state->new_deq_ptr,
(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
deq_state->new_cycle_state);
- queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
+ queue_set_tr_deq(xhci, cmd, slot_id, ep_index, stream_id,
deq_state->new_deq_seg,
deq_state->new_deq_ptr,
(u32) deq_state->new_cycle_state);
@@ -583,7 +629,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
ep->ep_state |= SET_DEQ_PENDING;
}
-static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
+static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep)
{
ep->ep_state &= ~EP_HALT_PENDING;
@@ -597,26 +643,32 @@ static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
- struct xhci_td *cur_td, int status, char *adjective)
+ struct xhci_td *cur_td, int status)
{
- struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct usb_hcd *hcd;
struct urb *urb;
struct urb_priv *urb_priv;
urb = cur_td->urb;
urb_priv = urb->hcpriv;
urb_priv->td_cnt++;
+ hcd = bus_to_hcd(urb->dev->bus);
/* Only giveback urb when this is the last td in urb */
if (urb_priv->td_cnt == urb_priv->length) {
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+ if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+ if (xhci->quirks & XHCI_AMD_PLL_FIX)
+ usb_amd_quirk_pll_enable();
+ }
+ }
usb_hcd_unlink_urb_from_ep(hcd, urb);
- xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
spin_unlock(&xhci->lock);
usb_hcd_giveback_urb(hcd, urb, status);
xhci_urb_free_priv(xhci, urb_priv);
spin_lock(&xhci->lock);
- xhci_dbg(xhci, "%s URB given back\n", adjective);
}
}
@@ -630,12 +682,10 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
* 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
* bit cleared) so that the HW will skip over them.
*/
-static void handle_stopped_endpoint(struct xhci_hcd *xhci,
+static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *trb, struct xhci_event_cmd *event)
{
- unsigned int slot_id;
unsigned int ep_index;
- struct xhci_virt_device *virt_dev;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
struct list_head *entry;
@@ -644,15 +694,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
struct xhci_dequeue_state deq_state;
- if (unlikely(TRB_TO_SUSPEND_PORT(
- xhci->cmd_ring->dequeue->generic.field[3]))) {
- slot_id = TRB_TO_SLOT_ID(
- xhci->cmd_ring->dequeue->generic.field[3]);
- virt_dev = xhci->devs[slot_id];
- if (virt_dev)
- handle_cmd_in_cmd_wait_list(xhci, virt_dev,
- event);
- else
+ if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
+ if (!xhci->devs[slot_id])
xhci_warn(xhci, "Stop endpoint command "
"completion for disabled slot %u\n",
slot_id);
@@ -660,12 +703,12 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
}
memset(&deq_state, 0, sizeof(deq_state));
- slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
- ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+ ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
ep = &xhci->devs[slot_id]->eps[ep_index];
if (list_empty(&ep->cancelled_td_list)) {
xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ep->stopped_td = NULL;
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}
@@ -677,9 +720,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
*/
list_for_each(entry, &ep->cancelled_td_list) {
cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
- xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
- cur_td->first_trb,
- (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Removing canceled TD starting at 0x%llx (dma).",
+ (unsigned long long)xhci_trb_virt_to_dma(
+ cur_td->start_seg, cur_td->first_trb));
ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
if (!ep_ring) {
/* This shouldn't happen unless a driver is mucking
@@ -708,21 +752,23 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
cur_td->urb->stream_id,
cur_td, &deq_state);
else
- td_to_noop(xhci, ep_ring, cur_td);
+ td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
/*
* The event handler won't see a completion for this TD anymore,
* so remove it from the endpoint ring's TD list. Keep it in
* the cancelled TD list for URB completion later.
*/
- list_del(&cur_td->td_list);
+ list_del_init(&cur_td->td_list);
}
last_unlinked_td = cur_td;
xhci_stop_watchdog_timer_in_irq(xhci, ep);
/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
- xhci_queue_new_dequeue_state(xhci,
+ struct xhci_command *command;
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ xhci_queue_new_dequeue_state(xhci, command,
slot_id, ep_index,
ep->stopped_td->urb->stream_id,
&deq_state);
@@ -731,8 +777,10 @@ remove_finished_td:
/* Otherwise ring the doorbell(s) to restart queued transfers */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
- ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
+
+ /* Clear stopped_td if endpoint is not halted */
+ if (!(ep->ep_state & EP_HALTED))
+ ep->stopped_td = NULL;
/*
* Drop the lock and complete the URBs in the cancelled TD list.
@@ -743,13 +791,13 @@ remove_finished_td:
do {
cur_td = list_entry(ep->cancelled_td_list.next,
struct xhci_td, cancelled_td_list);
- list_del(&cur_td->cancelled_td_list);
+ list_del_init(&cur_td->cancelled_td_list);
/* Clean up the cancelled URB */
/* Doesn't matter what we pass for status, since the core will
* just overwrite it (because the URB has been unlinked).
*/
- xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
+ xhci_giveback_urb_in_irq(xhci, cur_td, 0);
/* Stop processing the cancelled list if the watchdog timer is
* running.
@@ -761,6 +809,57 @@ remove_finished_td:
/* Return to the event handler with xhci->lock re-acquired */
}
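+/* Give back every TD still queued on @ring with status -ESHUTDOWN.  Like
+ * xhci_giveback_urb_in_irq() above, this must be called with xhci->lock held.
+ */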
+static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ struct xhci_td *cur_td;
+
+ while (!list_empty(&ring->td_list)) {
+ cur_td = list_first_entry(&ring->td_list,
+ struct xhci_td, td_list);
+ list_del_init(&cur_td->td_list);
+ if (!list_empty(&cur_td->cancelled_td_list))
+ list_del_init(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+ }
+}
+
+static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
+ int slot_id, int ep_index)
+{
+ struct xhci_td *cur_td;
+ struct xhci_virt_ep *ep;
+ struct xhci_ring *ring;
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ if ((ep->ep_state & EP_HAS_STREAMS) ||
+ (ep->ep_state & EP_GETTING_NO_STREAMS)) {
+ int stream_id;
+
+ for (stream_id = 0; stream_id < ep->stream_info->num_streams;
+ stream_id++) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Killing URBs for slot ID %u, ep index %u, stream %u",
+ slot_id, ep_index, stream_id + 1);
+ xhci_kill_ring_urbs(xhci,
+ ep->stream_info->stream_rings[stream_id]);
+ }
+ } else {
+ ring = ep->ring;
+ if (!ring)
+ return;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Killing URBs for slot ID %u, ep index %u",
+ slot_id, ep_index);
+ xhci_kill_ring_urbs(xhci, ring);
+ }
+ while (!list_empty(&ep->cancelled_td_list)) {
+ cur_td = list_first_entry(&ep->cancelled_td_list,
+ struct xhci_td, cancelled_td_list);
+ list_del_init(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+ }
+}
+
/* Watchdog timer function for when a stop endpoint command fails to complete.
* In this case, we assume the host controller is broken or dying or dead. The
* host may still be completing some other events, so we have to be careful to
@@ -784,27 +883,27 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
struct xhci_hcd *xhci;
struct xhci_virt_ep *ep;
- struct xhci_virt_ep *temp_ep;
- struct xhci_ring *ring;
- struct xhci_td *cur_td;
int ret, i, j;
+ unsigned long flags;
ep = (struct xhci_virt_ep *) arg;
xhci = ep->xhci;
- spin_lock(&xhci->lock);
+ spin_lock_irqsave(&xhci->lock, flags);
ep->stop_cmds_pending--;
if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
- "xHCI as DYING, exiting.\n");
- spin_unlock(&xhci->lock);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Stop EP timer ran, but another timer marked "
+ "xHCI as DYING, exiting.");
+ spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
- xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
- "exiting.\n");
- spin_unlock(&xhci->lock);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Stop EP timer ran, but no command pending, "
+ "exiting.");
+ spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
@@ -816,16 +915,15 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
xhci->xhc_state |= XHCI_STATE_DYING;
/* Disable interrupts from the host controller and start halting it */
xhci_quiesce(xhci);
- spin_unlock(&xhci->lock);
+ spin_unlock_irqrestore(&xhci->lock, flags);
ret = xhci_halt(xhci);
- spin_lock(&xhci->lock);
+ spin_lock_irqsave(&xhci->lock, flags);
if (ret < 0) {
/* This is bad; the host is not responding to commands and it's
* not allowing itself to be halted. At least interrupts are
- * disabled, so we can set HC_STATE_HALT and notify the
- * USB core. But if we call usb_hc_died(), it will attempt to
+ * disabled. If we call usb_hc_died(), it will attempt to
* disconnect all device drivers under this host. Those
* disconnect() methods will wait for all URBs to be unlinked,
* so we must complete them.
@@ -841,39 +939,63 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
for (i = 0; i < MAX_HC_SLOTS; i++) {
if (!xhci->devs[i])
continue;
- for (j = 0; j < 31; j++) {
- temp_ep = &xhci->devs[i]->eps[j];
- ring = temp_ep->ring;
- if (!ring)
- continue;
- xhci_dbg(xhci, "Killing URBs for slot ID %u, "
- "ep index %u\n", i, j);
- while (!list_empty(&ring->td_list)) {
- cur_td = list_first_entry(&ring->td_list,
- struct xhci_td,
- td_list);
- list_del(&cur_td->td_list);
- if (!list_empty(&cur_td->cancelled_td_list))
- list_del(&cur_td->cancelled_td_list);
- xhci_giveback_urb_in_irq(xhci, cur_td,
- -ESHUTDOWN, "killed");
- }
- while (!list_empty(&temp_ep->cancelled_td_list)) {
- cur_td = list_first_entry(
- &temp_ep->cancelled_td_list,
- struct xhci_td,
- cancelled_td_list);
- list_del(&cur_td->cancelled_td_list);
- xhci_giveback_urb_in_irq(xhci, cur_td,
- -ESHUTDOWN, "killed");
- }
+ for (j = 0; j < 31; j++)
+ xhci_kill_endpoint_urbs(xhci, i, j);
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Calling usb_hc_died()");
+ usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "xHCI host controller is dead.");
+}
+
+
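+/* After a successful Set TR Dequeue Pointer command, advance the ring's
+ * software dequeue pointer until it matches the pointer handed to the xHC
+ * (queued_deq_ptr), crediting num_trbs_free for each TRB that is skipped.
+ */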
+static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
+ struct xhci_virt_device *dev,
+ struct xhci_ring *ep_ring,
+ unsigned int ep_index)
+{
+ union xhci_trb *dequeue_temp;
+ int num_trbs_free_temp;
+ bool revert = false;
+
+ num_trbs_free_temp = ep_ring->num_trbs_free;
+ dequeue_temp = ep_ring->dequeue;
+
+ /* If we get two back-to-back stalls, and the first stalled transfer
+ * ends just before a link TRB, the dequeue pointer will be left on
+ * the link TRB by the code in the while loop. So we have to update
+ * the dequeue pointer one segment further, or we'll jump off
+ * the segment into la-la-land.
+ */
+ if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+ ep_ring->deq_seg = ep_ring->deq_seg->next;
+ ep_ring->dequeue = ep_ring->deq_seg->trbs;
+ }
+
+ while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
+ /* We have more usable TRBs */
+ ep_ring->num_trbs_free++;
+ ep_ring->dequeue++;
+ if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
+ ep_ring->dequeue)) {
+ if (ep_ring->dequeue ==
+ dev->eps[ep_index].queued_deq_ptr)
+ break;
+ ep_ring->deq_seg = ep_ring->deq_seg->next;
+ ep_ring->dequeue = ep_ring->deq_seg->trbs;
+ }
+ if (ep_ring->dequeue == dequeue_temp) {
+ revert = true;
+ break;
}
}
- spin_unlock(&xhci->lock);
- xhci_to_hcd(xhci)->state = HC_STATE_HALT;
- xhci_dbg(xhci, "Calling usb_hc_died()\n");
- usb_hc_died(xhci_to_hcd(xhci));
- xhci_dbg(xhci, "xHCI host controller is dead.\n");
+
+ if (revert) {
+ xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
+ ep_ring->num_trbs_free = num_trbs_free_temp;
+ }
}
/*
@@ -883,27 +1005,25 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
* endpoint doorbell to restart the ring, but only if there aren't more
* cancellations pending.
*/
-static void handle_set_deq_completion(struct xhci_hcd *xhci,
- struct xhci_event_cmd *event,
- union xhci_trb *trb)
+static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ union xhci_trb *trb, u32 cmd_comp_code)
{
- unsigned int slot_id;
unsigned int ep_index;
unsigned int stream_id;
struct xhci_ring *ep_ring;
struct xhci_virt_device *dev;
+ struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
- slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
- ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
- stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
+ ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
dev = xhci->devs[slot_id];
+ ep = &dev->eps[ep_index];
ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
if (!ep_ring) {
- xhci_warn(xhci, "WARN Set TR deq ptr command for "
- "freed stream ID %u\n",
+ xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
stream_id);
/* XXX: Harmless??? */
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
@@ -913,33 +1033,31 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
- if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
+ if (cmd_comp_code != COMP_SUCCESS) {
unsigned int ep_state;
unsigned int slot_state;
- switch (GET_COMP_CODE(event->status)) {
+ switch (cmd_comp_code) {
case COMP_TRB_ERR:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
- "of stream ID configuration\n");
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
break;
case COMP_CTX_STATE:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
- "to incorrect slot or ep state.\n");
- ep_state = ep_ctx->ep_info;
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
+ ep_state = le32_to_cpu(ep_ctx->ep_info);
ep_state &= EP_STATE_MASK;
- slot_state = slot_ctx->dev_state;
+ slot_state = le32_to_cpu(slot_ctx->dev_state);
slot_state = GET_SLOT_STATE(slot_state);
- xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Slot state = %u, EP state = %u",
slot_state, ep_state);
break;
case COMP_EBADSLT:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
- "slot %u was not enabled.\n", slot_id);
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
+ slot_id);
break;
default:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
- "completion code of %u.\n",
- GET_COMP_CODE(event->status));
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
+ cmd_comp_code);
break;
}
/* OK what do we do now? The endpoint state is hosed, and we
@@ -949,37 +1067,60 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
* cancelling URBs, which might not be an error...
*/
} else {
- xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
- ep_ctx->deq);
+ u64 deq;
+ /* 4.6.10 deq ptr is written to the stream ctx for streams */
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ struct xhci_stream_ctx *ctx =
+ &ep->stream_info->stream_ctx_array[stream_id];
+ deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
+ } else {
+ deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
+ }
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
+ if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
+ ep->queued_deq_ptr) == deq) {
+ /* Update the ring's dequeue segment and dequeue pointer
+ * to reflect the new position.
+ */
+ update_ring_for_set_deq_completion(xhci, dev,
+ ep_ring, ep_index);
+ } else {
+ xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
+ xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
+ ep->queued_deq_seg, ep->queued_deq_ptr);
+ }
}
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
+ dev->eps[ep_index].queued_deq_seg = NULL;
+ dev->eps[ep_index].queued_deq_ptr = NULL;
/* Restart any rings with pending URBs */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
-static void handle_reset_ep_completion(struct xhci_hcd *xhci,
- struct xhci_event_cmd *event,
- union xhci_trb *trb)
+static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
+ union xhci_trb *trb, u32 cmd_comp_code)
{
- int slot_id;
unsigned int ep_index;
- slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
- ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+ ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
/* This command will only fail if the endpoint wasn't halted,
* but we don't care.
*/
- xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
- (unsigned int) GET_COMP_CODE(event->status));
+ xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ "Ignoring reset ep completion code of %u", cmd_comp_code);
/* HW with the reset endpoint quirk needs to have a configure endpoint
* command complete before the endpoint can be used. Queue that here
* because the HW can't handle two commands being queued in a row.
*/
if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
- xhci_dbg(xhci, "Queueing configure endpoint command\n");
- xhci_queue_configure_endpoint(xhci,
+ struct xhci_command *command;
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Queueing configure endpoint command");
+ xhci_queue_configure_endpoint(xhci, command,
xhci->devs[slot_id]->in_ctx->dma, slot_id,
false);
xhci_ring_cmd_db(xhci);
@@ -990,49 +1131,228 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
}
}
-/* Check to see if a command in the device's command queue matches this one.
- * Signal the completion or free the command, and return 1. Return 0 if the
- * completed command isn't at the head of the command list.
- */
-static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
- struct xhci_virt_device *virt_dev,
+static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
+ u32 cmd_comp_code)
+{
+ if (cmd_comp_code == COMP_SUCCESS)
+ xhci->slot_id = slot_id;
+ else
+ xhci->slot_id = 0;
+}
+
+static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
+{
+ struct xhci_virt_device *virt_dev;
+
+ virt_dev = xhci->devs[slot_id];
+ if (!virt_dev)
+ return;
+ if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+ /* Delete default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci, virt_dev, true);
+ xhci_free_virt_device(xhci, slot_id);
+}
+
+static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
+ struct xhci_event_cmd *event, u32 cmd_comp_code)
+{
+ struct xhci_virt_device *virt_dev;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ unsigned int ep_index;
+ unsigned int ep_state;
+ u32 add_flags, drop_flags;
+
+ /*
+ * Configure endpoint commands can come from the USB core
+ * configuration or alt setting changes, or because the HW
+ * needed an extra configure endpoint command after a reset
+ * endpoint command or streams were being configured.
+ * If the command was for a halted endpoint, the xHCI driver
+ * is not waiting on the configure endpoint command.
+ */
+ virt_dev = xhci->devs[slot_id];
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "Could not get input context, bad type.\n");
+ return;
+ }
+
+ add_flags = le32_to_cpu(ctrl_ctx->add_flags);
+ drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
+ /* Input ctx add_flags are the endpoint index plus one */
+ ep_index = xhci_last_valid_endpoint(add_flags) - 1;
+
+ /* A usb_set_interface() call directly after clearing a halted
+ * condition may race on this quirky hardware. Not worth
+ * worrying about, since this is prototype hardware. Not sure
+ * if this will work for streams, but streams support was
+ * untested on this prototype.
+ */
+ if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
+ ep_index != (unsigned int) -1 &&
+ add_flags - SLOT_FLAG == drop_flags) {
+ ep_state = virt_dev->eps[ep_index].ep_state;
+ if (!(ep_state & EP_HALTED))
+ return;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Completed config ep cmd - "
+ "last ep index = %d, state = %d",
+ ep_index, ep_state);
+ /* Clear internal halted state and restart ring(s) */
+ virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ return;
+ }
+ return;
+}
+
+static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
struct xhci_event_cmd *event)
{
- struct xhci_command *command;
+ xhci_dbg(xhci, "Completed reset device command.\n");
+ if (!xhci->devs[slot_id])
+ xhci_warn(xhci, "Reset device command completion "
+ "for disabled slot %u\n", slot_id);
+}
- if (list_empty(&virt_dev->cmd_list))
- return 0;
+static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
+ struct xhci_event_cmd *event)
+{
+ if (!(xhci->quirks & XHCI_NEC_HOST)) {
+ xhci->error_bitmask |= 1 << 6;
+ return;
+ }
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "NEC firmware version %2x.%02x",
+ NEC_FW_MAJOR(le32_to_cpu(event->status)),
+ NEC_FW_MINOR(le32_to_cpu(event->status)));
+}
- command = list_entry(virt_dev->cmd_list.next,
- struct xhci_command, cmd_list);
- if (xhci->cmd_ring->dequeue != command->command_trb)
- return 0;
+static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
+{
+ list_del(&cmd->cmd_list);
- command->status =
- GET_COMP_CODE(event->status);
- list_del(&command->cmd_list);
- if (command->completion)
- complete(command->completion);
- else
- xhci_free_command(xhci, command);
- return 1;
+ if (cmd->completion) {
+ cmd->status = status;
+ complete(cmd->completion);
+ } else {
+ kfree(cmd);
+ }
+}
+
+void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
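+/* Fail every command still on xhci->cmd_list with COMP_CMD_ABORT.  Used when
+ * the command ring cannot be recovered, e.g. when the abort handshake times
+ * out and the host controller is declared dead.
+ */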
+{
+ struct xhci_command *cur_cmd, *tmp_cmd;
+ list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
+ xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
+}
+
+/*
+ * Turn all commands on the command ring with status set to "aborted" into no-op TRBs.
+ * If there are other commands waiting, restart the ring and kick the timer.
+ * This must be called with command ring stopped and xhci->lock held.
+ */
+static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+ struct xhci_command *cur_cmd)
+{
+ struct xhci_command *i_cmd, *tmp_cmd;
+ u32 cycle_state;
+
+ /* Turn all aborted commands in list to no-ops, then restart */
+ list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
+ cmd_list) {
+
+ if (i_cmd->status != COMP_CMD_ABORT)
+ continue;
+
+ i_cmd->status = COMP_CMD_STOP;
+
+ xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
+ i_cmd->command_trb);
+ /* get cycle state from the original cmd trb */
+ cycle_state = le32_to_cpu(
+ i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
+ /* modify the command trb to no-op command */
+ i_cmd->command_trb->generic.field[0] = 0;
+ i_cmd->command_trb->generic.field[1] = 0;
+ i_cmd->command_trb->generic.field[2] = 0;
+ i_cmd->command_trb->generic.field[3] = cpu_to_le32(
+ TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
+
+ /*
+ * the caller waiting on the completion is woken when the command
+ * completion event is received for these no-op commands
+ */
+ }
+
+ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+ /* ring command ring doorbell to restart the command ring */
+ if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+ !(xhci->xhc_state & XHCI_STATE_DYING)) {
+ xhci->current_cmd = cur_cmd;
+ mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_ring_cmd_db(xhci);
+ }
+ return;
+}
+
+
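+/* xhci->cmd_timer callback: fires when the current command has not completed
+ * within XHCI_CMD_DEFAULT_TIMEOUT.  Marks it aborted and, if the command ring
+ * is still running, kicks off a command ring abort.
+ */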
+void xhci_handle_command_timeout(unsigned long data)
+{
+ struct xhci_hcd *xhci;
+ int ret;
+ unsigned long flags;
+ u64 hw_ring_state;
+ struct xhci_command *cur_cmd = NULL;
+ xhci = (struct xhci_hcd *) data;
+
+ /* mark this command to be cancelled */
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (xhci->current_cmd) {
+ cur_cmd = xhci->current_cmd;
+ cur_cmd->status = COMP_CMD_ABORT;
+ }
+
+
+ /* Make sure command ring is running before aborting it */
+ hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
+ (hw_ring_state & CMD_RING_RUNNING)) {
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "Command timeout\n");
+ ret = xhci_abort_cmd_ring(xhci);
+ if (unlikely(ret == -ESHUTDOWN)) {
+ xhci_err(xhci, "Abort command ring failed\n");
+ xhci_cleanup_command_queue(xhci);
+ usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+ xhci_dbg(xhci, "xHCI host controller is dead.\n");
+ }
+ return;
+ }
+ /* command timeout on stopped ring, ring can't be aborted */
+ xhci_dbg(xhci, "Command timeout on stopped ring\n");
+ xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return;
}
static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
- int slot_id = TRB_TO_SLOT_ID(event->flags);
+ int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
u64 cmd_dma;
dma_addr_t cmd_dequeue_dma;
- struct xhci_input_control_ctx *ctrl_ctx;
- struct xhci_virt_device *virt_dev;
- unsigned int ep_index;
- struct xhci_ring *ep_ring;
- unsigned int ep_state;
+ u32 cmd_comp_code;
+ union xhci_trb *cmd_trb;
+ struct xhci_command *cmd;
+ u32 cmd_type;
- cmd_dma = event->cmd_trb;
+ cmd_dma = le64_to_cpu(event->cmd_trb);
+ cmd_trb = xhci->cmd_ring->dequeue;
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
- xhci->cmd_ring->dequeue);
+ cmd_trb);
/* Is the command ring deq ptr out of sync with the deq seg ptr? */
if (cmd_dequeue_dma == 0) {
xhci->error_bitmask |= 1 << 4;
@@ -1043,112 +1363,103 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci->error_bitmask |= 1 << 5;
return;
}
- switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
- case TRB_TYPE(TRB_ENABLE_SLOT):
- if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
- xhci->slot_id = slot_id;
- else
- xhci->slot_id = 0;
- complete(&xhci->addr_dev);
+
+ cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
+
+ if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+ xhci_err(xhci,
+ "Command completion event does not match command\n");
+ return;
+ }
+
+ del_timer(&xhci->cmd_timer);
+
+ trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
+
+ cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
+
+ /* If CMD ring stopped we own the trbs between enqueue and dequeue */
+ if (cmd_comp_code == COMP_CMD_STOP) {
+ xhci_handle_stopped_cmd_ring(xhci, cmd);
+ return;
+ }
+ /*
+ * Host aborted the command ring, check if the current command was
+ * supposed to be aborted, otherwise continue normally.
+ * The command ring is stopped now, but the xHC will issue a Command
+ * Ring Stopped event which will cause us to restart it.
+ */
+ if (cmd_comp_code == COMP_CMD_ABORT) {
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+ if (cmd->status == COMP_CMD_ABORT)
+ goto event_handled;
+ }
+
+ cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
+ switch (cmd_type) {
+ case TRB_ENABLE_SLOT:
+ xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
break;
- case TRB_TYPE(TRB_DISABLE_SLOT):
- if (xhci->devs[slot_id])
- xhci_free_virt_device(xhci, slot_id);
+ case TRB_DISABLE_SLOT:
+ xhci_handle_cmd_disable_slot(xhci, slot_id);
break;
- case TRB_TYPE(TRB_CONFIG_EP):
- virt_dev = xhci->devs[slot_id];
- if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
- break;
- /*
- * Configure endpoint commands can come from the USB core
- * configuration or alt setting changes, or because the HW
- * needed an extra configure endpoint command after a reset
- * endpoint command or streams were being configured.
- * If the command was for a halted endpoint, the xHCI driver
- * is not waiting on the configure endpoint command.
- */
- ctrl_ctx = xhci_get_input_control_ctx(xhci,
- virt_dev->in_ctx);
- /* Input ctx add_flags are the endpoint index plus one */
- ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
- /* A usb_set_interface() call directly after clearing a halted
- * condition may race on this quirky hardware. Not worth
- * worrying about, since this is prototype hardware. Not sure
- * if this will work for streams, but streams support was
- * untested on this prototype.
- */
- if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
- ep_index != (unsigned int) -1 &&
- ctrl_ctx->add_flags - SLOT_FLAG ==
- ctrl_ctx->drop_flags) {
- ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
- ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
- if (!(ep_state & EP_HALTED))
- goto bandwidth_change;
- xhci_dbg(xhci, "Completed config ep cmd - "
- "last ep index = %d, state = %d\n",
- ep_index, ep_state);
- /* Clear internal halted state and restart ring(s) */
- xhci->devs[slot_id]->eps[ep_index].ep_state &=
- ~EP_HALTED;
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
- break;
- }
-bandwidth_change:
- xhci_dbg(xhci, "Completed config ep cmd\n");
- xhci->devs[slot_id]->cmd_status =
- GET_COMP_CODE(event->status);
- complete(&xhci->devs[slot_id]->cmd_completion);
+ case TRB_CONFIG_EP:
+ if (!cmd->completion)
+ xhci_handle_cmd_config_ep(xhci, slot_id, event,
+ cmd_comp_code);
break;
- case TRB_TYPE(TRB_EVAL_CONTEXT):
- virt_dev = xhci->devs[slot_id];
- if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
- break;
- xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
- complete(&xhci->devs[slot_id]->cmd_completion);
+ case TRB_EVAL_CONTEXT:
break;
- case TRB_TYPE(TRB_ADDR_DEV):
- xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
- complete(&xhci->addr_dev);
+ case TRB_ADDR_DEV:
break;
- case TRB_TYPE(TRB_STOP_RING):
- handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
+ case TRB_STOP_RING:
+ WARN_ON(slot_id != TRB_TO_SLOT_ID(
+ le32_to_cpu(cmd_trb->generic.field[3])));
+ xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
break;
- case TRB_TYPE(TRB_SET_DEQ):
- handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
+ case TRB_SET_DEQ:
+ WARN_ON(slot_id != TRB_TO_SLOT_ID(
+ le32_to_cpu(cmd_trb->generic.field[3])));
+ xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
break;
- case TRB_TYPE(TRB_CMD_NOOP):
- ++xhci->noops_handled;
+ case TRB_CMD_NOOP:
+ /* Is this an aborted command turned to NO-OP? */
+ if (cmd->status == COMP_CMD_STOP)
+ cmd_comp_code = COMP_CMD_STOP;
break;
- case TRB_TYPE(TRB_RESET_EP):
- handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
+ case TRB_RESET_EP:
+ WARN_ON(slot_id != TRB_TO_SLOT_ID(
+ le32_to_cpu(cmd_trb->generic.field[3])));
+ xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
break;
- case TRB_TYPE(TRB_RESET_DEV):
- xhci_dbg(xhci, "Completed reset device command.\n");
+ case TRB_RESET_DEV:
+ /* SLOT_ID field in reset device cmd completion event TRB is 0.
+ * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
+ */
slot_id = TRB_TO_SLOT_ID(
- xhci->cmd_ring->dequeue->generic.field[3]);
- virt_dev = xhci->devs[slot_id];
- if (virt_dev)
- handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
- else
- xhci_warn(xhci, "Reset device command completion "
- "for disabled slot %u\n", slot_id);
+ le32_to_cpu(cmd_trb->generic.field[3]));
+ xhci_handle_cmd_reset_dev(xhci, slot_id, event);
break;
- case TRB_TYPE(TRB_NEC_GET_FW):
- if (!(xhci->quirks & XHCI_NEC_HOST)) {
- xhci->error_bitmask |= 1 << 6;
- break;
- }
- xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
- NEC_FW_MAJOR(event->status),
- NEC_FW_MINOR(event->status));
+ case TRB_NEC_GET_FW:
+ xhci_handle_cmd_nec_get_fw(xhci, event);
break;
default:
/* Skip over unknown commands on the event ring */
xhci->error_bitmask |= 1 << 6;
break;
}
- inc_deq(xhci, xhci->cmd_ring, false);
+
+ /* restart timer if this wasn't the last command */
+ if (cmd->cmd_list.next != &xhci->cmd_list) {
+ xhci->current_cmd = list_entry(cmd->cmd_list.next,
+ struct xhci_command, cmd_list);
+ mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ }
+
+event_handled:
+ xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
+
+ inc_deq(xhci, xhci->cmd_ring);
}
static void handle_vendor_event(struct xhci_hcd *xhci,
@@ -1156,39 +1467,142 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
{
u32 trb_type;
- trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]);
+ trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
handle_cmd_completion(xhci, &event->event_cmd);
}
+/* @port_id: the one-based port ID from the hardware (indexed from array of all
+ * port registers -- USB 3.0 and USB 2.0).
+ *
+ * Returns a zero-based port number, which is suitable for indexing into each of
+ * the split roothubs' port arrays and bus state arrays.
+ * Add one to it in order to call xhci_find_slot_id_by_port.
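+ *
+ * Worked example (hypothetical port layout): if the Extended Capabilities
+ * list ports 1-2 as USB 3.0 and ports 3-4 as USB 2.0, a hardware port_id
+ * of 4 handled by the USB 2.0 roothub yields a faked port index of 1,
+ * since only port 3 before it is a similar-speed port.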
+ */
+static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
+ struct xhci_hcd *xhci, u32 port_id)
+{
+ unsigned int i;
+ unsigned int num_similar_speed_ports = 0;
+
+ /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
+ * and usb2_ports are 0-based indexes. Count the number of similar
+ * speed ports, up to 1 port before this port.
+ */
+ for (i = 0; i < (port_id - 1); i++) {
+ u8 port_speed = xhci->port_array[i];
+
+ /*
+ * Skip ports that don't have known speeds, or have duplicate
+ * Extended Capabilities port speed entries.
+ */
+ if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
+ continue;
+
+ /*
+ * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
+ * 1.1 ports are under the USB 2.0 hub. If the port speed
+ * matches the device speed, it's a similar speed port.
+ */
+ if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
+ num_similar_speed_ports++;
+ }
+ return num_similar_speed_ports;
+}
+
+static void handle_device_notification(struct xhci_hcd *xhci,
+ union xhci_trb *event)
+{
+ u32 slot_id;
+ struct usb_device *udev;
+
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
+ if (!xhci->devs[slot_id]) {
+ xhci_warn(xhci, "Device Notification event for "
+ "unused slot %u\n", slot_id);
+ return;
+ }
+
+ xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
+ slot_id);
+ udev = xhci->devs[slot_id]->udev;
+ if (udev && udev->parent)
+ usb_wakeup_notification(udev->parent, udev->portnum);
+}
+
static void handle_port_status(struct xhci_hcd *xhci,
union xhci_trb *event)
{
- struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct usb_hcd *hcd;
u32 port_id;
u32 temp, temp1;
- u32 __iomem *addr;
- int ports;
+ int max_ports;
int slot_id;
+ unsigned int faked_port_index;
+ u8 major_revision;
+ struct xhci_bus_state *bus_state;
+ __le32 __iomem **port_array;
+ bool bogus_port_status = false;
/* Port status change events always have a successful completion code */
- if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
+ if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
xhci->error_bitmask |= 1 << 8;
}
- port_id = GET_PORT_ID(event->generic.field[0]);
+ port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
- ports = HCS_MAX_PORTS(xhci->hcs_params1);
- if ((port_id <= 0) || (port_id > ports)) {
+ max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ if ((port_id <= 0) || (port_id > max_ports)) {
xhci_warn(xhci, "Invalid port id %d\n", port_id);
+ inc_deq(xhci, xhci->event_ring);
+ return;
+ }
+
+ /* Figure out which usb_hcd this port is attached to:
+ * is it a USB 3.0 port or a USB 2.0/1.1 port?
+ */
+ major_revision = xhci->port_array[port_id - 1];
+
+ /* Find the right roothub. */
+ hcd = xhci_to_hcd(xhci);
+ if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
+ hcd = xhci->shared_hcd;
+
+ if (major_revision == 0) {
+ xhci_warn(xhci, "Event for port %u not in "
+ "Extended Capabilities, ignoring.\n",
+ port_id);
+ bogus_port_status = true;
goto cleanup;
}
+ if (major_revision == DUPLICATE_ENTRY) {
+ xhci_warn(xhci, "Event for port %u duplicated in"
+ "Extended Capabilities, ignoring.\n",
+ port_id);
+ bogus_port_status = true;
+ goto cleanup;
+ }
+
+ /*
+ * Hardware port IDs reported by a Port Status Change Event include USB
+ * 3.0 and USB 2.0 ports. We want to check if the port has reported a
+ * resume event, but we first need to translate the hardware port ID
+ * into the index into the ports on the correct split roothub, and the
+ * correct bus_state structure.
+ */
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
+ if (hcd->speed == HCD_USB3)
+ port_array = xhci->usb3_ports;
+ else
+ port_array = xhci->usb2_ports;
+ /* Find the faked port hub number */
+ faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
+ port_id);
- addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
- temp = xhci_readl(xhci, addr);
- if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) {
+ temp = readl(port_array[faked_port_index]);
+ if (hcd->state == HC_STATE_SUSPENDED) {
xhci_dbg(xhci, "resume root hub\n");
usb_hcd_resume_root_hub(hcd);
}
@@ -1196,47 +1610,105 @@ static void handle_port_status(struct xhci_hcd *xhci,
if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
xhci_dbg(xhci, "port resume event for port %d\n", port_id);
- temp1 = xhci_readl(xhci, &xhci->op_regs->command);
+ temp1 = readl(&xhci->op_regs->command);
if (!(temp1 & CMD_RUN)) {
xhci_warn(xhci, "xHC is not running.\n");
goto cleanup;
}
if (DEV_SUPERSPEED(temp)) {
- xhci_dbg(xhci, "resume SS port %d\n", port_id);
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, addr);
- slot_id = xhci_find_slot_id_by_port(xhci, port_id);
- if (!slot_id) {
- xhci_dbg(xhci, "slot_id is zero\n");
- goto cleanup;
- }
- xhci_ring_device(xhci, slot_id);
- xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
- /* Clear PORT_PLC */
- temp = xhci_readl(xhci, addr);
- temp = xhci_port_state_to_neutral(temp);
- temp |= PORT_PLC;
- xhci_writel(xhci, temp, addr);
+ xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
+ /* Set a flag to say the port signaled remote wakeup,
+ * so we can tell the difference between the end of a
+ * device-initiated and a host-initiated resume.
+ */
+ bus_state->port_remote_wakeup |= 1 << faked_port_index;
+ xhci_test_and_clear_bit(xhci, port_array,
+ faked_port_index, PORT_PLC);
+ xhci_set_link_state(xhci, port_array, faked_port_index,
+ XDEV_U0);
+ /* Need to wait until the next link state change
+ * indicates the device is actually in U0.
+ */
+ bogus_port_status = true;
+ goto cleanup;
} else {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
- xhci->resume_done[port_id - 1] = jiffies +
+ bus_state->resume_done[faked_port_index] = jiffies +
msecs_to_jiffies(20);
+ set_bit(faked_port_index, &bus_state->resuming_ports);
mod_timer(&hcd->rh_timer,
- xhci->resume_done[port_id - 1]);
+ bus_state->resume_done[faked_port_index]);
/* Do the rest in GetPortStatus */
}
}
+ if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
+ DEV_SUPERSPEED(temp)) {
+ xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
+ /* We've just brought the device into U0 through either the
+ * Resume state after a device remote wakeup, or through the
+ * U3Exit state after a host-initiated resume. If it's a
+ * device-initiated remote wake, don't pass up the link state change,
+ * so the roothub behavior is consistent with external
+ * USB 3.0 hub behavior.
+ */
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ faked_port_index + 1);
+ if (slot_id && xhci->devs[slot_id])
+ xhci_ring_device(xhci, slot_id);
+ if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
+ bus_state->port_remote_wakeup &=
+ ~(1 << faked_port_index);
+ xhci_test_and_clear_bit(xhci, port_array,
+ faked_port_index, PORT_PLC);
+ usb_wakeup_notification(hcd->self.root_hub,
+ faked_port_index + 1);
+ bogus_port_status = true;
+ goto cleanup;
+ }
+ }
+
+ /*
+ * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
+ * RExit to a disconnect state). If so, let the driver know it's
+ * out of the RExit state.
+ */
+ if (!DEV_SUPERSPEED(temp) &&
+ test_and_clear_bit(faked_port_index,
+ &bus_state->rexit_ports)) {
+ complete(&bus_state->rexit_done[faked_port_index]);
+ bogus_port_status = true;
+ goto cleanup;
+ }
+
+ if (hcd->speed != HCD_USB3)
+ xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
+ PORT_PLC);
+
cleanup:
/* Update event ring dequeue pointer before dropping the lock */
- inc_deq(xhci, xhci->event_ring, true);
+ inc_deq(xhci, xhci->event_ring);
+
+ /* Don't make the USB core poll the roothub if we got a bad port status
+ * change event. Besides, at that point we can't tell which roothub
+ * (USB 2.0 or USB 3.0) to kick.
+ */
+ if (bogus_port_status)
+ return;
+ /*
+ * xHCI port-status-change events occur when the "or" of all the
+ * status-change bits in the portsc register changes from 0 to 1.
+ * New status changes won't cause an event if any other change
+ * bits are still set. When an event occurs, switch over to
+ * polling to avoid losing status changes.
+ */
+ xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
spin_unlock(&xhci->lock);
/* Pass this up to the core */
- usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
+ usb_hcd_poll_rh_status(hcd);
spin_lock(&xhci->lock);
}
@@ -1302,16 +1774,19 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
struct xhci_td *td, union xhci_trb *event_trb)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+ struct xhci_command *command;
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ if (!command)
+ return;
+
ep->ep_state |= EP_HALTED;
ep->stopped_td = td;
- ep->stopped_trb = event_trb;
ep->stopped_stream = stream_id;
- xhci_queue_reset_ep(xhci, slot_id, ep_index);
+ xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
ep->stopped_stream = 0;
xhci_ring_cmd_db(xhci);
@@ -1337,7 +1812,8 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
* endpoint anyway. Check if a babble halted the
* endpoint.
*/
- if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
+ if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
+ cpu_to_le32(EP_STATE_HALTED))
return 1;
return 0;
@@ -1375,12 +1851,12 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct urb_priv *urb_priv;
u32 trb_comp_code;
- slot_id = TRB_TO_SLOT_ID(event->flags);
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
- ep_index = TRB_TO_EP_ID(event->flags) - 1;
- ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
- trb_comp_code = GET_COMP_CODE(event->transfer_len);
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
if (skip)
goto td_cleanup;
@@ -1392,7 +1868,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* the ring dequeue pointer or take this TD off any lists yet.
*/
ep->stopped_td = td;
- ep->stopped_trb = event_trb;
return 0;
} else {
if (trb_comp_code == COMP_STALL) {
@@ -1404,7 +1879,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* USB class driver clear the stall later.
*/
ep->stopped_td = td;
- ep->stopped_trb = event_trb;
ep->stopped_stream = ep_ring->stream_id;
} else if (xhci_requires_manual_halt_cleanup(xhci,
ep_ctx, trb_comp_code)) {
@@ -1419,8 +1893,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
} else {
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
- inc_deq(xhci, ep_ring, false);
- inc_deq(xhci, ep_ring, false);
+ inc_deq(xhci, ep_ring);
+ inc_deq(xhci, ep_ring);
}
td_cleanup:
@@ -1446,15 +1920,24 @@ td_cleanup:
else
*status = 0;
}
- list_del(&td->td_list);
+ list_del_init(&td->td_list);
/* Was this TD slated to be cancelled but completed anyway? */
if (!list_empty(&td->cancelled_td_list))
- list_del(&td->cancelled_td_list);
+ list_del_init(&td->cancelled_td_list);
urb_priv->td_cnt++;
/* Giveback the urb when all the tds are completed */
- if (urb_priv->td_cnt == urb_priv->length)
+ if (urb_priv->td_cnt == urb_priv->length) {
ret = 1;
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+ if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
+ == 0) {
+ if (xhci->quirks & XHCI_AMD_PLL_FIX)
+ usb_amd_quirk_pll_enable();
+ }
+ }
+ }
}
return ret;
@@ -1474,14 +1957,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
- slot_id = TRB_TO_SLOT_ID(event->flags);
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
- ep_index = TRB_TO_EP_ID(event->flags) - 1;
- ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
- trb_comp_code = GET_COMP_CODE(event->transfer_len);
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
- xhci_debug_trb(xhci, xhci->event_ring->dequeue);
switch (trb_comp_code) {
case COMP_SUCCESS:
if (event_trb == ep_ring->dequeue) {
@@ -1493,17 +1975,18 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
"without IOC set??\n");
*status = -ESHUTDOWN;
} else {
- xhci_dbg(xhci, "Successful control transfer!\n");
*status = 0;
}
break;
case COMP_SHORT_TX:
- xhci_warn(xhci, "WARN: short transfer on control ep\n");
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO;
else
*status = 0;
break;
+ case COMP_STOP_INVAL:
+ case COMP_STOP:
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
default:
if (!xhci_requires_manual_halt_cleanup(xhci,
ep_ctx, trb_comp_code))
@@ -1517,8 +2000,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
if (event_trb != ep_ring->dequeue &&
event_trb != td->last_trb)
td->urb->actual_length =
- td->urb->transfer_buffer_length
- - TRB_LEN(event->transfer_len);
+ td->urb->transfer_buffer_length -
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
else
td->urb->actual_length = 0;
@@ -1548,15 +2031,12 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
}
} else {
/* Maybe the event was for the data stage? */
- if (trb_comp_code != COMP_STOP_INVAL) {
- /* We didn't stop on a link TRB in the middle */
- td->urb->actual_length =
- td->urb->transfer_buffer_length -
- TRB_LEN(event->transfer_len);
- xhci_dbg(xhci, "Waiting for status "
- "stage event\n");
- return 0;
- }
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ xhci_dbg(xhci, "Waiting for status "
+ "stage event\n");
+ return 0;
}
}
@@ -1574,97 +2054,105 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct urb_priv *urb_priv;
int idx;
int len = 0;
- int skip_td = 0;
union xhci_trb *cur_trb;
struct xhci_segment *cur_seg;
+ struct usb_iso_packet_descriptor *frame;
u32 trb_comp_code;
+ bool skip_td = false;
- ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
- trb_comp_code = GET_COMP_CODE(event->transfer_len);
+ ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
urb_priv = td->urb->hcpriv;
idx = urb_priv->td_cnt;
+ frame = &td->urb->iso_frame_desc[idx];
- if (ep->skip) {
- /* The transfer is partly done */
- *status = -EXDEV;
- td->urb->iso_frame_desc[idx].status = -EXDEV;
- } else {
- /* handle completion code */
- switch (trb_comp_code) {
- case COMP_SUCCESS:
- td->urb->iso_frame_desc[idx].status = 0;
- xhci_dbg(xhci, "Successful isoc transfer!\n");
- break;
- case COMP_SHORT_TX:
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- td->urb->iso_frame_desc[idx].status =
- -EREMOTEIO;
- else
- td->urb->iso_frame_desc[idx].status = 0;
- break;
- case COMP_BW_OVER:
- td->urb->iso_frame_desc[idx].status = -ECOMM;
- skip_td = 1;
- break;
- case COMP_BUFF_OVER:
- case COMP_BABBLE:
- td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
- skip_td = 1;
- break;
- case COMP_STALL:
- td->urb->iso_frame_desc[idx].status = -EPROTO;
- skip_td = 1;
- break;
- case COMP_STOP:
- case COMP_STOP_INVAL:
- break;
- default:
- td->urb->iso_frame_desc[idx].status = -1;
+ /* handle completion code */
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
+ frame->status = 0;
break;
}
+ if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
+ trb_comp_code = COMP_SHORT_TX;
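+ /* deliberately fall through to the short-transfer handling */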
+ case COMP_SHORT_TX:
+ frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
+ -EREMOTEIO : 0;
+ break;
+ case COMP_BW_OVER:
+ frame->status = -ECOMM;
+ skip_td = true;
+ break;
+ case COMP_BUFF_OVER:
+ case COMP_BABBLE:
+ frame->status = -EOVERFLOW;
+ skip_td = true;
+ break;
+ case COMP_DEV_ERR:
+ case COMP_STALL:
+ case COMP_TX_ERR:
+ frame->status = -EPROTO;
+ skip_td = true;
+ break;
+ case COMP_STOP:
+ case COMP_STOP_INVAL:
+ break;
+ default:
+ frame->status = -1;
+ break;
}
- /* calc actual length */
- if (ep->skip) {
- td->urb->iso_frame_desc[idx].actual_length = 0;
- /* Update ring dequeue pointer */
- while (ep_ring->dequeue != td->last_trb)
- inc_deq(xhci, ep_ring, false);
- inc_deq(xhci, ep_ring, false);
- return finish_td(xhci, td, event_trb, event, ep, status, true);
- }
-
- if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
- td->urb->iso_frame_desc[idx].actual_length =
- td->urb->iso_frame_desc[idx].length;
- td->urb->actual_length +=
- td->urb->iso_frame_desc[idx].length;
+ if (trb_comp_code == COMP_SUCCESS || skip_td) {
+ frame->actual_length = frame->length;
+ td->urb->actual_length += frame->length;
} else {
for (cur_trb = ep_ring->dequeue,
cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if ((cur_trb->generic.field[3] &
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
- (cur_trb->generic.field[3] &
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
- len +=
- TRB_LEN(cur_trb->generic.field[2]);
+ if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+ !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
+ len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
}
- len += TRB_LEN(cur_trb->generic.field[2]) -
- TRB_LEN(event->transfer_len);
+ len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
if (trb_comp_code != COMP_STOP_INVAL) {
- td->urb->iso_frame_desc[idx].actual_length = len;
+ frame->actual_length = len;
td->urb->actual_length += len;
}
}
- if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
- *status = 0;
-
return finish_td(xhci, td, event_trb, event, ep, status, false);
}
+static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status)
+{
+ struct xhci_ring *ep_ring;
+ struct urb_priv *urb_priv;
+ struct usb_iso_packet_descriptor *frame;
+ int idx;
+
+ ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ urb_priv = td->urb->hcpriv;
+ idx = urb_priv->td_cnt;
+ frame = &td->urb->iso_frame_desc[idx];
+
+ /* The transfer is partly done. */
+ frame->status = -EXDEV;
+
+ /* calc actual length */
+ frame->actual_length = 0;
+
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ inc_deq(xhci, ep_ring);
+ inc_deq(xhci, ep_ring);
+
+ return finish_td(xhci, td, NULL, event, ep, status, true);
+}
+
/*
* Process bulk and interrupt tds, update urb status and actual_length.
*/
@@ -1677,26 +2165,23 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_segment *cur_seg;
u32 trb_comp_code;
- ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
- trb_comp_code = GET_COMP_CODE(event->transfer_len);
+ ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
switch (trb_comp_code) {
case COMP_SUCCESS:
/* Double check that the HW transferred everything. */
- if (event_trb != td->last_trb) {
+ if (event_trb != td->last_trb ||
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
xhci_warn(xhci, "WARN Successful completion "
"on short TX\n");
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO;
else
*status = 0;
+ if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
+ trb_comp_code = COMP_SHORT_TX;
} else {
- if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
- xhci_dbg(xhci, "Successful bulk "
- "transfer!\n");
- else
- xhci_dbg(xhci, "Successful interrupt "
- "transfer!\n");
*status = 0;
}
break;
@@ -1710,23 +2195,23 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* Others already handled above */
break;
}
- dev_dbg(&td->urb->dev->dev,
- "ep %#x - asked for %d bytes, "
- "%d bytes untransferred\n",
- td->urb->ep->desc.bEndpointAddress,
- td->urb->transfer_buffer_length,
- TRB_LEN(event->transfer_len));
+ if (trb_comp_code == COMP_SHORT_TX)
+ xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
+ "%d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ td->urb->transfer_buffer_length,
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
/* Fast path - was this the last TRB in the TD for this URB? */
if (event_trb == td->last_trb) {
- if (TRB_LEN(event->transfer_len) != 0) {
+ if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
td->urb->actual_length =
td->urb->transfer_buffer_length -
- TRB_LEN(event->transfer_len);
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
if (td->urb->transfer_buffer_length <
td->urb->actual_length) {
xhci_warn(xhci, "HC gave bad length "
"of %d bytes left\n",
- TRB_LEN(event->transfer_len));
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
td->urb->actual_length = 0;
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO;
@@ -1757,20 +2242,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
cur_trb != event_trb;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if ((cur_trb->generic.field[3] &
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
- (cur_trb->generic.field[3] &
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+ if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+ !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
td->urb->actual_length +=
- TRB_LEN(cur_trb->generic.field[2]);
+ TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
}
/* If the ring didn't stop on a Link or No-op TRB, add
* in the actual bytes transferred from the Normal TRB
*/
if (trb_comp_code != COMP_STOP_INVAL)
td->urb->actual_length +=
- TRB_LEN(cur_trb->generic.field[2]) -
- TRB_LEN(event->transfer_len);
+ TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
}
return finish_td(xhci, td, event_trb, event, ep, status, false);
@@ -1783,6 +2266,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
*/
static int handle_tx_event(struct xhci_hcd *xhci,
struct xhci_transfer_event *event)
+ __releases(&xhci->lock)
+ __acquires(&xhci->lock)
{
struct xhci_virt_device *xdev;
struct xhci_virt_ep *ep;
@@ -1797,37 +2282,72 @@ static int handle_tx_event(struct xhci_hcd *xhci,
int status = -EINPROGRESS;
struct urb_priv *urb_priv;
struct xhci_ep_ctx *ep_ctx;
+ struct list_head *tmp;
u32 trb_comp_code;
int ret = 0;
+ int td_num = 0;
- slot_id = TRB_TO_SLOT_ID(event->flags);
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
if (!xdev) {
xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
+ xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
+ (unsigned long long) xhci_trb_virt_to_dma(
+ xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue),
+ lower_32_bits(le64_to_cpu(event->buffer)),
+ upper_32_bits(le64_to_cpu(event->buffer)),
+ le32_to_cpu(event->transfer_len),
+ le32_to_cpu(event->flags));
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
return -ENODEV;
}
/* Endpoint ID is 1 based, our index is zero based */
- ep_index = TRB_TO_EP_ID(event->flags) - 1;
- xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
ep = &xdev->eps[ep_index];
- ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+ ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
if (!ep_ring ||
- (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+ (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
+ EP_STATE_DISABLED) {
xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
"or incorrect stream ring\n");
+ xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
+ (unsigned long long) xhci_trb_virt_to_dma(
+ xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue),
+ lower_32_bits(le64_to_cpu(event->buffer)),
+ upper_32_bits(le64_to_cpu(event->buffer)),
+ le32_to_cpu(event->transfer_len),
+ le32_to_cpu(event->flags));
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
return -ENODEV;
}
- event_dma = event->buffer;
- trb_comp_code = GET_COMP_CODE(event->transfer_len);
+ /* Count the number of TDs on the ring if ep->skip is set */
+ if (ep->skip) {
+ list_for_each(tmp, &ep_ring->td_list)
+ td_num++;
+ }
+
+ event_dma = le64_to_cpu(event->buffer);
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
/* Look for common error cases */
switch (trb_comp_code) {
/* Skip codes that require special handling depending on
* transfer type
*/
case COMP_SUCCESS:
+ if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
+ break;
+ if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+ trb_comp_code = COMP_SHORT_TX;
+ else
+ xhci_warn_ratelimited(xhci,
+ "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
case COMP_SHORT_TX:
break;
case COMP_STOP:
@@ -1837,7 +2357,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
break;
case COMP_STALL:
- xhci_warn(xhci, "WARN: Stalled endpoint\n");
+ xhci_dbg(xhci, "Stalled endpoint\n");
ep->ep_state |= EP_HALTED;
status = -EPIPE;
break;
@@ -1847,11 +2367,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
break;
case COMP_SPLIT_ERR:
case COMP_TX_ERR:
- xhci_warn(xhci, "WARN: transfer error on endpoint\n");
+ xhci_dbg(xhci, "Transfer error on endpoint\n");
status = -EPROTO;
break;
case COMP_BABBLE:
- xhci_warn(xhci, "WARN: babble error on endpoint\n");
+ xhci_dbg(xhci, "Babble error on endpoint\n");
status = -EOVERFLOW;
break;
case COMP_DB_ERR:
@@ -1874,15 +2394,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (!list_empty(&ep_ring->td_list))
xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
"still with TDs queued?\n",
- TRB_TO_SLOT_ID(event->flags), ep_index);
+ TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+ ep_index);
goto cleanup;
case COMP_OVERRUN:
xhci_dbg(xhci, "overrun event on endpoint\n");
if (!list_empty(&ep_ring->td_list))
xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
"still with TDs queued?\n",
- TRB_TO_SLOT_ID(event->flags), ep_index);
+ TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+ ep_index);
goto cleanup;
+ case COMP_DEV_ERR:
+ xhci_warn(xhci, "WARN: detect an incompatible device");
+ status = -EPROTO;
+ break;
case COMP_MISSED_INT:
/*
* When encounter missed service error, one or more isoc tds
@@ -1908,12 +2434,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* TD list.
*/
if (list_empty(&ep_ring->td_list)) {
- xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
- "with no TDs queued?\n",
- TRB_TO_SLOT_ID(event->flags), ep_index);
- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
- (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
- xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+ /*
+ * A stopped endpoint may generate an extra completion
+ * event if the device was suspended. Don't print
+ * warnings.
+ */
+ if (!(trb_comp_code == COMP_STOP ||
+ trb_comp_code == COMP_STOP_INVAL)) {
+ xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+ TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+ ep_index);
+ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+ (le32_to_cpu(event->flags) &
+ TRB_TYPE_BITMASK)>>10);
+ xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+ }
if (ep->skip) {
ep->skip = false;
xhci_dbg(xhci, "td_list is empty while skip "
@@ -1923,37 +2458,81 @@ static int handle_tx_event(struct xhci_hcd *xhci,
goto cleanup;
}
+ /* We've skipped all the TDs on the ep ring when ep->skip is set */
+ if (ep->skip && td_num == 0) {
+ ep->skip = false;
+ xhci_dbg(xhci, "All tds on the ep_ring skipped. "
+ "Clear skip flag.\n");
+ ret = 0;
+ goto cleanup;
+ }
+
td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+ if (ep->skip)
+ td_num--;
+
/* Is this a TRB in the currently executing TD? */
event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
td->last_trb, event_dma);
- if (event_seg && ep->skip) {
- xhci_dbg(xhci, "Found td. Clear skip flag.\n");
- ep->skip = false;
+
+ /*
+ * Skip the Force Stopped Event. The event_trb(event_dma) of the FSE
+ * is not in the current TD pointed to by ep_ring->dequeue because
+ * the hardware dequeue pointer is still at the previous TRB of the
+ * current TD. The previous TRB may be a Link TRB or the last TRB of
+ * the previous TD. The command completion handler will take care of
+ * the rest.
+ */
+ if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
+ ret = 0;
+ goto cleanup;
}
- if (!event_seg &&
- (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) {
- /* HC is busted, give up! */
- xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
+
+ if (!event_seg) {
+ if (!ep->skip ||
+ !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+ /* Some host controllers give a spurious
+ * successful event after a short transfer.
+ * Ignore it.
+ */
+ if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+ ep_ring->last_td_was_short) {
+ ep_ring->last_td_was_short = false;
+ ret = 0;
+ goto cleanup;
+ }
+ /* HC is busted, give up! */
+ xhci_err(xhci,
+ "ERROR Transfer event TRB DMA ptr not "
"part of current TD\n");
- return -ESHUTDOWN;
+ return -ESHUTDOWN;
+ }
+
+ ret = skip_isoc_td(xhci, td, event, ep, &status);
+ goto cleanup;
}
+ if (trb_comp_code == COMP_SHORT_TX)
+ ep_ring->last_td_was_short = true;
+ else
+ ep_ring->last_td_was_short = false;
- if (event_seg) {
- event_trb = &event_seg->trbs[(event_dma -
- event_seg->dma) / sizeof(*event_trb)];
- /*
- * No-op TRB should not trigger interrupts.
- * If event_trb is a no-op TRB, it means the
- * corresponding TD has been cancelled. Just ignore
- * the TD.
- */
- if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
- == TRB_TYPE(TRB_TR_NOOP)) {
- xhci_dbg(xhci, "event_trb is a no-op TRB. "
- "Skip it\n");
- goto cleanup;
- }
+ if (ep->skip) {
+ xhci_dbg(xhci, "Found td. Clear skip flag.\n");
+ ep->skip = false;
+ }
+
+ event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
+ sizeof(*event_trb)];
+ /*
+ * No-op TRB should not trigger interrupts.
+ * If event_trb is a no-op TRB, it means the
+ * corresponding TD has been cancelled. Just ignore
+ * the TD.
+ */
+ if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
+ xhci_dbg(xhci,
+ "event_trb is a no-op TRB. Skip it\n");
+ goto cleanup;
}
/* Now update the urb's actual_length and give back to
@@ -1975,7 +2554,7 @@ cleanup:
* Will roll back to continue process missed tds.
*/
if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
- inc_deq(xhci, xhci->event_ring, true);
+ inc_deq(xhci, xhci->event_ring);
}
if (ret) {
@@ -1990,13 +2569,27 @@ cleanup:
(trb_comp_code != COMP_STALL &&
trb_comp_code != COMP_BABBLE))
xhci_urb_free_priv(xhci, urb_priv);
-
- usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
- xhci_dbg(xhci, "Giveback URB %p, len = %d, "
- "status = %d\n",
- urb, urb->actual_length, status);
+ else
+ kfree(urb_priv);
+
+ usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+ if ((urb->actual_length != urb->transfer_buffer_length &&
+ (urb->transfer_flags &
+ URB_SHORT_NOT_OK)) ||
+ (status != 0 &&
+ !usb_endpoint_xfer_isoc(&urb->ep->desc)))
+ xhci_dbg(xhci, "Giveback URB %p, len = %d, "
+ "expected = %d, status = %d\n",
+ urb, urb->actual_length,
+ urb->transfer_buffer_length,
+ status);
spin_unlock(&xhci->lock);
- usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
+ /* EHCI, UHCI, and OHCI always unconditionally set the
+ * urb->status of an isochronous endpoint to 0.
+ */
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+ status = 0;
+ usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
spin_lock(&xhci->lock);
}
@@ -2014,52 +2607,55 @@ cleanup:
/*
* This function handles all OS-owned events on the event ring. It may drop
* xhci->lock between event processing (e.g. to pass up port status changes).
+ * Returns >0 for "possibly more events to process" (caller should call again),
+ * otherwise 0 if done. In the future, <0 returns should indicate an error code.
*/
-static void xhci_handle_event(struct xhci_hcd *xhci)
+static int xhci_handle_event(struct xhci_hcd *xhci)
{
union xhci_trb *event;
int update_ptrs = 1;
int ret;
- xhci_dbg(xhci, "In %s\n", __func__);
if (!xhci->event_ring || !xhci->event_ring->dequeue) {
xhci->error_bitmask |= 1 << 1;
- return;
+ return 0;
}
event = xhci->event_ring->dequeue;
/* Does the HC or OS own the TRB? */
- if ((event->event_cmd.flags & TRB_CYCLE) !=
- xhci->event_ring->cycle_state) {
+ if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
+ xhci->event_ring->cycle_state) {
xhci->error_bitmask |= 1 << 2;
- return;
+ return 0;
}
- xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
+ /*
+ * Barrier between reading the TRB_CYCLE (valid) flag above and any
+ * speculative reads of the event's flags/data below.
+ */
+ rmb();
/* FIXME: Handle more event types. */
- switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
+ switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
case TRB_TYPE(TRB_COMPLETION):
- xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
handle_cmd_completion(xhci, &event->event_cmd);
- xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
break;
case TRB_TYPE(TRB_PORT_STATUS):
- xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
handle_port_status(xhci, event);
- xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
update_ptrs = 0;
break;
case TRB_TYPE(TRB_TRANSFER):
- xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
ret = handle_tx_event(xhci, &event->trans_event);
- xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
if (ret < 0)
xhci->error_bitmask |= 1 << 9;
else
update_ptrs = 0;
break;
+ case TRB_TYPE(TRB_DEV_NOTE):
+ handle_device_notification(xhci, event);
+ break;
default:
- if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48))
+ if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
+ TRB_TYPE(48))
handle_vendor_event(xhci, event);
else
xhci->error_bitmask |= 1 << 3;
@@ -2070,15 +2666,17 @@ static void xhci_handle_event(struct xhci_hcd *xhci)
if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg(xhci, "xHCI host dying, returning from "
"event handler.\n");
- return;
+ return 0;
}
if (update_ptrs)
/* Update SW event ring dequeue pointer */
- inc_deq(xhci, xhci->event_ring, true);
+ inc_deq(xhci, xhci->event_ring);
- /* Are there more items on the event ring? */
- xhci_handle_event(xhci);
+ /* Are there more items on the event ring? Caller will call us again to
+ * check.
+ */
+ return 1;
}
/*
@@ -2090,38 +2688,24 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
u32 status;
- union xhci_trb *trb;
u64 temp_64;
union xhci_trb *event_ring_deq;
dma_addr_t deq;
spin_lock(&xhci->lock);
- trb = xhci->event_ring->dequeue;
/* Check if the xHC generated the interrupt, or the irq is shared */
- status = xhci_readl(xhci, &xhci->op_regs->status);
+ status = readl(&xhci->op_regs->status);
if (status == 0xffffffff)
goto hw_died;
if (!(status & STS_EINT)) {
spin_unlock(&xhci->lock);
- xhci_warn(xhci, "Spurious interrupt.\n");
return IRQ_NONE;
}
- xhci_dbg(xhci, "op reg status = %08x\n", status);
- xhci_dbg(xhci, "Event ring dequeue ptr:\n");
- xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
- (unsigned long long)
- xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
- lower_32_bits(trb->link.segment_ptr),
- upper_32_bits(trb->link.segment_ptr),
- (unsigned int) trb->link.intr_target,
- (unsigned int) trb->link.control);
-
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
hw_died:
- xhci_to_hcd(xhci)->state = HC_STATE_HALT;
spin_unlock(&xhci->lock);
return -ESHUTDOWN;
}
@@ -2132,16 +2716,16 @@ hw_died:
* Write 1 to clear the interrupt status.
*/
status |= STS_EINT;
- xhci_writel(xhci, status, &xhci->op_regs->status);
+ writel(status, &xhci->op_regs->status);
/* FIXME when MSI-X is supported and there are multiple vectors */
/* Clear the MSI-X event interrupt status */
- if (hcd->irq != -1) {
+ if (hcd->irq) {
u32 irq_pending;
/* Acknowledge the PCI interrupt */
- irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- irq_pending |= 0x3;
- xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
+ irq_pending = readl(&xhci->ir_set->irq_pending);
+ irq_pending |= IMAN_IP;
+ writel(irq_pending, &xhci->ir_set->irq_pending);
}
if (xhci->xhc_state & XHCI_STATE_DYING) {
@@ -2162,7 +2746,7 @@ hw_died:
/* FIXME this should be a delayed service routine
* that clears the EHB.
*/
- xhci_handle_event(xhci);
+ while (xhci_handle_event(xhci) > 0) {}
temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
/* If necessary, update the HW's version of the event ring deq ptr. */
@@ -2186,15 +2770,9 @@ hw_died:
return IRQ_HANDLED;
}
-irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
+irqreturn_t xhci_msi_irq(int irq, void *hcd)
{
- irqreturn_t ret;
-
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
-
- ret = xhci_irq(hcd);
-
- return ret;
+ return xhci_irq(hcd);
}
/**** Endpoint Ring Operations ****/
@@ -2207,17 +2785,17 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
* prepare_transfer()?
*/
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
- bool consumer, bool more_trbs_coming,
+ bool more_trbs_coming,
u32 field1, u32 field2, u32 field3, u32 field4)
{
struct xhci_generic_trb *trb;
trb = &ring->enqueue->generic;
- trb->field[0] = field1;
- trb->field[1] = field2;
- trb->field[2] = field3;
- trb->field[3] = field4;
- inc_enq(xhci, ring, consumer, more_trbs_coming);
+ trb->field[0] = cpu_to_le32(field1);
+ trb->field[1] = cpu_to_le32(field2);
+ trb->field[2] = cpu_to_le32(field3);
+ trb->field[3] = cpu_to_le32(field4);
+ inc_enq(xhci, ring, more_trbs_coming);
}
/*
@@ -2227,8 +2805,9 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
+ unsigned int num_trbs_needed;
+
/* Make sure the endpoint has been added to xHC schedule */
- xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
switch (ep_state) {
case EP_STATE_DISABLED:
/*
@@ -2255,40 +2834,49 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
*/
return -EINVAL;
}
- if (!room_on_ring(xhci, ep_ring, num_trbs)) {
- /* FIXME allocate more room */
- xhci_err(xhci, "ERROR no room on ep ring\n");
- return -ENOMEM;
+
+ while (1) {
+ if (room_on_ring(xhci, ep_ring, num_trbs))
+ break;
+
+ if (ep_ring == xhci->cmd_ring) {
+ xhci_err(xhci, "Do not support expand command ring\n");
+ return -ENOMEM;
+ }
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
+ "ERROR no room on ep ring, try ring expansion");
+ num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
+ if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
+ mem_flags)) {
+ xhci_err(xhci, "Ring expansion failed\n");
+ return -ENOMEM;
+ }
}
if (enqueue_is_link_trb(ep_ring)) {
struct xhci_ring *ring = ep_ring;
union xhci_trb *next;
- xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
next = ring->enqueue;
while (last_trb(xhci, ring, ring->enq_seg, next)) {
-
- /* If we're not dealing with 0.95 hardware,
- * clear the chain bit.
+ /* If we're not dealing with 0.95 hardware or isoc rings
+ * on AMD 0.96 host, clear the chain bit.
*/
- if (!xhci_link_trb_quirk(xhci))
- next->link.control &= ~TRB_CHAIN;
+ if (!xhci_link_trb_quirk(xhci) &&
+ !(ring->type == TYPE_ISOC &&
+ (xhci->quirks & XHCI_AMD_0x96_HOST)))
+ next->link.control &= cpu_to_le32(~TRB_CHAIN);
else
- next->link.control |= TRB_CHAIN;
+ next->link.control |= cpu_to_le32(TRB_CHAIN);
wmb();
- next->link.control ^= (u32) TRB_CYCLE;
+ next->link.control ^= cpu_to_le32(TRB_CYCLE);
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
- if (!in_interrupt()) {
- xhci_dbg(xhci, "queue_trb: Toggle cycle "
- "state for ring %p = %i\n",
- ring, (unsigned int)ring->cycle_state);
- }
}
ring->enq_seg = ring->enq_seg->next;
ring->enqueue = ring->enq_seg->trbs;
@@ -2322,8 +2910,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
}
ret = prepare_ring(xhci, ep_ring,
- ep_ctx->ep_info & EP_STATE_MASK,
- num_trbs, mem_flags);
+ le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
+ num_trbs, mem_flags);
if (ret)
return ret;
@@ -2334,12 +2922,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
INIT_LIST_HEAD(&td->cancelled_td_list);
if (td_index == 0) {
- ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
- if (unlikely(ret)) {
- xhci_urb_free_priv(xhci, urb_priv);
- urb->hcpriv = NULL;
+ ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
+ if (unlikely(ret))
return ret;
- }
}
td->urb = urb;
@@ -2359,52 +2944,41 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
struct scatterlist *sg;
sg = NULL;
- num_sgs = urb->num_sgs;
+ num_sgs = urb->num_mapped_sgs;
temp = urb->transfer_buffer_length;
- xhci_dbg(xhci, "count sg list trbs: \n");
num_trbs = 0;
for_each_sg(urb->sg, sg, num_sgs, i) {
- unsigned int previous_total_trbs = num_trbs;
unsigned int len = sg_dma_len(sg);
/* Scatter gather list entries may cross 64KB boundaries */
running_total = TRB_MAX_BUFF_SIZE -
- (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
/* How many more 64KB chunks to transfer, how many more TRBs? */
- while (running_total < sg_dma_len(sg)) {
+ while (running_total < sg_dma_len(sg) && running_total < temp) {
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
- xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
- i, (unsigned long long)sg_dma_address(sg),
- len, len, num_trbs - previous_total_trbs);
-
len = min_t(int, len, temp);
temp -= len;
if (temp == 0)
break;
}
- xhci_dbg(xhci, "\n");
- if (!in_interrupt())
- dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
- urb->ep->desc.bEndpointAddress,
- urb->transfer_buffer_length,
- num_trbs);
return num_trbs;
}
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
if (num_trbs != 0)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
"TRBs, %d left\n", __func__,
urb->ep->desc.bEndpointAddress, num_trbs);
if (running_total != urb->transfer_buffer_length)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
urb->ep->desc.bEndpointAddress,
@@ -2415,14 +2989,17 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index, unsigned int stream_id, int start_cycle,
- struct xhci_generic_trb *start_trb, struct xhci_td *td)
+ struct xhci_generic_trb *start_trb)
{
/*
* Pass all the TRBs to the hardware at once and make sure this write
* isn't reordered.
*/
wmb();
- start_trb->field[3] |= start_cycle;
+ if (start_cycle)
+ start_trb->field[3] |= cpu_to_le32(start_cycle);
+ else
+ start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
@@ -2440,7 +3017,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
int xhci_interval;
int ep_interval;
- xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+ xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
ep_interval = urb->interval;
/* Convert to microframes */
if (urb->dev->speed == USB_SPEED_LOW ||
@@ -2450,21 +3027,17 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
* to set the polling interval (once the API is added).
*/
if (xhci_interval != ep_interval) {
- if (!printk_ratelimit())
- dev_dbg(&urb->dev->dev, "Driver uses different interval"
- " (%d microframe%s) than xHCI "
- "(%d microframe%s)\n",
- ep_interval,
- ep_interval == 1 ? "" : "s",
- xhci_interval,
- xhci_interval == 1 ? "" : "s");
+ dev_dbg_ratelimited(&urb->dev->dev,
+ "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
+ ep_interval, ep_interval == 1 ? "" : "s",
+ xhci_interval, xhci_interval == 1 ? "" : "s");
urb->interval = xhci_interval;
/* Convert back to frames for LS/FS devices */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8;
}
- return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+ return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/*
@@ -2482,6 +3055,42 @@ static u32 xhci_td_remainder(unsigned int remainder)
return (remainder >> 10) << 17;
}
+/*
+ * For xHCI 1.0 host controllers, TD size is the number of max packet sized
+ * packets remaining in the TD (*not* including this TRB).
+ *
+ * Total TD packet count = total_packet_count =
+ * DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
+ *
+ * Packets transferred up to and including this TRB = packets_transferred =
+ * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
+ *
+ * TD size = total_packet_count - packets_transferred
+ *
+ * It must fit in bits 21:17, so it can't be bigger than 31.
+ * The last TRB in a TD must have the TD size set to zero.
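+ *
+ * Worked example (hypothetical values): a 1536-byte TD with
+ * wMaxPacketSize = 512 gives total_packet_count = 3. For the first
+ * 512-byte TRB, packets_transferred = (0 + 512) / 512 = 1, so its
+ * TD size field is 3 - 1 = 2; the last TRB's TD size is 0.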
+ */
+static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
+ unsigned int total_packet_count, struct urb *urb,
+ unsigned int num_trbs_left)
+{
+ int packets_transferred;
+
+ /* One TRB with a zero-length data packet. */
+ if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
+ return 0;
+
+ /* All the TRB queueing functions don't count the current TRB in
+ * running_total.
+ */
+ packets_transferred = (running_total + trb_buff_len) /
+ GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+
+ if ((total_packet_count - packets_transferred) > 31)
+ return 31 << 17;
+ return (total_packet_count - packets_transferred) << 17;
+}
+
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
@@ -2492,6 +3101,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct scatterlist *sg;
int num_sgs;
int trb_buff_len, this_sg_len, running_total;
+ unsigned int total_packet_count;
bool first_trb;
u64 addr;
bool more_trbs_coming;
@@ -2504,7 +3114,9 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return -EINVAL;
num_trbs = count_sg_trbs_needed(xhci, urb);
- num_sgs = urb->num_sgs;
+ num_sgs = urb->num_mapped_sgs;
+ total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
+ usb_endpoint_maxp(&urb->ep->desc));
trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
@@ -2536,13 +3148,10 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
sg = urb->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
- trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
- xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
- trb_buff_len);
first_trb = true;
/* Queue the first TRB, even if it's zero-length */
@@ -2552,9 +3161,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
u32 remainder = 0;
/* Don't change the cycle bit of the first TRB until later */
- if (first_trb)
+ if (first_trb) {
first_trb = false;
- else
+ if (start_cycle == 0)
+ field |= 0x1;
+ } else
field |= ep_ring->cycle_state;
/* Chain all the TRBs together; clear the chain bit in the last
@@ -2567,37 +3178,42 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
}
- xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
- "64KB boundary at %#x, end dma = %#x\n",
- (unsigned int) addr, trb_buff_len, trb_buff_len,
- (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
- (unsigned int) addr + trb_buff_len);
+
+ /* Only set interrupt on short packet for IN endpoints */
+ if (usb_urb_dir_in(urb))
+ field |= TRB_ISP;
+
if (TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+ (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
}
- remainder = xhci_td_remainder(urb->transfer_buffer_length -
- running_total) ;
+
+ /* Set the TRB length, TD size, and interrupter fields. */
+ if (xhci->hci_version < 0x100) {
+ remainder = xhci_td_remainder(
+ urb->transfer_buffer_length -
+ running_total);
+ } else {
+ remainder = xhci_v1_0_td_remainder(running_total,
+ trb_buff_len, total_packet_count, urb,
+ num_trbs - 1);
+ }
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
+
if (num_trbs > 1)
more_trbs_coming = true;
else
more_trbs_coming = false;
- queue_trb(xhci, ep_ring, false, more_trbs_coming,
+ queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
- /* We always want to know if the TRB was short,
- * or we won't get an event when it completes.
- * (Unless we use event data TRBs, which are a
- * waste of space and HC resources.)
- */
- field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+ field | TRB_TYPE(TRB_NORMAL));
--num_trbs;
running_total += trb_buff_len;
@@ -2617,7 +3233,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
@@ -2626,7 +3242,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
check_trb_math(urb, num_trbs, running_total);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
- start_cycle, start_trb, td);
+ start_cycle, start_trb);
return 0;
}
@@ -2645,6 +3261,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
u32 field, length_field;
int running_total, trb_buff_len, ret;
+ unsigned int total_packet_count;
u64 addr;
if (urb->num_sgs)
@@ -2657,7 +3274,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
running_total = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
/* If there's some data on this 64KB chunk, or we have to send a
* zero-length transfer, we need at least one TRB
@@ -2671,14 +3289,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
- if (!in_interrupt())
- dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
- urb->ep->desc.bEndpointAddress,
- urb->transfer_buffer_length,
- urb->transfer_buffer_length,
- (unsigned long long)urb->transfer_dma,
- num_trbs);
-
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
@@ -2697,11 +3307,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
start_cycle = ep_ring->cycle_state;
running_total = 0;
+ total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
+ usb_endpoint_maxp(&urb->ep->desc));
/* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
- if (urb->transfer_buffer_length < trb_buff_len)
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
@@ -2712,9 +3324,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field = 0;
/* Don't change the cycle bit of the first TRB until later */
- if (first_trb)
+ if (first_trb) {
first_trb = false;
- else
+ if (start_cycle == 0)
+ field |= 0x1;
+ } else
field |= ep_ring->cycle_state;
/* Chain all the TRBs together; clear the chain bit in the last
@@ -2727,25 +3341,34 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
}
- remainder = xhci_td_remainder(urb->transfer_buffer_length -
- running_total);
+
+ /* Only set interrupt on short packet for IN endpoints */
+ if (usb_urb_dir_in(urb))
+ field |= TRB_ISP;
+
+ /* Set the TRB length, TD size, and interrupter fields. */
+ if (xhci->hci_version < 0x100) {
+ remainder = xhci_td_remainder(
+ urb->transfer_buffer_length -
+ running_total);
+ } else {
+ remainder = xhci_v1_0_td_remainder(running_total,
+ trb_buff_len, total_packet_count, urb,
+ num_trbs - 1);
+ }
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
+
if (num_trbs > 1)
more_trbs_coming = true;
else
more_trbs_coming = false;
- queue_trb(xhci, ep_ring, false, more_trbs_coming,
+ queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
- /* We always want to know if the TRB was short,
- * or we won't get an event when it completes.
- * (Unless we use event data TRBs, which are a
- * waste of space and HC resources.)
- */
- field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+ field | TRB_TYPE(TRB_NORMAL));
--num_trbs;
running_total += trb_buff_len;
@@ -2758,7 +3381,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
check_trb_math(urb, num_trbs, running_total);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
- start_cycle, start_trb, td);
+ start_cycle, start_trb);
return 0;
}
@@ -2787,9 +3410,6 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (!urb->setup_packet)
return -EINVAL;
- if (!in_interrupt())
- xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
- slot_id, ep_index);
/* 1 TRB for setup, 1 for status */
num_trbs = 2;
/*
@@ -2819,28 +3439,46 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Queue setup TRB - see section 6.4.1.2.1 */
/* FIXME better way to translate setup_packet into two u32 fields? */
setup = (struct usb_ctrlrequest *) urb->setup_packet;
- queue_trb(xhci, ep_ring, false, true,
- /* FIXME endianness is probably going to bite my ass here. */
- setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
- setup->wIndex | setup->wLength << 16,
- TRB_LEN(8) | TRB_INTR_TARGET(0),
- /* Immediate data in pointer */
- TRB_IDT | TRB_TYPE(TRB_SETUP));
+ field = 0;
+ field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
+ if (start_cycle == 0)
+ field |= 0x1;
+
+ /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
+ if (xhci->hci_version == 0x100) {
+ if (urb->transfer_buffer_length > 0) {
+ if (setup->bRequestType & USB_DIR_IN)
+ field |= TRB_TX_TYPE(TRB_DATA_IN);
+ else
+ field |= TRB_TX_TYPE(TRB_DATA_OUT);
+ }
+ }
+
+ queue_trb(xhci, ep_ring, true,
+ setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
+ le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
+ TRB_LEN(8) | TRB_INTR_TARGET(0),
+ /* Immediate data in pointer */
+ field);
/* If there's data, queue data TRBs */
- field = 0;
+ /* Only set interrupt on short packet for IN endpoints */
+ if (usb_urb_dir_in(urb))
+ field = TRB_ISP | TRB_TYPE(TRB_DATA);
+ else
+ field = TRB_TYPE(TRB_DATA);
+
length_field = TRB_LEN(urb->transfer_buffer_length) |
xhci_td_remainder(urb->transfer_buffer_length) |
TRB_INTR_TARGET(0);
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_DIR_IN;
- queue_trb(xhci, ep_ring, false, true,
+ queue_trb(xhci, ep_ring, true,
lower_32_bits(urb->transfer_dma),
upper_32_bits(urb->transfer_dma),
length_field,
- /* Event on short tx */
- field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
+ field | ep_ring->cycle_state);
}
/* Save the DMA address of the last TRB in the TD */
@@ -2852,7 +3490,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field = 0;
else
field = TRB_DIR_IN;
- queue_trb(xhci, ep_ring, false, false,
+ queue_trb(xhci, ep_ring, false,
0,
0,
TRB_INTR_TARGET(0),
@@ -2860,7 +3498,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
giveback_first_trb(xhci, slot_id, ep_index, 0,
- start_cycle, start_trb, td);
+ start_cycle, start_trb);
return 0;
}
@@ -2868,24 +3506,76 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
struct urb *urb, int i)
{
int num_trbs = 0;
- u64 addr, td_len, running_total;
+ u64 addr, td_len;
addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
td_len = urb->iso_frame_desc[i].length;
- running_total = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
- if (running_total != 0)
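+ /* Charge the buffer's offset within its first TRB_MAX_BUFF_SIZE-aligned
+ * chunk against the first TRB when rounding up.
+ */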
+ num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+ TRB_MAX_BUFF_SIZE);
+ if (num_trbs == 0)
num_trbs++;
- while (running_total < td_len) {
- num_trbs++;
- running_total += TRB_MAX_BUFF_SIZE;
- }
-
return num_trbs;
}
+/*
+ * The transfer burst count field of the isochronous TRB defines the number of
+ * bursts that are required to move all packets in this TD. Only SuperSpeed
+ * devices can burst up to bMaxBurst number of packets per service interval.
+ * This field is zero based, meaning a value of zero in the field means one
+ * burst. Basically, for everything but SuperSpeed devices, this field will be
+ * zero. Only xHCI 1.0 host controllers support this field.
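+ * For example, a SuperSpeed endpoint with bMaxBurst = 3 (four packets per
+ * burst) moving 9 packets needs 3 bursts, so the zero-based field is 2.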
+ */
+static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct urb *urb, unsigned int total_packet_count)
+{
+ unsigned int max_burst;
+
+ if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
+ return 0;
+
+ max_burst = urb->ep->ss_ep_comp.bMaxBurst;
+ return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
+}
+
+/*
+ * Returns the number of packets in the last "burst" of packets. This field is
+ * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
+ * the last burst packet count is equal to the total number of packets in the
+ * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
+ * must contain (bMaxBurst + 1) number of packets, but the last burst can
+ * contain 1 to (bMaxBurst + 1) packets.
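+ * For example, with bMaxBurst = 2 (bursts of three packets) a 7-packet TD
+ * ends with a single-packet burst, so the field is 0; a 6-packet TD fills
+ * its last burst completely and the field is 2.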
+ */
+static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct urb *urb, unsigned int total_packet_count)
+{
+ unsigned int max_burst;
+ unsigned int residue;
+
+ if (xhci->hci_version < 0x100)
+ return 0;
+
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+ /* bMaxBurst is zero based: 0 means 1 packet per burst */
+ max_burst = urb->ep->ss_ep_comp.bMaxBurst;
+ residue = total_packet_count % (max_burst + 1);
+ /* If residue is zero, the last burst contains (max_burst + 1)
+ * number of packets, but the TLBPC field is zero-based.
+ */
+ if (residue == 0)
+ return max_burst;
+ return residue - 1;
+ default:
+ if (total_packet_count == 0)
+ return 0;
+ return total_packet_count - 1;
+ }
+}
+
/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
@@ -2901,6 +3591,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
int running_total, trb_buff_len, td_len, td_remain_len, ret;
u64 start_addr, addr;
int i, j;
+ bool more_trbs_coming;
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
@@ -2910,48 +3601,59 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return -EINVAL;
}
- if (!in_interrupt())
- dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
- " addr = %#llx, num_tds = %d\n",
- urb->ep->desc.bEndpointAddress,
- urb->transfer_buffer_length,
- urb->transfer_buffer_length,
- (unsigned long long)urb->transfer_dma,
- num_tds);
-
start_addr = (u64) urb->transfer_dma;
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
+ urb_priv = urb->hcpriv;
/* Queue the first TRB, even if it's zero-length */
for (i = 0; i < num_tds; i++) {
- first_trb = true;
+ unsigned int total_packet_count;
+ unsigned int burst_count;
+ unsigned int residue;
+ first_trb = true;
running_total = 0;
addr = start_addr + urb->iso_frame_desc[i].offset;
td_len = urb->iso_frame_desc[i].length;
td_remain_len = td_len;
+ total_packet_count = DIV_ROUND_UP(td_len,
+ GET_MAX_PACKET(
+ usb_endpoint_maxp(&urb->ep->desc)));
+ /* A zero-length transfer still involves at least one packet. */
+ if (total_packet_count == 0)
+ total_packet_count++;
+ burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
+ total_packet_count);
+ residue = xhci_get_last_burst_packet_count(xhci,
+ urb->dev, urb, total_packet_count);
trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
urb->stream_id, trbs_per_td, urb, i, mem_flags);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ if (i == 0)
+ return ret;
+ goto cleanup;
+ }
- urb_priv = urb->hcpriv;
td = urb_priv->td[i];
-
for (j = 0; j < trbs_per_td; j++) {
u32 remainder = 0;
field = 0;
if (first_trb) {
+ field = TRB_TBC(burst_count) |
+ TRB_TLBPC(residue);
/* Queue the isoc TRB */
field |= TRB_TYPE(TRB_ISOC);
/* Assume URB_ISO_ASAP is set */
field |= TRB_SIA;
- if (i > 0)
+ if (i == 0) {
+ if (start_cycle == 0)
+ field |= 0x1;
+ } else
field |= ep_ring->cycle_state;
first_trb = false;
} else {
@@ -2960,15 +3662,28 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field |= ep_ring->cycle_state;
}
+ /* Only set interrupt on short packet for IN EPs */
+ if (usb_urb_dir_in(urb))
+ field |= TRB_ISP;
+
/* Chain all the TRBs together; clear the chain bit in
* the last TRB to indicate it's the last TRB in the
* chain.
*/
if (j < trbs_per_td - 1) {
field |= TRB_CHAIN;
+ more_trbs_coming = true;
} else {
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
+ if (xhci->hci_version == 0x100 &&
+ !(xhci->quirks &
+ XHCI_AVOID_BEI)) {
+ /* Set BEI bit except for the last td */
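+ /* BEI blocks the interrupt for this TD's
+ * completion event, so only the URB's final
+ * TD interrupts the host.
+ */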
+ if (i < num_tds - 1)
+ field |= TRB_BEI;
+ }
+ more_trbs_coming = false;
}
/* Calculate TRB length */
@@ -2977,20 +3692,25 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (trb_buff_len > td_remain_len)
trb_buff_len = td_remain_len;
- remainder = xhci_td_remainder(td_len - running_total);
+ /* Set the TRB length, TD size, & interrupter fields. */
+ if (xhci->hci_version < 0x100) {
+ remainder = xhci_td_remainder(
+ td_len - running_total);
+ } else {
+ remainder = xhci_v1_0_td_remainder(
+ running_total, trb_buff_len,
+ total_packet_count, urb,
+ (trbs_per_td - j - 1));
+ }
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
- queue_trb(xhci, ep_ring, false, false,
+
+ queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
- /* We always want to know if the TRB was short,
- * or we won't get an event when it completes.
- * (Unless we use event data TRBs, which are a
- * waste of space and HC resources.)
- */
- field | TRB_ISP);
+ field);
running_total += trb_buff_len;
addr += trb_buff_len;
@@ -3000,15 +3720,42 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Check TD length */
if (running_total != td_len) {
xhci_err(xhci, "ISOC TD length mismatch\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto cleanup;
}
}
- wmb();
- start_trb->field[3] |= start_cycle;
+ if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+ if (xhci->quirks & XHCI_AMD_PLL_FIX)
+ usb_amd_quirk_pll_disable();
+ }
+ xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
- xhci_ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+ giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+ start_cycle, start_trb);
return 0;
+cleanup:
+ /* Clean up a partially enqueued isoc transfer. */
+
+ for (i--; i >= 0; i--)
+ list_del_init(&urb_priv->td[i]->td_list);
+
+ /* Use the first TD as a temporary variable to turn the TDs we've queued
+ * into No-ops with a software-owned cycle bit. That way the hardware
+ * won't accidentally start executing bogus TDs when we partially
+ * overwrite them. td->first_trb and td->start_seg are already set.
+ */
+ urb_priv->td[0]->last_trb = ep_ring->enqueue;
+ /* Every TRB except the first & last will have its cycle bit flipped. */
+ td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
+
+ /* Reset the ring enqueue back to the first TRB and its cycle bit. */
+ ep_ring->enqueue = urb_priv->td[0]->first_trb;
+ ep_ring->enq_seg = urb_priv->td[0]->start_seg;
+ ep_ring->cycle_state = start_cycle;
+ ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
+ usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+ return ret;
}
/*
@@ -3042,12 +3789,12 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Check the ring to guarantee there is enough room for the whole urb.
* Do not insert any td of the urb to the ring if the check failed.
*/
- ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK,
- num_trbs, mem_flags);
+ ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
+ num_trbs, mem_flags);
if (ret)
return ret;
- start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+ start_frame = readl(&xhci->run_regs->microframe_index);
start_frame &= 0x3fff;
urb->start_frame = start_frame;
@@ -3055,7 +3802,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
urb->dev->speed == USB_SPEED_FULL)
urb->start_frame >>= 3;
- xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+ xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
ep_interval = urb->interval;
/* Convert to microframes */
if (urb->dev->speed == USB_SPEED_LOW ||
@@ -3065,21 +3812,19 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* to set the polling interval (once the API is added).
*/
if (xhci_interval != ep_interval) {
- if (!printk_ratelimit())
- dev_dbg(&urb->dev->dev, "Driver uses different interval"
- " (%d microframe%s) than xHCI "
- "(%d microframe%s)\n",
- ep_interval,
- ep_interval == 1 ? "" : "s",
- xhci_interval,
- xhci_interval == 1 ? "" : "s");
+ dev_dbg_ratelimited(&urb->dev->dev,
+ "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
+ ep_interval, ep_interval == 1 ? "" : "s",
+ xhci_interval, xhci_interval == 1 ? "" : "s");
urb->interval = xhci_interval;
/* Convert back to frames for LS/FS devices */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8;
}
- return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+ ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
+
+ return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/**** Command Ring Operations ****/
@@ -3092,11 +3837,14 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
* because the command event handler may want to resubmit a failed command.
*/
-static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
- u32 field3, u32 field4, bool command_must_succeed)
+static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 field1, u32 field2,
+ u32 field3, u32 field4, bool command_must_succeed)
{
int reserved_trbs = xhci->cmd_ring_reserved_trbs;
int ret;
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ESHUTDOWN;
if (!command_must_succeed)
reserved_trbs++;
@@ -3110,109 +3858,108 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
"unfailable commands failed.\n");
return ret;
}
- queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
- field4 | xhci->cmd_ring->cycle_state);
- return 0;
-}
-/* Queue a no-op command on the command ring */
-static int queue_cmd_noop(struct xhci_hcd *xhci)
-{
- return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
-}
+ cmd->command_trb = xhci->cmd_ring->enqueue;
+ list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
-/*
- * Place a no-op command on the command ring to test the command and
- * event ring.
- */
-void *xhci_setup_one_noop(struct xhci_hcd *xhci)
-{
- if (queue_cmd_noop(xhci) < 0)
- return NULL;
- xhci->noops_submitted++;
- return xhci_ring_cmd_db;
+ /* if there are no other commands queued we start the timeout timer */
+ if (xhci->cmd_list.next == &cmd->cmd_list &&
+ !timer_pending(&xhci->cmd_timer)) {
+ xhci->current_cmd = cmd;
+ mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ }
+
+ queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
+ field4 | xhci->cmd_ring->cycle_state);
+ return 0;
}
/* Queue a slot enable or disable request on the command ring */
-int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 trb_type, u32 slot_id)
{
- return queue_command(xhci, 0, 0, 0,
+ return queue_command(xhci, cmd, 0, 0, 0,
TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}
/* Queue an address device command TRB */
-int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
- u32 slot_id)
+int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
- return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
- TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
- false);
+ TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
+ | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}
-int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 field1, u32 field2, u32 field3, u32 field4)
{
- return queue_command(xhci, field1, field2, field3, field4, false);
+ return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}
/* Queue a reset device command TRB */
-int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 slot_id)
{
- return queue_command(xhci, 0, 0, 0,
+ return queue_command(xhci, cmd, 0, 0, 0,
TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
false);
}
/* Queue a configure endpoint command TRB */
-int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
u32 slot_id, bool command_must_succeed)
{
- return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
command_must_succeed);
}
/* Queue an evaluate context command TRB */
-int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
- u32 slot_id)
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
- return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
- false);
+ command_must_succeed);
}
/*
* Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
* activity on an endpoint that is about to be suspended.
*/
-int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, int suspend)
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int slot_id, unsigned int ep_index, int suspend)
{
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_STOP_RING);
u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
- return queue_command(xhci, 0, 0, 0,
+ return queue_command(xhci, cmd, 0, 0, 0,
trb_slot_id | trb_ep_index | type | trb_suspend, false);
}
/* Set Transfer Ring Dequeue Pointer command.
* This should not be used for endpoints that have streams enabled.
*/
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, unsigned int stream_id,
- struct xhci_segment *deq_seg,
- union xhci_trb *deq_ptr, u32 cycle_state)
+static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int slot_id,
+ unsigned int ep_index, unsigned int stream_id,
+ struct xhci_segment *deq_seg,
+ union xhci_trb *deq_ptr, u32 cycle_state)
{
dma_addr_t addr;
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
+ u32 trb_sct = 0;
u32 type = TRB_TYPE(TRB_SET_DEQ);
+ struct xhci_virt_ep *ep;
addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
if (addr == 0) {
@@ -3221,18 +3968,29 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
deq_seg, deq_ptr);
return 0;
}
- return queue_command(xhci, lower_32_bits(addr) | cycle_state,
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ if ((ep->ep_state & SET_DEQ_PENDING)) {
+ xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
+ xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
+ return 0;
+ }
+ ep->queued_deq_seg = deq_seg;
+ ep->queued_deq_ptr = deq_ptr;
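+ /* Stream rings also need the Stream Context Type; SCT_PRI_TR marks
+ * the new dequeue pointer as a primary transfer ring.
+ */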
+ if (stream_id)
+ trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
+ return queue_command(xhci, cmd,
+ lower_32_bits(addr) | trb_sct | cycle_state,
upper_32_bits(addr), trb_stream_id,
trb_slot_id | trb_ep_index | type, false);
}
-int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index)
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int slot_id, unsigned int ep_index)
{
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_RESET_EP);
- return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
- false);
+ return queue_command(xhci, cmd, 0, 0, 0,
+ trb_slot_id | trb_ep_index | type, false);
}
diff --git a/drivers/usb/host/xhci-trace.c b/drivers/usb/host/xhci-trace.c
new file mode 100644
index 00000000000..7cf30c83dcf
--- /dev/null
+++ b/drivers/usb/host/xhci-trace.c
@@ -0,0 +1,15 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2013 Xenia Ragiadakou
+ *
+ * Author: Xenia Ragiadakou
+ * Email : burzalodowa@gmail.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "xhci-trace.h"
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
new file mode 100644
index 00000000000..dde3959b7a3
--- /dev/null
+++ b/drivers/usb/host/xhci-trace.h
@@ -0,0 +1,151 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2013 Xenia Ragiadakou
+ *
+ * Author: Xenia Ragiadakou
+ * Email : burzalodowa@gmail.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xhci-hcd
+
+#if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __XHCI_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "xhci.h"
+
+#define XHCI_MSG_MAX 500
+
+DECLARE_EVENT_CLASS(xhci_log_msg,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf),
+ TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
+ TP_fast_assign(
+ vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
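+/* Each DEFINE_EVENT below gives xhci_dbg_trace() callers in xhci.c their
+ * own per-topic tracepoint for formatted debug messages.
+ */
+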
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_address,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_context_change,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_ctx,
+ TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ unsigned int ep_num),
+ TP_ARGS(xhci, ctx, ep_num),
+ TP_STRUCT__entry(
+ __field(int, ctx_64)
+ __field(unsigned, ctx_type)
+ __field(dma_addr_t, ctx_dma)
+ __field(u8 *, ctx_va)
+ __field(unsigned, ctx_ep_num)
+ __field(int, slot_id)
+ __dynamic_array(u32, ctx_data,
+ ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
+ ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
+ ),
+ TP_fast_assign(
+ struct usb_device *udev;
+
+ udev = to_usb_device(xhci_to_hcd(xhci)->self.controller);
+ __entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+ __entry->ctx_type = ctx->type;
+ __entry->ctx_dma = ctx->dma;
+ __entry->ctx_va = ctx->bytes;
+ __entry->slot_id = udev->slot_id;
+ __entry->ctx_ep_num = ep_num;
+ memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
+ ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
+ ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
+ ),
+ TP_printk("\nctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
+ __entry->ctx_64, __entry->ctx_type,
+ (unsigned long long) __entry->ctx_dma, __entry->ctx_va
+ )
+);
+
+DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
+ TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ unsigned int ep_num),
+ TP_ARGS(xhci, ctx, ep_num)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_event,
+ TP_PROTO(void *trb_va, struct xhci_generic_trb *ev),
+ TP_ARGS(trb_va, ev),
+ TP_STRUCT__entry(
+ __field(void *, va)
+ __field(u64, dma)
+ __field(u32, status)
+ __field(u32, flags)
+ __dynamic_array(u8, trb, sizeof(struct xhci_generic_trb))
+ ),
+ TP_fast_assign(
+ __entry->va = trb_va;
+ __entry->dma = ((u64)le32_to_cpu(ev->field[1])) << 32 |
+ le32_to_cpu(ev->field[0]);
+ __entry->status = le32_to_cpu(ev->field[2]);
+ __entry->flags = le32_to_cpu(ev->field[3]);
+ memcpy(__get_dynamic_array(trb), trb_va,
+ sizeof(struct xhci_generic_trb));
+ ),
+ TP_printk("\ntrb_dma=@%llx, trb_va=@%p, status=%08x, flags=%08x",
+ (unsigned long long) __entry->dma, __entry->va,
+ __entry->status, __entry->flags
+ )
+);
+
+DEFINE_EVENT(xhci_log_event, xhci_cmd_completion,
+ TP_PROTO(void *trb_va, struct xhci_generic_trb *ev),
+ TP_ARGS(trb_va, ev)
+);
+
+#endif /* __XHCI_TRACE_H */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE xhci-trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 5d7d4e951ea..7436d5f5e67 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -26,8 +26,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/dmi.h>
+#include <linux/dma-mapping.h>
#include "xhci.h"
+#include "xhci-trace.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
@@ -37,9 +40,13 @@ static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
+static unsigned int quirks;
+module_param(quirks, uint, S_IRUGO);
+MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
+
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
- * handshake - spin reading hc until handshake completes or fails
+ * xhci_handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
@@ -51,13 +58,13 @@ MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
* handshake done). There are two failure modes: "usec" have passed (major
* hardware flakeout), or the register reads as all-ones (hardware removed).
*/
-static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
u32 mask, u32 done, int usec)
{
u32 result;
do {
- result = xhci_readl(xhci, ptr);
+ result = readl(ptr);
if (result == ~(u32)0) /* card removed */
return -ENODEV;
result &= mask;
@@ -79,13 +86,13 @@ void xhci_quiesce(struct xhci_hcd *xhci)
u32 mask;
mask = ~(XHCI_IRQS);
- halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
+ halted = readl(&xhci->op_regs->status) & STS_HALT;
if (!halted)
mask &= ~CMD_RUN;
- cmd = xhci_readl(xhci, &xhci->op_regs->command);
+ cmd = readl(&xhci->op_regs->command);
cmd &= mask;
- xhci_writel(xhci, cmd, &xhci->op_regs->command);
+ writel(cmd, &xhci->op_regs->command);
}
/*
@@ -93,48 +100,57 @@ void xhci_quiesce(struct xhci_hcd *xhci)
*
* Disable any IRQs and clear the run/stop bit.
* HC will complete any current and actively pipelined transactions, and
- * should halt within 16 microframes of the run/stop bit being cleared.
+ * should halt within 16 ms of the run/stop bit being cleared.
* Read HC Halted bit in the status register to see when the HC is finished.
- * XXX: shouldn't we set HC_STATE_HALT here somewhere?
*/
int xhci_halt(struct xhci_hcd *xhci)
{
- xhci_dbg(xhci, "// Halt the HC\n");
+ int ret;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
xhci_quiesce(xhci);
- return handshake(xhci, &xhci->op_regs->status,
+ ret = xhci_handshake(xhci, &xhci->op_regs->status,
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+ if (!ret) {
+ xhci->xhc_state |= XHCI_STATE_HALTED;
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+ } else
+ xhci_warn(xhci, "Host not halted after %u microseconds.\n",
+ XHCI_MAX_HALT_USEC);
+ return ret;
}
/*
* Set the run bit and wait for the host to be running.
*/
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
{
u32 temp;
int ret;
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp = readl(&xhci->op_regs->command);
temp |= (CMD_RUN);
- xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
temp);
- xhci_writel(xhci, temp, &xhci->op_regs->command);
+ writel(temp, &xhci->op_regs->command);
/*
* Wait for the HCHalted Status bit to be 0 to indicate the host is
* running.
*/
- ret = handshake(xhci, &xhci->op_regs->status,
+ ret = xhci_handshake(xhci, &xhci->op_regs->status,
STS_HALT, 0, XHCI_MAX_HALT_USEC);
if (ret == -ETIMEDOUT)
xhci_err(xhci, "Host took too long to start, "
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
+ if (!ret)
+ xhci->xhc_state &= ~XHCI_STATE_HALTED;
return ret;
}
/*
- * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
+ * Reset a halted HC.
*
* This resets pipelines, timers, counters, state machines, etc.
* Transactions will be terminated immediately, and operational registers
@@ -144,56 +160,55 @@ int xhci_reset(struct xhci_hcd *xhci)
{
u32 command;
u32 state;
- int ret;
+ int ret, i;
- state = xhci_readl(xhci, &xhci->op_regs->status);
+ state = readl(&xhci->op_regs->status);
if ((state & STS_HALT) == 0) {
xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
return 0;
}
- xhci_dbg(xhci, "// Reset the HC\n");
- command = xhci_readl(xhci, &xhci->op_regs->command);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
+ command = readl(&xhci->op_regs->command);
command |= CMD_RESET;
- xhci_writel(xhci, command, &xhci->op_regs->command);
- /* XXX: Why does EHCI set this here? Shouldn't other code do this? */
- xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+ writel(command, &xhci->op_regs->command);
- ret = handshake(xhci, &xhci->op_regs->command,
- CMD_RESET, 0, 250 * 1000);
+ ret = xhci_handshake(xhci, &xhci->op_regs->command,
+ CMD_RESET, 0, 10 * 1000 * 1000);
if (ret)
return ret;
- xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Wait for controller to be ready for doorbell rings");
/*
* xHCI cannot write to any doorbells or operational registers other
* than status until the "Controller Not Ready" flag is cleared.
*/
- return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
+ ret = xhci_handshake(xhci, &xhci->op_regs->status,
+ STS_CNR, 0, 10 * 1000 * 1000);
+
+ for (i = 0; i < 2; ++i) {
+ xhci->bus_state[i].port_c_suspend = 0;
+ xhci->bus_state[i].suspended_ports = 0;
+ xhci->bus_state[i].resuming_ports = 0;
+ }
+
+ return ret;
}
-/*
- * Free IRQs
- * free all IRQs request
- */
-static void xhci_free_irq(struct xhci_hcd *xhci)
+#ifdef CONFIG_PCI
+static int xhci_free_msi(struct xhci_hcd *xhci)
{
int i;
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
-
- /* return if using legacy interrupt */
- if (xhci_to_hcd(xhci)->irq >= 0)
- return;
- if (xhci->msix_entries) {
- for (i = 0; i < xhci->msix_count; i++)
- if (xhci->msix_entries[i].vector)
- free_irq(xhci->msix_entries[i].vector,
- xhci_to_hcd(xhci));
- } else if (pdev->irq >= 0)
- free_irq(pdev->irq, xhci_to_hcd(xhci));
+ if (!xhci->msix_entries)
+ return -EINVAL;
- return;
+ for (i = 0; i < xhci->msix_count; i++)
+ if (xhci->msix_entries[i].vector)
+ free_irq(xhci->msix_entries[i].vector,
+ xhci_to_hcd(xhci));
+ return 0;
}
/*
@@ -206,14 +221,16 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
ret = pci_enable_msi(pdev);
if (ret) {
- xhci_err(xhci, "failed to allocate MSI entry\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "failed to allocate MSI entry");
return ret;
}
- ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
+ ret = request_irq(pdev->irq, xhci_msi_irq,
0, "xhci_hcd", xhci_to_hcd(xhci));
if (ret) {
- xhci_err(xhci, "disable MSI interrupt\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "disable MSI interrupt");
pci_disable_msi(pdev);
}
@@ -221,12 +238,35 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
}
/*
+ * Free IRQs
+ * free all requested IRQs
+ */
+static void xhci_free_irq(struct xhci_hcd *xhci)
+{
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ int ret;
+
+ /* return if using legacy interrupt */
+ if (xhci_to_hcd(xhci)->irq > 0)
+ return;
+
+ ret = xhci_free_msi(xhci);
+ if (!ret)
+ return;
+ if (pdev->irq > 0)
+ free_irq(pdev->irq, xhci_to_hcd(xhci));
+
+ return;
+}
+
+/*
* Set up MSI-X
*/
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
int i, ret = 0;
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
/*
* calculate number of msi-x vectors supported.
@@ -251,24 +291,26 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
xhci->msix_entries[i].vector = 0;
}
- ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
+ ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
if (ret) {
- xhci_err(xhci, "Failed to enable MSI-X\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Failed to enable MSI-X");
goto free_entries;
}
for (i = 0; i < xhci->msix_count; i++) {
ret = request_irq(xhci->msix_entries[i].vector,
- (irq_handler_t)xhci_msi_irq,
+ xhci_msi_irq,
0, "xhci_hcd", xhci_to_hcd(xhci));
if (ret)
goto disable_msix;
}
+ hcd->msix_enabled = 1;
return ret;
disable_msix:
- xhci_err(xhci, "disable MSI-X interrupt\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
xhci_free_irq(xhci);
pci_disable_msix(pdev);
free_entries:
@@ -280,7 +322,11 @@ free_entries:
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+
+ if (xhci->quirks & XHCI_PLAT)
+ return;
xhci_free_irq(xhci);
@@ -292,9 +338,186 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
pci_disable_msi(pdev);
}
+ hcd->msix_enabled = 0;
return;
}
+static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+{
+ int i;
+
+ if (xhci->msix_entries) {
+ for (i = 0; i < xhci->msix_count; i++)
+ synchronize_irq(xhci->msix_entries[i].vector);
+ }
+}
+
+static int xhci_try_enable_msi(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev;
+ int ret;
+
+ /* The xhci platform device has set up IRQs through usb_add_hcd. */
+ if (xhci->quirks & XHCI_PLAT)
+ return 0;
+
+ pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ /*
+ * Some Fresco Logic host controllers advertise MSI, but fail to
+ * generate interrupts. Don't even try to enable MSI.
+ */
+ if (xhci->quirks & XHCI_BROKEN_MSI)
+ goto legacy_irq;
+
+ /* unregister the legacy interrupt */
+ if (hcd->irq)
+ free_irq(hcd->irq, hcd);
+ hcd->irq = 0;
+
+ ret = xhci_setup_msix(xhci);
+ if (ret)
+ /* fall back to MSI */
+ ret = xhci_setup_msi(xhci);
+
+ if (!ret)
+ /* hcd->irq is 0, we have MSI */
+ return 0;
+
+ if (!pdev->irq) {
+ xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
+ return -EINVAL;
+ }
+
+ legacy_irq:
+ if (!strlen(hcd->irq_descr))
+ snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
+ hcd->driver->description, hcd->self.busnum);
+
+ /* fall back to legacy interrupt */
+ ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
+ hcd->irq_descr, hcd);
+ if (ret) {
+ xhci_err(xhci, "request interrupt %d failed\n",
+ pdev->irq);
+ return ret;
+ }
+ hcd->irq = pdev->irq;
+ return 0;
+}
+
+#else
+
+static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
+{
+ return 0;
+}
+
+static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
+{
+}
+
+static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+{
+}
+
+#endif
+
+static void compliance_mode_recovery(unsigned long arg)
+{
+ struct xhci_hcd *xhci;
+ struct usb_hcd *hcd;
+ u32 temp;
+ int i;
+
+ xhci = (struct xhci_hcd *)arg;
+
+ for (i = 0; i < xhci->num_usb3_ports; i++) {
+ temp = readl(xhci->usb3_ports[i]);
+ if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
+ /*
+ * Compliance Mode Detected. Letting USB Core
+ * handle the Warm Reset
+ */
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Compliance mode detected->port %d",
+ i + 1);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Attempting compliance mode recovery");
+ hcd = xhci->shared_hcd;
+
+ if (hcd->state == HC_STATE_SUSPENDED)
+ usb_hcd_resume_root_hub(hcd);
+
+ usb_hcd_poll_rh_status(hcd);
+ }
+ }
+
+ if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
+ mod_timer(&xhci->comp_mode_recovery_timer,
+ jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
+}
+
+/*
+ * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0
+ * re-driver that sometimes causes ports behind that hardware to enter
+ * compliance mode. The quirk creates a timer that polls the link state of
+ * each host controller port every 2 seconds and recovers the port with a
+ * warm reset if compliance mode is detected; otherwise the port becomes
+ * "dead" (no device connections or disconnections are detected anymore).
+ * Because no status event is generated when entering compliance mode (per
+ * xhci spec), this quirk is needed on systems with the failing hardware.
+ */
+static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
+{
+ xhci->port_status_u0 = 0;
+ init_timer(&xhci->comp_mode_recovery_timer);
+
+ xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
+ xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
+ xhci->comp_mode_recovery_timer.expires = jiffies +
+ msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
+
+ set_timer_slack(&xhci->comp_mode_recovery_timer,
+ msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
+ add_timer(&xhci->comp_mode_recovery_timer);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Compliance mode recovery timer initialized");
+}
+
+/*
+ * This function identifies the systems that have installed the SN65LVPE502CP
+ * USB3.0 re-driver and that need the Compliance Mode Quirk.
+ * Systems:
+ * Vendor: Hewlett-Packard -> System Models: Z420, Z620, Z820 and Z1 Workstation
+ */
+bool xhci_compliance_mode_recovery_timer_quirk_check(void)
+{
+ const char *dmi_product_name, *dmi_sys_vendor;
+
+ dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+ dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ if (!dmi_product_name || !dmi_sys_vendor)
+ return false;
+
+ if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
+ return false;
+
+ if (strstr(dmi_product_name, "Z420") ||
+ strstr(dmi_product_name, "Z620") ||
+ strstr(dmi_product_name, "Z820") ||
+ strstr(dmi_product_name, "Z1 Workstation"))
+ return true;
+
+ return false;
+}
+
+static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
+{
+ return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
+}
+
+
/*
* Initialize memory for HCD and xHC (one-time init).
*
@@ -307,16 +530,24 @@ int xhci_init(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int retval = 0;
- xhci_dbg(xhci, "xhci_init\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
spin_lock_init(&xhci->lock);
- if (link_quirk) {
- xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
+ if (xhci->hci_version == 0x95 && link_quirk) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Not clearing Link TRB chain bits.");
xhci->quirks |= XHCI_LINK_TRB_QUIRK;
} else {
- xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI doesn't need link TRB QUIRK");
}
retval = xhci_mem_init(xhci, GFP_KERNEL);
- xhci_dbg(xhci, "Finished xhci_init\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
+
+ /* Initializing Compliance Mode Recovery Data If Needed */
+ if (xhci_compliance_mode_recovery_timer_quirk_check()) {
+ xhci->quirks |= XHCI_COMP_MODE_QUIRK;
+ compliance_mode_recovery_timer_init(xhci);
+ }
return retval;
}
@@ -324,60 +555,22 @@ int xhci_init(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static int xhci_run_finished(struct xhci_hcd *xhci)
{
- unsigned long flags;
- int temp;
- u64 temp_64;
- struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
- int i, j;
-
- xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
-
- spin_lock_irqsave(&xhci->lock, flags);
- temp = xhci_readl(xhci, &xhci->op_regs->status);
- xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
- if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
- xhci_dbg(xhci, "HW died, polling stopped.\n");
- spin_unlock_irqrestore(&xhci->lock, flags);
- return;
- }
-
- temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
- xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
- xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
- xhci->error_bitmask = 0;
- xhci_dbg(xhci, "Event ring:\n");
- xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
- xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- temp_64 &= ~ERST_PTR_MASK;
- xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
- xhci_dbg(xhci, "Command ring:\n");
- xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
- xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
- xhci_dbg_cmd_ptrs(xhci);
- for (i = 0; i < MAX_HC_SLOTS; ++i) {
- if (!xhci->devs[i])
- continue;
- for (j = 0; j < 31; ++j) {
- xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
- }
+ if (xhci_start(xhci)) {
+ xhci_halt(xhci);
+ return -ENODEV;
}
+ xhci->shared_hcd->state = HC_STATE_RUNNING;
+ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
- if (xhci->noops_submitted != NUM_TEST_NOOPS)
- if (xhci_setup_one_noop(xhci))
- xhci_ring_cmd_db(xhci);
- spin_unlock_irqrestore(&xhci->lock, flags);
+ if (xhci->quirks & XHCI_NEC_HOST)
+ xhci_ring_cmd_db(xhci);
- if (!xhci->zombie)
- mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
- else
- xhci_dbg(xhci, "Quit polling the event ring.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Finished xhci_run for USB3 roothub");
+ return 0;
}
-#endif
/*
* Start the HC after it was halted.
@@ -395,46 +588,22 @@ int xhci_run(struct usb_hcd *hcd)
{
u32 temp;
u64 temp_64;
- u32 ret;
+ int ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
- void (*doorbell)(struct xhci_hcd *) = NULL;
+
+ /* Start the xHCI host controller running only after the USB 2.0 roothub
+ * is setup.
+ */
hcd->uses_new_polling = 1;
+ if (!usb_hcd_is_primary_hcd(hcd))
+ return xhci_run_finished(xhci);
- xhci_dbg(xhci, "xhci_run\n");
- /* unregister the legacy interrupt */
- if (hcd->irq)
- free_irq(hcd->irq, hcd);
- hcd->irq = -1;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
- ret = xhci_setup_msix(xhci);
+ ret = xhci_try_enable_msi(hcd);
if (ret)
- /* fall back to msi*/
- ret = xhci_setup_msi(xhci);
-
- if (ret) {
- /* fall back to legacy interrupt*/
- ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
- hcd->irq_descr, hcd);
- if (ret) {
- xhci_err(xhci, "request interrupt %d failed\n",
- pdev->irq);
- return ret;
- }
- hcd->irq = pdev->irq;
- }
-
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
- init_timer(&xhci->event_ring_timer);
- xhci->event_ring_timer.data = (unsigned long) xhci;
- xhci->event_ring_timer.function = xhci_event_ring_work;
- /* Poll the event ring */
- xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
- xhci->zombie = 0;
- xhci_dbg(xhci, "Setting event ring polling timer\n");
- add_timer(&xhci->event_ring_timer);
-#endif
+ return ret;
xhci_dbg(xhci, "Command ring memory map follows:\n");
xhci_debug_ring(xhci, xhci->cmd_ring);
@@ -448,47 +617,56 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp_64 &= ~ERST_PTR_MASK;
- xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "ERST deq = 64'h%0lx", (long unsigned int) temp_64);
- xhci_dbg(xhci, "// Set the interrupt modulation register\n");
- temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Set the interrupt modulation register");
+ temp = readl(&xhci->ir_set->irq_control);
temp &= ~ER_IRQ_INTERVAL_MASK;
temp |= (u32) 160;
- xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
+ writel(temp, &xhci->ir_set->irq_control);
/* Set the HCD state before we enable the irqs */
- hcd->state = HC_STATE_RUNNING;
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp = readl(&xhci->op_regs->command);
temp |= (CMD_EIE);
- xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
- temp);
- xhci_writel(xhci, temp, &xhci->op_regs->command);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Enable interrupts, cmd = 0x%x.", temp);
+ writel(temp, &xhci->op_regs->command);
- temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
+ temp = readl(&xhci->ir_set->irq_pending);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
- xhci_writel(xhci, ER_IRQ_ENABLE(temp),
- &xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
+ xhci_print_ir_set(xhci, 0);
- if (NUM_TEST_NOOPS > 0)
- doorbell = xhci_setup_one_noop(xhci);
- if (xhci->quirks & XHCI_NEC_HOST)
- xhci_queue_vendor_command(xhci, 0, 0, 0,
+ if (xhci->quirks & XHCI_NEC_HOST) {
+ struct xhci_command *command;
+ command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ if (!command)
+ return -ENOMEM;
+ xhci_queue_vendor_command(xhci, command, 0, 0, 0,
TRB_TYPE(TRB_NEC_GET_FW));
-
- if (xhci_start(xhci)) {
- xhci_halt(xhci);
- return -ENODEV;
}
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Finished xhci_run for USB2 roothub");
+ return 0;
+}
- if (doorbell)
- (*doorbell)(xhci);
- if (xhci->quirks & XHCI_NEC_HOST)
- xhci_ring_cmd_db(xhci);
+static void xhci_only_stop_hcd(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- xhci_dbg(xhci, "Finished xhci_run\n");
- return 0;
+ spin_lock_irq(&xhci->lock);
+ xhci_halt(xhci);
+
+ /* The shared_hcd is going to be deallocated shortly (the USB core only
+ * calls this function when allocation fails in usb_add_hcd(), or
+ * usb_remove_hcd() is called). So we need to unset xHCI's pointer.
+ */
+ xhci->shared_hcd = NULL;
+ spin_unlock_irq(&xhci->lock);
}
/*
@@ -505,30 +683,46 @@ void xhci_stop(struct usb_hcd *hcd)
u32 temp;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ if (!usb_hcd_is_primary_hcd(hcd)) {
+ xhci_only_stop_hcd(xhci->shared_hcd);
+ return;
+ }
+
spin_lock_irq(&xhci->lock);
+ /* Make sure the xHC is halted for a USB3 roothub
+ * (xhci_stop() could be called as part of failed init).
+ */
xhci_halt(xhci);
xhci_reset(xhci);
- xhci_cleanup_msix(xhci);
spin_unlock_irq(&xhci->lock);
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
- /* Tell the event ring poll function not to reschedule */
- xhci->zombie = 1;
- del_timer_sync(&xhci->event_ring_timer);
-#endif
+ xhci_cleanup_msix(xhci);
+
+ /* Deleting Compliance Mode Recovery Timer */
+ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+ (!(xhci_all_ports_seen_u0(xhci)))) {
+ del_timer_sync(&xhci->comp_mode_recovery_timer);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "%s: compliance mode recovery timer deleted",
+ __func__);
+ }
+
+ if (xhci->quirks & XHCI_AMD_PLL_FIX)
+ usb_amd_dev_put();
- xhci_dbg(xhci, "// Disabling event ring interrupts\n");
- temp = xhci_readl(xhci, &xhci->op_regs->status);
- xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
- temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- xhci_writel(xhci, ER_IRQ_DISABLE(temp),
- &xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Disabling event ring interrupts");
+ temp = readl(&xhci->op_regs->status);
+ writel(temp & ~STS_EINT, &xhci->op_regs->status);
+ temp = readl(&xhci->ir_set->irq_pending);
+ writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
+ xhci_print_ir_set(xhci, 0);
- xhci_dbg(xhci, "cleaning up memory\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
xhci_mem_cleanup(xhci);
- xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
- xhci_readl(xhci, &xhci->op_regs->status));
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xhci_stop completed - status = %x",
+ readl(&xhci->op_regs->status));
}
/*
@@ -537,44 +731,123 @@ void xhci_stop(struct usb_hcd *hcd)
* This is called when the machine is rebooting or halting. We assume that the
* machine will be powered off, and the HC's internal state will be reset.
* Don't bother to free memory.
+ *
+ * This will only ever be called with the main usb_hcd (the USB3 roothub).
*/
void xhci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
+ usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
+
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
- xhci_cleanup_msix(xhci);
+ /* Workaround for spurious wakeups at shutdown with HSW */
+ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+ xhci_reset(xhci);
spin_unlock_irq(&xhci->lock);
- xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
- xhci_readl(xhci, &xhci->op_regs->status));
+ xhci_cleanup_msix(xhci);
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xhci_shutdown completed - status = %x",
+ readl(&xhci->op_regs->status));
+
+ /* Yet another workaround for spurious wakeups at shutdown with HSW */
+ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+ pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
}
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
- xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
- xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
+ xhci->s3.command = readl(&xhci->op_regs->command);
+ xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
- xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
- xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
- xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
+ xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
+ xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
+ xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
- xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
- xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
+ writel(xhci->s3.command, &xhci->op_regs->command);
+ writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
- xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
- xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
- xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
- xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
+ writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
+ writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+ xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
+ writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
+ writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
+}
+
+static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
+{
+ u64 val_64;
+
+ /* step 2: initialize command ring buffer */
+ val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+ (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+ xhci->cmd_ring->dequeue) &
+ (u64) ~CMD_RING_RSVD_BITS) |
+ xhci->cmd_ring->cycle_state;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Setting command ring address to 0x%llx",
+ (long unsigned long) val_64);
+ xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+}
+
+/*
+ * The whole command ring must be cleared to zero when we suspend the host.
+ *
+ * The host doesn't save the command ring pointer in the suspend well, so we
+ * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
+ * aligned, because of the reserved bits in the command ring dequeue pointer
+ * register. Therefore, we can't just set the dequeue pointer back in the
+ * middle of the ring (TRBs are 16-byte aligned).
+ */
+static void xhci_clear_command_ring(struct xhci_hcd *xhci)
+{
+ struct xhci_ring *ring;
+ struct xhci_segment *seg;
+
+ ring = xhci->cmd_ring;
+ seg = ring->deq_seg;
+ do {
+ memset(seg->trbs, 0,
+ sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+ cpu_to_le32(~TRB_CYCLE);
+ seg = seg->next;
+ } while (seg != ring->deq_seg);
+
+ /* Reset the software enqueue and dequeue pointers */
+ ring->deq_seg = ring->first_seg;
+ ring->dequeue = ring->first_seg->trbs;
+ ring->enq_seg = ring->deq_seg;
+ ring->enqueue = ring->dequeue;
+
+ ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
+ /*
+ * Ring is now zeroed, so the HW should look for change of ownership
+ * when the cycle bit is set to 1.
+ */
+ ring->cycle_state = 1;
+
+ /*
+ * Reset the hardware dequeue pointer.
+ * Yes, this will need to be re-written after resume, but we're paranoid
+ * and want to make sure the hardware doesn't access bogus memory
+ * because, say, the BIOS or an SMI started the host without changing
+ * the command ring pointers.
+ */
+ xhci_set_cmd_ring_deq(xhci);
}
/*
@@ -586,41 +859,72 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
int xhci_suspend(struct xhci_hcd *xhci)
{
int rc = 0;
+ unsigned int delay = XHCI_MAX_HALT_USEC;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 command;
+ if (hcd->state != HC_STATE_SUSPENDED ||
+ xhci->shared_hcd->state != HC_STATE_SUSPENDED)
+ return -EINVAL;
+
+ /* Don't poll the roothubs on bus suspend. */
+ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
+
spin_lock_irq(&xhci->lock);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
/* step 1: stop endpoint */
/* skipped assuming that port suspend has done */
/* step 2: clear Run/Stop bit */
- command = xhci_readl(xhci, &xhci->op_regs->command);
+ command = readl(&xhci->op_regs->command);
command &= ~CMD_RUN;
- xhci_writel(xhci, command, &xhci->op_regs->command);
- if (handshake(xhci, &xhci->op_regs->status,
- STS_HALT, STS_HALT, 100*100)) {
+ writel(command, &xhci->op_regs->command);
+
+ /* Some chips from Fresco Logic need an extraordinary delay */
+ delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
+
+ if (xhci_handshake(xhci, &xhci->op_regs->status,
+ STS_HALT, STS_HALT, delay)) {
xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
+ xhci_clear_command_ring(xhci);
/* step 3: save registers */
xhci_save_registers(xhci);
/* step 4: set CSS flag */
- command = xhci_readl(xhci, &xhci->op_regs->command);
+ command = readl(&xhci->op_regs->command);
command |= CMD_CSS;
- xhci_writel(xhci, command, &xhci->op_regs->command);
- if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
- xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
+ writel(command, &xhci->op_regs->command);
+ if (xhci_handshake(xhci, &xhci->op_regs->status,
+ STS_SAVE, 0, 10 * 1000)) {
+ xhci_warn(xhci, "WARN: xHC save state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
- /* step 5: remove core well power */
- xhci_cleanup_msix(xhci);
spin_unlock_irq(&xhci->lock);
+ /*
+ * Deleting Compliance Mode Recovery Timer because the xHCI Host
+ * is about to be suspended.
+ */
+ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+ (!(xhci_all_ports_seen_u0(xhci)))) {
+ del_timer_sync(&xhci->comp_mode_recovery_timer);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "%s: compliance mode recovery timer deleted",
+ __func__);
+ }
+
+ /* step 5: remove core well power */
+ /* synchronize irq when using MSI-X */
+ xhci_msix_sync_irqs(xhci);
+
return rc;
}
@@ -632,115 +936,109 @@ int xhci_suspend(struct xhci_hcd *xhci)
*/
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
- u32 command, temp = 0;
+ u32 command, temp = 0, status;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- u64 val_64;
- int old_state, retval;
+ struct usb_hcd *secondary_hcd;
+ int retval = 0;
+ bool comp_timer_running = false;
- old_state = hcd->state;
- if (time_before(jiffies, xhci->next_statechange))
+ /* Wait a bit if either of the roothubs need to settle from the
+ * transition into bus suspend.
+ */
+ if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
+ time_before(jiffies,
+ xhci->bus_state[1].next_statechange))
msleep(100);
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+
spin_lock_irq(&xhci->lock);
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ hibernated = true;
if (!hibernated) {
/* step 1: restore register */
xhci_restore_registers(xhci);
/* step 2: initialize command ring buffer */
- val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
- (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
- xhci->cmd_ring->dequeue) &
- (u64) ~CMD_RING_RSVD_BITS) |
- xhci->cmd_ring->cycle_state;
- xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
- (long unsigned long) val_64);
- xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+ xhci_set_cmd_ring_deq(xhci);
/* step 3: restore state and start state*/
/* step 3: set CRS flag */
- command = xhci_readl(xhci, &xhci->op_regs->command);
+ command = readl(&xhci->op_regs->command);
command |= CMD_CRS;
- xhci_writel(xhci, command, &xhci->op_regs->command);
- if (handshake(xhci, &xhci->op_regs->status,
- STS_RESTORE, 0, 10*100)) {
- xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
+ writel(command, &xhci->op_regs->command);
+ if (xhci_handshake(xhci, &xhci->op_regs->status,
+ STS_RESTORE, 0, 10 * 1000)) {
+ xhci_warn(xhci, "WARN: xHC restore state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
- temp = xhci_readl(xhci, &xhci->op_regs->status);
+ temp = readl(&xhci->op_regs->status);
}
/* If restore operation fails, re-initialize the HC during resume */
if ((temp & STS_SRE) || hibernated) {
- usb_root_hub_lost_power(hcd->self.root_hub);
+
+ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+ !(xhci_all_ports_seen_u0(xhci))) {
+ del_timer_sync(&xhci->comp_mode_recovery_timer);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Compliance Mode Recovery Timer deleted!");
+ }
+
+ /* Let the USB core know _both_ roothubs lost power. */
+ usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
+ usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
xhci_reset(xhci);
- if (hibernated)
- xhci_cleanup_msix(xhci);
spin_unlock_irq(&xhci->lock);
-
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
- /* Tell the event ring poll function not to reschedule */
- xhci->zombie = 1;
- del_timer_sync(&xhci->event_ring_timer);
-#endif
+ xhci_cleanup_msix(xhci);
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
- temp = xhci_readl(xhci, &xhci->op_regs->status);
- xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
- temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- xhci_writel(xhci, ER_IRQ_DISABLE(temp),
- &xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ temp = readl(&xhci->op_regs->status);
+ writel(temp & ~STS_EINT, &xhci->op_regs->status);
+ temp = readl(&xhci->ir_set->irq_pending);
+ writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
+ xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
- xhci_readl(xhci, &xhci->op_regs->status));
+ readl(&xhci->op_regs->status));
+
+ /* USB core calls the PCI reinit and start functions twice:
+ * first with the primary HCD, and then with the secondary HCD.
+ * If we don't do the same, the host will never be started.
+ */
+ if (!usb_hcd_is_primary_hcd(hcd))
+ secondary_hcd = hcd;
+ else
+ secondary_hcd = xhci->shared_hcd;
- xhci_dbg(xhci, "Initialize the HCD\n");
- retval = xhci_init(hcd);
+ xhci_dbg(xhci, "Initialize the xhci_hcd\n");
+ retval = xhci_init(hcd->primary_hcd);
if (retval)
return retval;
+ comp_timer_running = true;
- xhci_dbg(xhci, "Start the HCD\n");
- retval = xhci_run(hcd);
- if (!retval)
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- hcd->state = HC_STATE_SUSPENDED;
- return retval;
- }
-
- /* Re-setup MSI-X */
- if (hcd->irq)
- free_irq(hcd->irq, hcd);
- hcd->irq = -1;
-
- retval = xhci_setup_msix(xhci);
- if (retval)
- /* fall back to msi*/
- retval = xhci_setup_msi(xhci);
-
- if (retval) {
- /* fall back to legacy interrupt*/
- retval = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
- hcd->irq_descr, hcd);
- if (retval) {
- xhci_err(xhci, "request interrupt %d failed\n",
- pdev->irq);
- return retval;
+ xhci_dbg(xhci, "Start the primary HCD\n");
+ retval = xhci_run(hcd->primary_hcd);
+ if (!retval) {
+ xhci_dbg(xhci, "Start the secondary HCD\n");
+ retval = xhci_run(secondary_hcd);
}
- hcd->irq = pdev->irq;
+ hcd->state = HC_STATE_SUSPENDED;
+ xhci->shared_hcd->state = HC_STATE_SUSPENDED;
+ goto done;
}
/* step 4: set Run/Stop bit */
- command = xhci_readl(xhci, &xhci->op_regs->command);
+ command = readl(&xhci->op_regs->command);
command |= CMD_RUN;
- xhci_writel(xhci, command, &xhci->op_regs->command);
- handshake(xhci, &xhci->op_regs->status, STS_HALT,
+ writel(command, &xhci->op_regs->command);
+ xhci_handshake(xhci, &xhci->op_regs->status, STS_HALT,
0, 250 * 1000);
/* step 5: walk topology and initialize portsc,
@@ -752,14 +1050,33 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
* Running endpoints by ringing their doorbells
*/
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- if (!hibernated)
- hcd->state = old_state;
- else
- hcd->state = HC_STATE_SUSPENDED;
-
spin_unlock_irq(&xhci->lock);
- return 0;
+
+ done:
+ if (retval == 0) {
+		/* Resume root hubs only if there are pending events. */
+ status = readl(&xhci->op_regs->status);
+ if (status & STS_EINT) {
+ usb_hcd_resume_root_hub(hcd);
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
+ }
+ }
+
+ /*
+	 * If the system is subject to the quirk, the Compliance Mode Recovery
+	 * Timer always needs to be re-initialized after a system resume, since
+	 * the ports may suffer the compliance mode issue again.  It doesn't
+	 * matter whether the ports entered U0 before the system was suspended.
+ */
+ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
+ compliance_mode_recovery_timer_init(xhci);
+
+ /* Re-enable port polling. */
+ xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
+
+ return retval;
}
#endif /* CONFIG_PM */
@@ -786,6 +1103,16 @@ unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
return index;
}
+/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
+ * address from the XHCI endpoint index.
+ */
+unsigned int xhci_get_endpoint_address(unsigned int ep_index)
+{
+ unsigned int number = DIV_ROUND_UP(ep_index, 2);
+ unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
+ return direction | number;
+}
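+
+/*
+ * A worked example of the mapping above, for illustration (USB_DIR_IN is
+ * 0x80, USB_DIR_OUT is 0x00):
+ *
+ *   ep_index 0 -> DIV_ROUND_UP(0, 2) = 0, 0 % 2 == 0 -> USB_DIR_IN  -> 0x80
+ *   ep_index 1 -> DIV_ROUND_UP(1, 2) = 1, 1 % 2 == 1 -> USB_DIR_OUT -> 0x01
+ *   ep_index 2 -> DIV_ROUND_UP(2, 2) = 1, 2 % 2 == 0 -> USB_DIR_IN  -> 0x81
+ *
+ * i.e. the default control endpoint, EP1 OUT and EP1 IN map back from the
+ * indices that xhci_get_endpoint_index() assigns them.
+ */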
+
/* Find the flag for this endpoint (for use in the control context). Use the
* endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
* bit 1, etc.
@@ -818,40 +1145,40 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
*/
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
const char *func) {
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
if (!hcd || (check_ep && !ep) || !udev) {
- printk(KERN_DEBUG "xHCI %s called with invalid args\n",
- func);
+ pr_debug("xHCI %s called with invalid args\n", func);
return -EINVAL;
}
if (!udev->parent) {
- printk(KERN_DEBUG "xHCI %s called for root hub\n",
- func);
+ pr_debug("xHCI %s called for root hub\n", func);
return 0;
}
+ xhci = hcd_to_xhci(hcd);
if (check_virt_dev) {
- xhci = hcd_to_xhci(hcd);
- if (!udev->slot_id || !xhci->devs
- || !xhci->devs[udev->slot_id]) {
- printk(KERN_DEBUG "xHCI %s called with unaddressed "
- "device\n", func);
+ if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
+ xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
+ func);
return -EINVAL;
}
virt_dev = xhci->devs[udev->slot_id];
if (virt_dev->udev != udev) {
- printk(KERN_DEBUG "xHCI %s called with udev and "
+ xhci_dbg(xhci, "xHCI %s called with udev and "
"virt_dev does not match\n", func);
return -EINVAL;
}
}
+ if (xhci->xhc_state & XHCI_STATE_HALTED)
+ return -ENODEV;
+
return 1;
}
@@ -868,54 +1195,73 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, struct urb *urb)
{
- struct xhci_container_ctx *in_ctx;
struct xhci_container_ctx *out_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
+ struct xhci_command *command;
int max_packet_size;
int hw_max_packet_size;
int ret = 0;
out_ctx = xhci->devs[slot_id]->out_ctx;
ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
- hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
- max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
+ hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
+ max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
if (hw_max_packet_size != max_packet_size) {
- xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
- xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Max Packet Size for ep 0 changed.");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Max packet size in usb_device = %d",
max_packet_size);
- xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Max packet size in xHCI HW = %d",
hw_max_packet_size);
- xhci_dbg(xhci, "Issuing evaluate context command.\n");
-
- /* Set up the modified control endpoint 0 */
- xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
- xhci->devs[slot_id]->out_ctx, ep_index);
- in_ctx = xhci->devs[slot_id]->in_ctx;
- ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
- ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
- ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Issuing evaluate context command.");
/* Set up the input context flags for the command */
/* FIXME: This won't work if a non-default control endpoint
* changes max packet sizes.
*/
- ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
- ctrl_ctx->add_flags = EP0_FLAG;
+
+ command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
+ if (!command)
+ return -ENOMEM;
+
+ command->in_ctx = xhci->devs[slot_id]->in_ctx;
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ ret = -ENOMEM;
+ goto command_cleanup;
+ }
+ /* Set up the modified control endpoint 0 */
+ xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
+ xhci->devs[slot_id]->out_ctx, ep_index);
+
+ ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+ ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
+ ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
+
+ ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
ctrl_ctx->drop_flags = 0;
xhci_dbg(xhci, "Slot %d input context\n", slot_id);
- xhci_dbg_ctx(xhci, in_ctx, ep_index);
+ xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
xhci_dbg(xhci, "Slot %d output context\n", slot_id);
xhci_dbg_ctx(xhci, out_ctx, ep_index);
- ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
+ ret = xhci_configure_endpoint(xhci, urb->dev, command,
true, false);
/* Clean up the input context for later use by bandwidth
* functions.
*/
- ctrl_ctx->add_flags = SLOT_FLAG;
+ ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
+command_cleanup:
+ kfree(command->completion);
+ kfree(command);
}
return ret;
}
@@ -927,6 +1273,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_td *buffer;
unsigned long flags;
int ret = 0;
unsigned int slot_id, ep_index;
@@ -957,13 +1304,15 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
if (!urb_priv)
return -ENOMEM;
+ buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
+ if (!buffer) {
+ kfree(urb_priv);
+ return -ENOMEM;
+ }
+
for (i = 0; i < size; i++) {
- urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
- if (!urb_priv->td[i]) {
- urb_priv->length = i;
- xhci_urb_free_priv(xhci, urb_priv);
- return -ENOMEM;
- }
+ urb_priv->td[i] = buffer;
+ buffer++;
}
urb_priv->length = size;
@@ -977,8 +1326,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
if (urb->dev->speed == USB_SPEED_FULL) {
ret = xhci_check_maxpacket(xhci, slot_id,
ep_index, urb);
- if (ret < 0)
+ if (ret < 0) {
+ xhci_urb_free_priv(xhci, urb_priv);
+ urb->hcpriv = NULL;
return ret;
+ }
}
/* We have a spinlock and interrupts disabled, so we must pass
@@ -989,6 +1341,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
goto dying;
ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
+ if (ret)
+ goto free_priv;
spin_unlock_irqrestore(&xhci->lock, flags);
} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
spin_lock_irqsave(&xhci->lock, flags);
@@ -1009,6 +1363,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
}
+ if (ret)
+ goto free_priv;
spin_unlock_irqrestore(&xhci->lock, flags);
} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
spin_lock_irqsave(&xhci->lock, flags);
@@ -1016,6 +1372,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
goto dying;
ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
+ if (ret)
+ goto free_priv;
spin_unlock_irqrestore(&xhci->lock, flags);
} else {
spin_lock_irqsave(&xhci->lock, flags);
@@ -1023,18 +1381,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
goto dying;
ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
+ if (ret)
+ goto free_priv;
spin_unlock_irqrestore(&xhci->lock, flags);
}
exit:
return ret;
dying:
- xhci_urb_free_priv(xhci, urb_priv);
- urb->hcpriv = NULL;
xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
"non-responsive xHCI host.\n",
urb->ep->desc.bEndpointAddress, urb);
+ ret = -ESHUTDOWN;
+free_priv:
+ xhci_urb_free_priv(xhci, urb_priv);
+ urb->hcpriv = NULL;
spin_unlock_irqrestore(&xhci->lock, flags);
- return -ESHUTDOWN;
+ return ret;
}
/* Get the right ring for the given URB.
@@ -1120,6 +1482,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unsigned int ep_index;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
+ struct xhci_command *command;
xhci = hcd_to_xhci(hcd);
spin_lock_irqsave(&xhci->lock, flags);
@@ -1127,20 +1490,30 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
if (ret || !urb->hcpriv)
goto done;
- temp = xhci_readl(xhci, &xhci->op_regs->status);
- if (temp == 0xffffffff) {
- xhci_dbg(xhci, "HW died, freeing TD.\n");
+ temp = readl(&xhci->op_regs->status);
+ if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "HW died, freeing TD.");
urb_priv = urb->hcpriv;
+ for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+ td = urb_priv->td[i];
+ if (!list_empty(&td->td_list))
+ list_del_init(&td->td_list);
+ if (!list_empty(&td->cancelled_td_list))
+ list_del_init(&td->cancelled_td_list);
+ }
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&xhci->lock, flags);
- usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
+ usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
xhci_urb_free_priv(xhci, urb_priv);
return ret;
}
- if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
- "non-responsive xHCI host.\n",
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Ep 0x%x: URB %p to be canceled on "
+ "non-responsive xHCI host.",
urb->ep->desc.bEndpointAddress, urb);
/* Let the stop endpoint command watchdog timer (which set this
* state) finish cleaning up the endpoint TD lists. We must
@@ -1150,9 +1523,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
}
- xhci_dbg(xhci, "Cancel URB %p\n", urb);
- xhci_dbg(xhci, "Event ring:\n");
- xhci_debug_ring(xhci, xhci->event_ring);
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
@@ -1161,12 +1531,19 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
}
- xhci_dbg(xhci, "Endpoint ring:\n");
- xhci_debug_ring(xhci, ep_ring);
-
urb_priv = urb->hcpriv;
-
- for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+ i = urb_priv->td_cnt;
+ if (i < urb_priv->length)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Cancel URB %p, dev %s, ep 0x%x, "
+ "starting at offset 0x%llx",
+ urb, urb->dev->devpath,
+ urb->ep->desc.bEndpointAddress,
+ (unsigned long long) xhci_trb_virt_to_dma(
+ urb_priv->td[i]->start_seg,
+ urb_priv->td[i]->first_trb));
+
+ for (; i < urb_priv->length; i++) {
td = urb_priv->td[i];
list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
}
@@ -1175,12 +1552,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
* the first cancellation to be handled.
*/
if (!(ep->ep_state & EP_HALT_PENDING)) {
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
ep->ep_state |= EP_HALT_PENDING;
ep->stop_cmds_pending++;
ep->stop_cmd_timer.expires = jiffies +
XHCI_STOP_EP_CMD_TIMEOUT * HZ;
add_timer(&ep->stop_cmd_timer);
- xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
+ xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
+ ep_index, 0);
xhci_ring_cmd_db(xhci);
}
done:
@@ -1219,8 +1598,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
- xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
drop_flag = xhci_get_endpoint_flag(&ep->desc);
if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
@@ -1231,32 +1612,41 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
in_ctx = xhci->devs[udev->slot_id]->in_ctx;
out_ctx = xhci->devs[udev->slot_id]->out_ctx;
ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ return 0;
+ }
+
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
/* If the HC already knows the endpoint is disabled,
* or the HCD has noted it is disabled, ignore this request
*/
- if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
- ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+ if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
+ cpu_to_le32(EP_STATE_DISABLED)) ||
+ le32_to_cpu(ctrl_ctx->drop_flags) &
+ xhci_get_endpoint_flag(&ep->desc)) {
xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
__func__, ep);
return 0;
}
- ctrl_ctx->drop_flags |= drop_flag;
- new_drop_flags = ctrl_ctx->drop_flags;
+ ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
+ new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
- ctrl_ctx->add_flags &= ~drop_flag;
- new_add_flags = ctrl_ctx->add_flags;
+ ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
+ new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
- last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
+ last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
/* Update the last valid endpoint context, if we deleted the last one */
- if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
- slot_ctx->dev_info &= ~LAST_CTX_MASK;
- slot_ctx->dev_info |= LAST_CTX(last_ctx);
+ if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
+ LAST_CTX(last_ctx)) {
+ slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
}
- new_slot_info = slot_ctx->dev_info;
+ new_slot_info = le32_to_cpu(slot_ctx->dev_info);
xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
@@ -1288,12 +1678,12 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_hcd *xhci;
struct xhci_container_ctx *in_ctx, *out_ctx;
unsigned int ep_index;
- struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
u32 added_ctxs;
unsigned int last_ctx;
u32 new_add_flags, new_drop_flags, new_slot_info;
+ struct xhci_virt_device *virt_dev;
int ret = 0;
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
@@ -1303,6 +1693,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
return ret;
}
xhci = hcd_to_xhci(hcd);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
added_ctxs = xhci_get_endpoint_flag(&ep->desc);
last_ctx = xhci_last_valid_endpoint(added_ctxs);
@@ -1316,15 +1708,34 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
return 0;
}
- in_ctx = xhci->devs[udev->slot_id]->in_ctx;
- out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+ virt_dev = xhci->devs[udev->slot_id];
+ in_ctx = virt_dev->in_ctx;
+ out_ctx = virt_dev->out_ctx;
ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ return 0;
+ }
+
ep_index = xhci_get_endpoint_index(&ep->desc);
- ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+ /* If this endpoint is already in use, and the upper layers are trying
+ * to add it again without dropping it, reject the addition.
+ */
+ if (virt_dev->eps[ep_index].ring &&
+ !(le32_to_cpu(ctrl_ctx->drop_flags) &
+ xhci_get_endpoint_flag(&ep->desc))) {
+ xhci_warn(xhci, "Trying to add endpoint 0x%x "
+ "without dropping it.\n",
+ (unsigned int) ep->desc.bEndpointAddress);
+ return -EINVAL;
+ }
+
/* If the HCD has already noted the endpoint is enabled,
* ignore this request.
*/
- if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
+ if (le32_to_cpu(ctrl_ctx->add_flags) &
+ xhci_get_endpoint_flag(&ep->desc)) {
xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
__func__, ep);
return 0;
@@ -1335,15 +1746,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
	 * process context, not interrupt context (or so the documentation
	 * for usb_set_interface() and usb_set_configuration() claims).
*/
- if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
- udev, ep, GFP_NOIO) < 0) {
+ if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
__func__, ep->desc.bEndpointAddress);
return -ENOMEM;
}
- ctrl_ctx->add_flags |= added_ctxs;
- new_add_flags = ctrl_ctx->add_flags;
+ ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
+ new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
/* If xhci_endpoint_disable() was called for this endpoint, but the
* xHC hasn't been notified yet through the check_bandwidth() call,
@@ -1351,15 +1761,16 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
* descriptors. We must drop and re-add this endpoint, so we leave the
* drop flags alone.
*/
- new_drop_flags = ctrl_ctx->drop_flags;
+ new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
/* Update the last valid endpoint context, if we just added one past */
- if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
- slot_ctx->dev_info &= ~LAST_CTX_MASK;
- slot_ctx->dev_info |= LAST_CTX(last_ctx);
+ if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
+ LAST_CTX(last_ctx)) {
+ slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
}
- new_slot_info = slot_ctx->dev_info;
+ new_slot_info = le32_to_cpu(slot_ctx->dev_info);
/* Store the usb_device pointer for later use */
ep->hcpriv = udev;
@@ -1380,18 +1791,24 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
struct xhci_slot_ctx *slot_ctx;
int i;
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ return;
+ }
+
/* When a device's add flag and drop flag are zero, any subsequent
* configure endpoint command will leave that endpoint's state
* untouched. Make sure we don't leave any old state in the input
* endpoint contexts.
*/
- ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
ctrl_ctx->drop_flags = 0;
ctrl_ctx->add_flags = 0;
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
- slot_ctx->dev_info &= ~LAST_CTX_MASK;
+ slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
/* Endpoint 0 is always valid */
- slot_ctx->dev_info |= LAST_CTX(1);
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
for (i = 1; i < 31; ++i) {
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
ep_ctx->ep_info = 0;
@@ -1402,11 +1819,16 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
- struct usb_device *udev, int *cmd_status)
+ struct usb_device *udev, u32 *cmd_status)
{
int ret;
switch (*cmd_status) {
+ case COMP_CMD_ABORT:
+ case COMP_CMD_STOP:
+ xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
+ ret = -ETIME;
+ break;
case COMP_ENOMEM:
dev_warn(&udev->dev, "Not enough host controller resources "
"for new device state.\n");
@@ -1414,6 +1836,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
/* FIXME: can we allocate more resources for the HC? */
break;
case COMP_BW_ERR:
+ case COMP_2ND_BW_ERR:
dev_warn(&udev->dev, "Not enough bandwidth "
"for new device state.\n");
ret = -ENOSPC;
@@ -1426,8 +1849,14 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
"and endpoint is not disabled.\n");
ret = -EINVAL;
break;
+ case COMP_DEV_ERR:
+ dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
+ "configure command.\n");
+ ret = -ENODEV;
+ break;
case COMP_SUCCESS:
- dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Successful Endpoint Configure command");
ret = 0;
break;
default:
@@ -1440,12 +1869,17 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
}
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
- struct usb_device *udev, int *cmd_status)
+ struct usb_device *udev, u32 *cmd_status)
{
int ret;
struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
switch (*cmd_status) {
+ case COMP_CMD_ABORT:
+ case COMP_CMD_STOP:
+ xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
+ ret = -ETIME;
+ break;
case COMP_EINVAL:
dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
"context command.\n");
@@ -1454,14 +1888,27 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
"evaluate context command.\n");
+ ret = -EINVAL;
+ break;
case COMP_CTX_STATE:
dev_warn(&udev->dev, "WARN: invalid context state for "
"evaluate context command.\n");
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
ret = -EINVAL;
break;
+ case COMP_DEV_ERR:
+ dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
+ "context command.\n");
+ ret = -ENODEV;
+ break;
+ case COMP_MEL_ERR:
+ /* Max Exit Latency too large error */
+ dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
+ ret = -EINVAL;
+ break;
case COMP_SUCCESS:
- dev_dbg(&udev->dev, "Successful evaluate context command\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Successful evaluate context command");
ret = 0;
break;
default:
@@ -1473,6 +1920,681 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
return ret;
}
+static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
+ struct xhci_input_control_ctx *ctrl_ctx)
+{
+ u32 valid_add_flags;
+ u32 valid_drop_flags;
+
+ /* Ignore the slot flag (bit 0), and the default control endpoint flag
+ * (bit 1). The default control endpoint is added during the Address
+ * Device command and is never removed until the slot is disabled.
+ */
+ valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
+ valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
+
+ /* Use hweight32 to count the number of ones in the add flags, or
+ * number of endpoints added. Don't count endpoints that are changed
+ * (both added and dropped).
+ */
+ return hweight32(valid_add_flags) -
+ hweight32(valid_add_flags & valid_drop_flags);
+}
+
+static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
+ struct xhci_input_control_ctx *ctrl_ctx)
+{
+ u32 valid_add_flags;
+ u32 valid_drop_flags;
+
+ valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
+ valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
+
+ return hweight32(valid_drop_flags) -
+ hweight32(valid_add_flags & valid_drop_flags);
+}
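+
+/*
+ * Worked example for the two counting helpers above, for illustration:
+ * if the input context adds EP1 OUT and EP1 IN (add_flags bits 2 and 3)
+ * and drops EP1 OUT (drop_flags bit 2), then after shifting out the slot
+ * and EP0 flags:
+ *
+ *   valid_add_flags = 0x3, valid_drop_flags = 0x1
+ *   new endpoints     = hweight32(0x3) - hweight32(0x3 & 0x1) = 2 - 1 = 1
+ *   dropped endpoints = hweight32(0x1) - hweight32(0x3 & 0x1) = 1 - 1 = 0
+ *
+ * i.e. EP1 IN counts as newly added, while EP1 OUT is merely changed and
+ * affects neither count.
+ */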
+
+/*
+ * We need to reserve the new number of endpoints before the configure endpoint
+ * command completes. We can't subtract the dropped endpoints from the number
+ * of active endpoints until the command completes, because otherwise we could
+ * oversubscribe the host in this case:
+ *
+ * - the first configure endpoint command drops more endpoints than it adds
+ * - a second configure endpoint command that adds more endpoints is queued
+ * - the first configure endpoint command fails, so the config is unchanged
+ * - the second command may succeed, even though there aren't enough resources
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
+ struct xhci_input_control_ctx *ctrl_ctx)
+{
+ u32 added_eps;
+
+ added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
+ if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Not enough ep ctxs: "
+ "%u active, need to add %u, limit is %u.",
+ xhci->num_active_eps, added_eps,
+ xhci->limit_active_eps);
+ return -ENOMEM;
+ }
+ xhci->num_active_eps += added_eps;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Adding %u ep ctxs, %u now active.", added_eps,
+ xhci->num_active_eps);
+ return 0;
+}
+
+/*
+ * The xHC failed the configure endpoint command for some other reason, so we
+ * need to revert the resources that the failed configuration would have used.
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_free_host_resources(struct xhci_hcd *xhci,
+ struct xhci_input_control_ctx *ctrl_ctx)
+{
+ u32 num_failed_eps;
+
+ num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
+ xhci->num_active_eps -= num_failed_eps;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Removing %u failed ep ctxs, %u now active.",
+ num_failed_eps,
+ xhci->num_active_eps);
+}
+
+/*
+ * Now that the command has completed, clean up the active endpoint count by
+ * subtracting out the endpoints that were dropped (but not changed).
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
+ struct xhci_input_control_ctx *ctrl_ctx)
+{
+ u32 num_dropped_eps;
+
+ num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
+ xhci->num_active_eps -= num_dropped_eps;
+ if (num_dropped_eps)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Removing %u dropped ep ctxs, %u now active.",
+ num_dropped_eps,
+ xhci->num_active_eps);
+}
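+
+/*
+ * Putting the three resource helpers together, with hypothetical numbers for
+ * illustration: with limit_active_eps = 64 and num_active_eps = 60, a
+ * configuration that adds 3 new endpoints and drops 5 others first reserves
+ * the 3 via xhci_reserve_host_resources() (60 + 3 <= 64, so num_active_eps
+ * becomes 63).  If the command succeeds, xhci_finish_resource_reservation()
+ * subtracts the 5 dropped endpoints, leaving 58 active; if it fails,
+ * xhci_free_host_resources() removes the 3 reserved ones and the count
+ * returns to 60.
+ */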
+
+static unsigned int xhci_get_block_size(struct usb_device *udev)
+{
+ switch (udev->speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ return FS_BLOCK;
+ case USB_SPEED_HIGH:
+ return HS_BLOCK;
+ case USB_SPEED_SUPER:
+ return SS_BLOCK;
+ case USB_SPEED_UNKNOWN:
+ case USB_SPEED_WIRELESS:
+ default:
+ /* Should never happen */
+ return 1;
+ }
+}
+
+static unsigned int
+xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
+{
+ if (interval_bw->overhead[LS_OVERHEAD_TYPE])
+ return LS_OVERHEAD;
+ if (interval_bw->overhead[FS_OVERHEAD_TYPE])
+ return FS_OVERHEAD;
+ return HS_OVERHEAD;
+}
+
+/* If we are changing a LS/FS device under a HS hub,
+ * make sure (if we are activating a new TT) that the HS bus has enough
+ * bandwidth for this new TT.
+ */
+static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int old_active_eps)
+{
+ struct xhci_interval_bw_table *bw_table;
+ struct xhci_tt_bw_info *tt_info;
+
+ /* Find the bandwidth table for the root port this TT is attached to. */
+ bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
+ tt_info = virt_dev->tt_info;
+ /* If this TT already had active endpoints, the bandwidth for this TT
+ * has already been added. Removing all periodic endpoints (and thus
+	 * making the TT inactive) will only decrease the bandwidth used.
+ */
+ if (old_active_eps)
+ return 0;
+ if (old_active_eps == 0 && tt_info->active_eps != 0) {
+ if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
+ return -ENOMEM;
+ return 0;
+ }
+ /* Not sure why we would have no new active endpoints...
+ *
+ * Maybe because of an Evaluate Context change for a hub update or a
+ * control endpoint 0 max packet size change?
+ * FIXME: skip the bandwidth calculation in that case.
+ */
+ return 0;
+}
+
+static int xhci_check_ss_bw(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev)
+{
+ unsigned int bw_reserved;
+
+ bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
+ if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
+ return -ENOMEM;
+
+ bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
+ if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * This algorithm is a very conservative estimate of the worst-case scheduling
+ * scenario for any one interval. The hardware dynamically schedules the
+ * packets, so we can't tell which microframe could be the limiting factor in
+ * the bandwidth scheduling. This only takes into account periodic endpoints.
+ *
+ * Obviously, we can't solve an NP complete problem to find the minimum worst
+ * case scenario. Instead, we come up with an estimate that is no less than
+ * the worst case bandwidth used for any one microframe, but may be an
+ * over-estimate.
+ *
+ * We walk the requirements for each endpoint by interval, starting with the
+ * smallest interval, and place packets in the schedule where there is only one
+ * possible way to schedule packets for that interval. In order to simplify
+ * this algorithm, we record the largest max packet size for each interval, and
+ * assume all packets will be that size.
+ *
+ * For interval 0, we obviously must schedule all packets for each interval.
+ * The bandwidth for interval 0 is just the amount of data to be transmitted
+ * (the sum of all max ESIT payload sizes, plus any overhead per packet times
+ * the number of packets).
+ *
+ * For interval 1, we have two possible microframes to schedule those packets
+ * in. For this algorithm, if we can schedule the same number of packets for
+ * each possible scheduling opportunity (each microframe), we will do so. The
+ * remaining number of packets will be saved to be transmitted in the gaps in
+ * the next interval's scheduling sequence.
+ *
+ * As we move those remaining packets to be scheduled with interval 2 packets,
+ * we have to double the number of remaining packets to transmit. This is
+ * because the intervals are actually powers of 2, and we would be transmitting
+ * the previous interval's packets twice in this interval. We also have to be
+ * sure that when we look at the largest max packet size for this interval, we
+ * also look at the largest max packet size for the remaining packets and take
+ * the greater of the two.
+ *
+ * The algorithm continues to evenly distribute packets in each scheduling
+ * opportunity, and push the remaining packets out, until we get to the last
+ * interval. Then those packets and their associated overhead are just added
+ * to the bandwidth used.
+ */
+static int xhci_check_bw_table(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int old_active_eps)
+{
+ unsigned int bw_reserved;
+ unsigned int max_bandwidth;
+ unsigned int bw_used;
+ unsigned int block_size;
+ struct xhci_interval_bw_table *bw_table;
+ unsigned int packet_size = 0;
+ unsigned int overhead = 0;
+ unsigned int packets_transmitted = 0;
+ unsigned int packets_remaining = 0;
+ unsigned int i;
+
+ if (virt_dev->udev->speed == USB_SPEED_SUPER)
+ return xhci_check_ss_bw(xhci, virt_dev);
+
+ if (virt_dev->udev->speed == USB_SPEED_HIGH) {
+ max_bandwidth = HS_BW_LIMIT;
+ /* Convert percent of bus BW reserved to blocks reserved */
+ bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
+ } else {
+ max_bandwidth = FS_BW_LIMIT;
+ bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
+ }
+
+ bw_table = virt_dev->bw_table;
+ /* We need to translate the max packet size and max ESIT payloads into
+ * the units the hardware uses.
+ */
+ block_size = xhci_get_block_size(virt_dev->udev);
+
+ /* If we are manipulating a LS/FS device under a HS hub, double check
+	 * that the HS bus has enough bandwidth if we are activating a new TT.
+ */
+ if (virt_dev->tt_info) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Recalculating BW for rootport %u",
+ virt_dev->real_port);
+ if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
+ xhci_warn(xhci, "Not enough bandwidth on HS bus for "
+ "newly activated TT.\n");
+ return -ENOMEM;
+ }
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Recalculating BW for TT slot %u port %u",
+ virt_dev->tt_info->slot_id,
+ virt_dev->tt_info->ttport);
+ } else {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Recalculating BW for rootport %u",
+ virt_dev->real_port);
+ }
+
+ /* Add in how much bandwidth will be used for interval zero, or the
+ * rounded max ESIT payload + number of packets * largest overhead.
+ */
+ bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
+ bw_table->interval_bw[0].num_packets *
+ xhci_get_largest_overhead(&bw_table->interval_bw[0]);
+
+ for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
+ unsigned int bw_added;
+ unsigned int largest_mps;
+ unsigned int interval_overhead;
+
+ /*
+ * How many packets could we transmit in this interval?
+ * If packets didn't fit in the previous interval, we will need
+ * to transmit that many packets twice within this interval.
+ */
+ packets_remaining = 2 * packets_remaining +
+ bw_table->interval_bw[i].num_packets;
+
+ /* Find the largest max packet size of this or the previous
+ * interval.
+ */
+ if (list_empty(&bw_table->interval_bw[i].endpoints))
+ largest_mps = 0;
+ else {
+ struct xhci_virt_ep *virt_ep;
+ struct list_head *ep_entry;
+
+ ep_entry = bw_table->interval_bw[i].endpoints.next;
+ virt_ep = list_entry(ep_entry,
+ struct xhci_virt_ep, bw_endpoint_list);
+ /* Convert to blocks, rounding up */
+ largest_mps = DIV_ROUND_UP(
+ virt_ep->bw_info.max_packet_size,
+ block_size);
+ }
+ if (largest_mps > packet_size)
+ packet_size = largest_mps;
+
+ /* Use the larger overhead of this or the previous interval. */
+ interval_overhead = xhci_get_largest_overhead(
+ &bw_table->interval_bw[i]);
+ if (interval_overhead > overhead)
+ overhead = interval_overhead;
+
+ /* How many packets can we evenly distribute across
+ * (1 << (i + 1)) possible scheduling opportunities?
+ */
+ packets_transmitted = packets_remaining >> (i + 1);
+
+ /* Add in the bandwidth used for those scheduled packets */
+ bw_added = packets_transmitted * (overhead + packet_size);
+
+ /* How many packets do we have remaining to transmit? */
+ packets_remaining = packets_remaining % (1 << (i + 1));
+
+ /* What largest max packet size should those packets have? */
+ /* If we've transmitted all packets, don't carry over the
+ * largest packet size.
+ */
+ if (packets_remaining == 0) {
+ packet_size = 0;
+ overhead = 0;
+ } else if (packets_transmitted > 0) {
+ /* Otherwise if we do have remaining packets, and we've
+ * scheduled some packets in this interval, take the
+ * largest max packet size from endpoints with this
+ * interval.
+ */
+ packet_size = largest_mps;
+ overhead = interval_overhead;
+ }
+ /* Otherwise carry over packet_size and overhead from the last
+ * time we had a remainder.
+ */
+ bw_used += bw_added;
+ if (bw_used > max_bandwidth) {
+ xhci_warn(xhci, "Not enough bandwidth. "
+ "Proposed: %u, Max: %u\n",
+ bw_used, max_bandwidth);
+ return -ENOMEM;
+ }
+ }
+ /*
+ * Ok, we know we have some packets left over after even-handedly
+ * scheduling interval 15. We don't know which microframes they will
+ * fit into, so we over-schedule and say they will be scheduled every
+ * microframe.
+ */
+ if (packets_remaining > 0)
+ bw_used += overhead + packet_size;
+
+ if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
+ unsigned int port_index = virt_dev->real_port - 1;
+
+ /* OK, we're manipulating a HS device attached to a
+ * root port bandwidth domain. Include the number of active TTs
+ * in the bandwidth used.
+ */
+ bw_used += TT_HS_OVERHEAD *
+ xhci->rh_bw[port_index].num_active_tts;
+ }
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Final bandwidth: %u, Limit: %u, Reserved: %u, "
+ "Available: %u " "percent",
+ bw_used, max_bandwidth, bw_reserved,
+ (max_bandwidth - bw_used - bw_reserved) * 100 /
+ max_bandwidth);
+
+ bw_used += bw_reserved;
+ if (bw_used > max_bandwidth) {
+ xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
+ bw_used, max_bandwidth);
+ return -ENOMEM;
+ }
+
+ bw_table->bw_used = bw_used;
+ return 0;
+}
+
+static bool xhci_is_async_ep(unsigned int ep_type)
+{
+ return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
+ ep_type != ISOC_IN_EP &&
+ ep_type != INT_IN_EP);
+}
+
+static bool xhci_is_sync_in_ep(unsigned int ep_type)
+{
+ return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
+}
+
+static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
+{
+ unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
+
+ if (ep_bw->ep_interval == 0)
+ return SS_OVERHEAD_BURST +
+ (ep_bw->mult * ep_bw->num_packets *
+ (SS_OVERHEAD + mps));
+ return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
+ (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
+ 1 << ep_bw->ep_interval);
+
+}
+
+void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
+ struct xhci_bw_info *ep_bw,
+ struct xhci_interval_bw_table *bw_table,
+ struct usb_device *udev,
+ struct xhci_virt_ep *virt_ep,
+ struct xhci_tt_bw_info *tt_info)
+{
+ struct xhci_interval_bw *interval_bw;
+ int normalized_interval;
+
+ if (xhci_is_async_ep(ep_bw->type))
+ return;
+
+ if (udev->speed == USB_SPEED_SUPER) {
+ if (xhci_is_sync_in_ep(ep_bw->type))
+ xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
+ xhci_get_ss_bw_consumed(ep_bw);
+ else
+ xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
+ xhci_get_ss_bw_consumed(ep_bw);
+ return;
+ }
+
+ /* SuperSpeed endpoints never get added to intervals in the table, so
+ * this check is only valid for HS/FS/LS devices.
+ */
+ if (list_empty(&virt_ep->bw_endpoint_list))
+ return;
+ /* For LS/FS devices, we need to translate the interval expressed in
+ * microframes to frames.
+ */
+ if (udev->speed == USB_SPEED_HIGH)
+ normalized_interval = ep_bw->ep_interval;
+ else
+ normalized_interval = ep_bw->ep_interval - 3;
+
+ if (normalized_interval == 0)
+ bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
+ interval_bw = &bw_table->interval_bw[normalized_interval];
+ interval_bw->num_packets -= ep_bw->num_packets;
+ switch (udev->speed) {
+ case USB_SPEED_LOW:
+ interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
+ break;
+ case USB_SPEED_FULL:
+ interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
+ break;
+ case USB_SPEED_HIGH:
+ interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
+ break;
+ case USB_SPEED_SUPER:
+ case USB_SPEED_UNKNOWN:
+ case USB_SPEED_WIRELESS:
+ /* Should never happen because only LS/FS/HS endpoints will get
+ * added to the endpoint list.
+ */
+ return;
+ }
+ if (tt_info)
+ tt_info->active_eps -= 1;
+ list_del_init(&virt_ep->bw_endpoint_list);
+}
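+
+/*
+ * Interval normalization example, for illustration: ep_interval uses the
+ * xHCI encoding, i.e. the endpoint period is 2^ep_interval microframes.
+ * A full-speed interrupt endpoint with ep_interval = 5 (32 microframes,
+ * i.e. 4 frames) is therefore accounted in interval_bw[5 - 3] =
+ * interval_bw[2], while a high-speed endpoint keeps its microframe-based
+ * index unchanged.
+ */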
+
+static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
+ struct xhci_bw_info *ep_bw,
+ struct xhci_interval_bw_table *bw_table,
+ struct usb_device *udev,
+ struct xhci_virt_ep *virt_ep,
+ struct xhci_tt_bw_info *tt_info)
+{
+ struct xhci_interval_bw *interval_bw;
+ struct xhci_virt_ep *smaller_ep;
+ int normalized_interval;
+
+ if (xhci_is_async_ep(ep_bw->type))
+ return;
+
+ if (udev->speed == USB_SPEED_SUPER) {
+ if (xhci_is_sync_in_ep(ep_bw->type))
+ xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
+ xhci_get_ss_bw_consumed(ep_bw);
+ else
+ xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
+ xhci_get_ss_bw_consumed(ep_bw);
+ return;
+ }
+
+ /* For LS/FS devices, we need to translate the interval expressed in
+ * microframes to frames.
+ */
+ if (udev->speed == USB_SPEED_HIGH)
+ normalized_interval = ep_bw->ep_interval;
+ else
+ normalized_interval = ep_bw->ep_interval - 3;
+
+ if (normalized_interval == 0)
+ bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
+ interval_bw = &bw_table->interval_bw[normalized_interval];
+ interval_bw->num_packets += ep_bw->num_packets;
+ switch (udev->speed) {
+ case USB_SPEED_LOW:
+ interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
+ break;
+ case USB_SPEED_FULL:
+ interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
+ break;
+ case USB_SPEED_HIGH:
+ interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
+ break;
+ case USB_SPEED_SUPER:
+ case USB_SPEED_UNKNOWN:
+ case USB_SPEED_WIRELESS:
+ /* Should never happen because only LS/FS/HS endpoints will get
+ * added to the endpoint list.
+ */
+ return;
+ }
+
+ if (tt_info)
+ tt_info->active_eps += 1;
+ /* Insert the endpoint into the list, largest max packet size first. */
+ list_for_each_entry(smaller_ep, &interval_bw->endpoints,
+ bw_endpoint_list) {
+ if (ep_bw->max_packet_size >=
+ smaller_ep->bw_info.max_packet_size) {
+ /* Add the new ep before the smaller endpoint */
+ list_add_tail(&virt_ep->bw_endpoint_list,
+ &smaller_ep->bw_endpoint_list);
+ return;
+ }
+ }
+ /* Add the new endpoint at the end of the list. */
+ list_add_tail(&virt_ep->bw_endpoint_list,
+ &interval_bw->endpoints);
+}
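+
+/*
+ * Note on the ordering above: keeping each interval's endpoint list sorted
+ * by descending max packet size means interval_bw[i].endpoints.next always
+ * points at the endpoint with the largest max packet size, which is exactly
+ * what xhci_check_bw_table() reads as largest_mps for that interval.
+ */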
+
+void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int old_active_eps)
+{
+ struct xhci_root_port_bw_info *rh_bw_info;
+ if (!virt_dev->tt_info)
+ return;
+
+ rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
+ if (old_active_eps == 0 &&
+ virt_dev->tt_info->active_eps != 0) {
+ rh_bw_info->num_active_tts += 1;
+ rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
+ } else if (old_active_eps != 0 &&
+ virt_dev->tt_info->active_eps == 0) {
+ rh_bw_info->num_active_tts -= 1;
+ rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
+ }
+}
+
+static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct xhci_container_ctx *in_ctx)
+{
+ struct xhci_bw_info ep_bw_info[31];
+ int i;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ int old_active_eps = 0;
+
+ if (virt_dev->tt_info)
+ old_active_eps = virt_dev->tt_info->active_eps;
+
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < 31; i++) {
+ if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
+ continue;
+
+ /* Make a copy of the BW info in case we need to revert this */
+ memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
+ sizeof(ep_bw_info[i]));
+ /* Drop the endpoint from the interval table if the endpoint is
+ * being dropped or changed.
+ */
+ if (EP_IS_DROPPED(ctrl_ctx, i))
+ xhci_drop_ep_from_interval_table(xhci,
+ &virt_dev->eps[i].bw_info,
+ virt_dev->bw_table,
+ virt_dev->udev,
+ &virt_dev->eps[i],
+ virt_dev->tt_info);
+ }
+ /* Overwrite the information stored in the endpoints' bw_info */
+ xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
+ for (i = 0; i < 31; i++) {
+ /* Add any changed or added endpoints to the interval table */
+ if (EP_IS_ADDED(ctrl_ctx, i))
+ xhci_add_ep_to_interval_table(xhci,
+ &virt_dev->eps[i].bw_info,
+ virt_dev->bw_table,
+ virt_dev->udev,
+ &virt_dev->eps[i],
+ virt_dev->tt_info);
+ }
+
+ if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
+ /* Ok, this fits in the bandwidth we have.
+ * Update the number of active TTs.
+ */
+ xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
+ return 0;
+ }
+
+ /* We don't have enough bandwidth for this, revert the stored info. */
+ for (i = 0; i < 31; i++) {
+ if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
+ continue;
+
+ /* Drop the new copies of any added or changed endpoints from
+ * the interval table.
+ */
+ if (EP_IS_ADDED(ctrl_ctx, i)) {
+ xhci_drop_ep_from_interval_table(xhci,
+ &virt_dev->eps[i].bw_info,
+ virt_dev->bw_table,
+ virt_dev->udev,
+ &virt_dev->eps[i],
+ virt_dev->tt_info);
+ }
+ /* Revert the endpoint back to its old information */
+ memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
+ sizeof(ep_bw_info[i]));
+ /* Add any changed or dropped endpoints back into the table */
+ if (EP_IS_DROPPED(ctrl_ctx, i))
+ xhci_add_ep_to_interval_table(xhci,
+ &virt_dev->eps[i].bw_info,
+ virt_dev->bw_table,
+ virt_dev->udev,
+ &virt_dev->eps[i],
+ virt_dev->tt_info);
+ }
+ return -ENOMEM;
+}
+
+
/* Issue a configure endpoint command or evaluate context command
* and wait for it to finish.
*/
@@ -1482,61 +2604,96 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
bool ctx_change, bool must_succeed)
{
int ret;
- int timeleft;
unsigned long flags;
- struct xhci_container_ctx *in_ctx;
- struct completion *cmd_completion;
- int *cmd_status;
+ struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_virt_device *virt_dev;
+ if (!command)
+ return -EINVAL;
+
spin_lock_irqsave(&xhci->lock, flags);
virt_dev = xhci->devs[udev->slot_id];
- if (command) {
- in_ctx = command->in_ctx;
- cmd_completion = command->completion;
- cmd_status = &command->status;
- command->command_trb = xhci->cmd_ring->enqueue;
- list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
- } else {
- in_ctx = virt_dev->in_ctx;
- cmd_completion = &virt_dev->cmd_completion;
- cmd_status = &virt_dev->cmd_status;
+
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+ if (!ctrl_ctx) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+ xhci_reserve_host_resources(xhci, ctrl_ctx)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ return -ENOMEM;
+ }
+ if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
+ xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+ xhci_free_host_resources(xhci, ctrl_ctx);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough bandwidth\n");
+ return -ENOMEM;
}
- init_completion(cmd_completion);
if (!ctx_change)
- ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
+ ret = xhci_queue_configure_endpoint(xhci, command,
+ command->in_ctx->dma,
udev->slot_id, must_succeed);
else
- ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
- udev->slot_id);
+ ret = xhci_queue_evaluate_context(xhci, command,
+ command->in_ctx->dma,
+ udev->slot_id, must_succeed);
if (ret < 0) {
- if (command)
- list_del(&command->cmd_list);
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+ xhci_free_host_resources(xhci, ctrl_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "FIXME allocate a new ring segment");
return -ENOMEM;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for the configure endpoint command to complete */
- timeleft = wait_for_completion_interruptible_timeout(
- cmd_completion,
- USB_CTRL_SET_TIMEOUT);
- if (timeleft <= 0) {
- xhci_warn(xhci, "%s while waiting for %s command\n",
- timeleft == 0 ? "Timeout" : "Signal",
- ctx_change == 0 ?
- "configure endpoint" :
- "evaluate context");
- /* FIXME cancel the configure endpoint command */
- return -ETIME;
- }
+ wait_for_completion(command->completion);
if (!ctx_change)
- return xhci_configure_endpoint_result(xhci, udev, cmd_status);
- return xhci_evaluate_context_result(xhci, udev, cmd_status);
+ ret = xhci_configure_endpoint_result(xhci, udev,
+ &command->status);
+ else
+ ret = xhci_evaluate_context_result(xhci, udev,
+ &command->status);
+
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* If the command failed, remove the reserved resources.
+ * Otherwise, clean up the estimate to include dropped eps.
+ */
+ if (ret)
+ xhci_free_host_resources(xhci, ctrl_ctx);
+ else
+ xhci_finish_resource_reservation(xhci, ctrl_ctx);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ return ret;
+}
+
+static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
+ struct xhci_virt_device *vdev, int i)
+{
+ struct xhci_virt_ep *ep = &vdev->eps[i];
+
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
+ xhci_get_endpoint_address(i));
+ xhci_free_stream_info(xhci, ep->stream_info);
+ ep->stream_info = NULL;
+ ep->ep_state &= ~EP_HAS_STREAMS;
+ }
}
/* Called after one or more calls to xhci_add_endpoint() or
@@ -1557,39 +2714,70 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_virt_device *virt_dev;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_slot_ctx *slot_ctx;
+ struct xhci_command *command;
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
+ command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
+ if (!command)
+ return -ENOMEM;
+
+ command->in_ctx = virt_dev->in_ctx;
+
/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
- ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
- ctrl_ctx->add_flags |= SLOT_FLAG;
- ctrl_ctx->add_flags &= ~EP0_FLAG;
- ctrl_ctx->drop_flags &= ~SLOT_FLAG;
- ctrl_ctx->drop_flags &= ~EP0_FLAG;
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ ret = -ENOMEM;
+ goto command_cleanup;
+ }
+ ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+ ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
+ ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
+
+	/* Don't issue the command if there are no endpoints to update. */
+ if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
+ ctrl_ctx->drop_flags == 0) {
+ ret = 0;
+ goto command_cleanup;
+ }
xhci_dbg(xhci, "New Input Control Context:\n");
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
xhci_dbg_ctx(xhci, virt_dev->in_ctx,
- LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
+ LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
- ret = xhci_configure_endpoint(xhci, udev, NULL,
+ ret = xhci_configure_endpoint(xhci, udev, command,
false, false);
- if (ret) {
+ if (ret)
/* Callee should call reset_bandwidth() */
- return ret;
- }
+ goto command_cleanup;
xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
xhci_dbg_ctx(xhci, virt_dev->out_ctx,
- LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
+ LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
+ /* Free any rings that were dropped, but not changed. */
+ for (i = 1; i < 31; ++i) {
+ if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
+ !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
+ xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
+ }
+ }
xhci_zero_in_ctx(xhci, virt_dev);
- /* Install new rings and free or cache any old rings */
+ /*
+ * Install any rings for completely new endpoints or changed endpoints,
+ * and free or cache any old rings from changed endpoints.
+ */
for (i = 1; i < 31; ++i) {
if (!virt_dev->eps[i].new_ring)
continue;
@@ -1599,9 +2787,13 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (virt_dev->eps[i].ring) {
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
}
+ xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
virt_dev->eps[i].new_ring = NULL;
}
+command_cleanup:
+ kfree(command->completion);
+ kfree(command);
return ret;
}
@@ -1632,31 +2824,38 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
+ struct xhci_input_control_ctx *ctrl_ctx,
u32 add_flags, u32 drop_flags)
{
- struct xhci_input_control_ctx *ctrl_ctx;
- ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
- ctrl_ctx->add_flags = add_flags;
- ctrl_ctx->drop_flags = drop_flags;
+ ctrl_ctx->add_flags = cpu_to_le32(add_flags);
+ ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
xhci_slot_copy(xhci, in_ctx, out_ctx);
- ctrl_ctx->add_flags |= SLOT_FLAG;
+ ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
xhci_dbg(xhci, "Input Context:\n");
xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
-void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state)
{
+ struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_container_ctx *in_ctx;
struct xhci_ep_ctx *ep_ctx;
u32 added_ctxs;
dma_addr_t addr;
+ in_ctx = xhci->devs[slot_id]->in_ctx;
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ return;
+ }
+
xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
xhci->devs[slot_id]->out_ctx, ep_index);
- in_ctx = xhci->devs[slot_id]->in_ctx;
ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
deq_state->new_deq_ptr);
@@ -1668,11 +2867,12 @@ void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
deq_state->new_deq_ptr);
return;
}
- ep_ctx->deq = addr | deq_state->new_cycle_state;
+ ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
- xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
+ xhci->devs[slot_id]->out_ctx, ctrl_ctx,
+ added_ctxs, added_ctxs);
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
@@ -1681,7 +2881,8 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
struct xhci_dequeue_state deq_state;
struct xhci_virt_ep *ep;
- xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ "Cleaning up stalled endpoint ring");
ep = &xhci->devs[udev->slot_id]->eps[ep_index];
/* We need to move the HW's dequeue pointer past this TD,
* or it will attempt to resend it on the next doorbell ring.
@@ -1694,8 +2895,14 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
* issue a configure endpoint command later.
*/
if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
- xhci_dbg(xhci, "Queueing new dequeue state\n");
- xhci_queue_new_dequeue_state(xhci, udev->slot_id,
+ struct xhci_command *command;
+ /* Can't sleep if we're called from cleanup_halted_endpoint() */
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ if (!command)
+ return;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ "Queueing new dequeue state");
+ xhci_queue_new_dequeue_state(xhci, command, udev->slot_id,
ep_index, ep->stopped_stream, &deq_state);
} else {
/* Better hope no one uses the input context between now and the
@@ -1703,8 +2910,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
* XXX: No idea how this hardware will react when stream rings
* are enabled.
*/
- xhci_dbg(xhci, "Setting up input context for "
- "configure endpoint command\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Setting up input context for "
+ "configure endpoint command");
xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
ep_index, &deq_state);
}
@@ -1725,6 +2933,7 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
unsigned long flags;
int ret;
struct xhci_virt_ep *virt_ep;
+ struct xhci_command *command;
xhci = hcd_to_xhci(hcd);
udev = (struct usb_device *) ep->hcpriv;
@@ -1736,18 +2945,25 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
ep_index = xhci_get_endpoint_index(&ep->desc);
virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
if (!virt_ep->stopped_td) {
- xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
- ep->desc.bEndpointAddress);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ "Endpoint 0x%x not halted, refusing to reset.",
+ ep->desc.bEndpointAddress);
return;
}
if (usb_endpoint_xfer_control(&ep->desc)) {
- xhci_dbg(xhci, "Control endpoint stall already handled.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ "Control endpoint stall already handled.");
return;
}
- xhci_dbg(xhci, "Queueing reset endpoint command\n");
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ if (!command)
+ return;
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ "Queueing reset endpoint command");
spin_lock_irqsave(&xhci->lock, flags);
- ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
+ ret = xhci_queue_reset_ep(xhci, command, udev->slot_id, ep_index);
/*
* Can't change the ring dequeue pointer until it's transitioned to the
* stopped state, which is only upon a successful reset endpoint
@@ -1759,7 +2975,6 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
xhci_ring_cmd_db(xhci);
}
virt_ep->stopped_td = NULL;
- virt_ep->stopped_trb = NULL;
virt_ep->stopped_stream = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1780,7 +2995,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
if (ret <= 0)
return -EINVAL;
- if (ep->ss_ep_comp.bmAttributes == 0) {
+ if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
" descriptor for ep 0x%x does not support streams\n",
ep->desc.bEndpointAddress);
@@ -1849,8 +3064,7 @@ static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
if (ret < 0)
return ret;
- max_streams = USB_SS_MAX_STREAMS(
- eps[i]->ss_ep_comp.bmAttributes);
+ max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
if (max_streams < (*num_streams - 1)) {
xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
eps[i]->desc.bEndpointAddress,
@@ -1886,8 +3100,8 @@ static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
/* Are streams already being freed for the endpoint? */
if (ep_state & EP_GETTING_NO_STREAMS) {
xhci_warn(xhci, "WARN Can't disable streams for "
- "endpoint 0x%x\n, "
- "streams are being disabled already.",
+ "endpoint 0x%x, "
+ "streams are being disabled already\n",
eps[i]->desc.bEndpointAddress);
return 0;
}
@@ -1895,8 +3109,8 @@ static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
if (!(ep_state & EP_HAS_STREAMS) &&
!(ep_state & EP_GETTING_STREAMS)) {
xhci_warn(xhci, "WARN Can't disable streams for "
- "endpoint 0x%x\n, "
- "streams are already disabled!",
+ "endpoint 0x%x, "
+ "streams are already disabled!\n",
eps[i]->desc.bEndpointAddress);
xhci_warn(xhci, "WARN xhci_free_streams() called "
"with non-streams endpoint\n");
@@ -1931,6 +3145,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_hcd *xhci;
struct xhci_virt_device *vdev;
struct xhci_command *config_cmd;
+ struct xhci_input_control_ctx *ctrl_ctx;
unsigned int ep_index;
unsigned int num_stream_ctxs;
unsigned long flags;
@@ -1947,11 +3162,24 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
num_streams);
+ /* MaxPSASize value 0 (2 streams) means streams are not supported */
+ if (HCC_MAX_PSA(xhci->hcc_params) < 4) {
+ xhci_dbg(xhci, "xHCI controller does not support streams.\n");
+ return -ENOSYS;
+ }
+
config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
if (!config_cmd) {
xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
return -ENOMEM;
}
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ xhci_free_command(xhci, config_cmd);
+ return -ENOMEM;
+ }
/* Check to make sure all endpoints are not already configured for
* streams. While we're at it, find the maximum number of streams that
@@ -1973,7 +3201,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
return -EINVAL;
}
vdev = xhci->devs[udev->slot_id];
- /* Mark each endpoint as being in transistion, so
+ /* Mark each endpoint as being in transition, so
* xhci_urb_enqueue() will reject all URBs.
*/
for (i = 0; i < num_eps; i++) {
@@ -2018,7 +3246,8 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
* and add the updated copy from the input context.
*/
xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
- vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+ vdev->out_ctx, ctrl_ctx,
+ changed_ep_bitmask, changed_ep_bitmask);
/* Issue and wait for the configure endpoint command */
ret = xhci_configure_endpoint(xhci, udev, config_cmd,
@@ -2076,6 +3305,7 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_hcd *xhci;
struct xhci_virt_device *vdev;
struct xhci_command *command;
+ struct xhci_input_control_ctx *ctrl_ctx;
unsigned int ep_index;
unsigned long flags;
u32 changed_ep_bitmask;
@@ -2098,6 +3328,14 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
*/
ep_index = xhci_get_endpoint_index(&eps[0]->desc);
command = vdev->eps[ep_index].stream_info->free_streams_command;
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+ if (!ctrl_ctx) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ return -EINVAL;
+ }
+
for (i = 0; i < num_eps; i++) {
struct xhci_ep_ctx *ep_ctx;
@@ -2112,7 +3350,8 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
&vdev->eps[ep_index]);
}
xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
- vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+ vdev->out_ctx, ctrl_ctx,
+ changed_ep_bitmask, changed_ep_bitmask);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Issue and wait for the configure endpoint command,
@@ -2144,6 +3383,35 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
}
/*
+ * Deletes endpoint resources for endpoints that were active before a Reset
+ * Device command, or a Disable Slot command. The Reset Device command leaves
+ * the control endpoint intact, whereas the Disable Slot command deletes it.
+ *
+ * Must be called with xhci->lock held.
+ */
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, bool drop_control_ep)
+{
+ int i;
+ unsigned int num_dropped_eps = 0;
+ unsigned int drop_flags = 0;
+
+ for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
+ if (virt_dev->eps[i].ring) {
+ drop_flags |= 1 << i;
+ num_dropped_eps++;
+ }
+ }
+ xhci->num_active_eps -= num_dropped_eps;
+ if (num_dropped_eps)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Dropped %u ep ctxs, flags = 0x%x, "
+ "%u now active.",
+ num_dropped_eps, drop_flags,
+ xhci->num_active_eps);
+}
+
+/*
* This submits a Reset Device Command, which will set the device state to 0,
* set the device address to 0, and disable all the endpoints except the default
* control endpoint. The USB core should come back and call
@@ -2169,8 +3437,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
unsigned int slot_id;
struct xhci_virt_device *virt_dev;
struct xhci_command *reset_device_cmd;
- int timeleft;
int last_freed_endpoint;
+ struct xhci_slot_ctx *slot_ctx;
+ int old_active_eps = 0;
ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
if (ret <= 0)
@@ -2203,6 +3472,12 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
return -EINVAL;
}
+	/* If the device is not set up, there is no point in resetting it */
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
+ SLOT_STATE_DISABLED)
+ return 0;
+
xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
/* Allocate the command structure that holds the struct completion.
* Assume we're in process context, since the normal device reset
@@ -2218,12 +3493,10 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
/* Attempt to submit the Reset Device command to the command ring */
spin_lock_irqsave(&xhci->lock, flags);
- reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
- list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
- ret = xhci_queue_reset_device(xhci, slot_id);
+
+ ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
if (ret) {
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
- list_del(&reset_device_cmd->cmd_list);
spin_unlock_irqrestore(&xhci->lock, flags);
goto command_cleanup;
}
@@ -2231,22 +3504,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for the Reset Device command to finish */
- timeleft = wait_for_completion_interruptible_timeout(
- reset_device_cmd->completion,
- USB_CTRL_SET_TIMEOUT);
- if (timeleft <= 0) {
- xhci_warn(xhci, "%s while waiting for reset device command\n",
- timeleft == 0 ? "Timeout" : "Signal");
- spin_lock_irqsave(&xhci->lock, flags);
- /* The timeout might have raced with the event ring handler, so
- * only delete from the list if the item isn't poisoned.
- */
- if (reset_device_cmd->cmd_list.next != LIST_POISON1)
- list_del(&reset_device_cmd->cmd_list);
- spin_unlock_irqrestore(&xhci->lock, flags);
- ret = -ETIME;
- goto command_cleanup;
- }
+ wait_for_completion(reset_device_cmd->completion);
/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
* unless we tried to reset a slot ID that wasn't enabled,
@@ -2254,12 +3512,17 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
*/
ret = reset_device_cmd->status;
switch (ret) {
+ case COMP_CMD_ABORT:
+ case COMP_CMD_STOP:
+ xhci_warn(xhci, "Timeout waiting for reset device command\n");
+ ret = -ETIME;
+ goto command_cleanup;
case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
case COMP_CTX_STATE: /* 0.96 completion code for same thing */
- xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
+ xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
slot_id,
xhci_get_slot_state(xhci, virt_dev->out_ctx));
- xhci_info(xhci, "Not freeing device rings.\n");
+ xhci_dbg(xhci, "Not freeing device rings.\n");
/* Don't treat this as an error. May change my mind later. */
ret = 0;
goto command_cleanup;
@@ -2275,14 +3538,43 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
goto command_cleanup;
}
+ /* Free up host controller endpoint resources */
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* Don't delete the default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci, virt_dev, false);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+
/* Everything but endpoint 0 is disabled, so free or cache the rings. */
last_freed_endpoint = 1;
for (i = 1; i < 31; ++i) {
- if (!virt_dev->eps[i].ring)
- continue;
- xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
- last_freed_endpoint = i;
+ struct xhci_virt_ep *ep = &virt_dev->eps[i];
+
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
+ xhci_get_endpoint_address(i));
+ xhci_free_stream_info(xhci, ep->stream_info);
+ ep->stream_info = NULL;
+ ep->ep_state &= ~EP_HAS_STREAMS;
+ }
+
+ if (ep->ring) {
+ xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ last_freed_endpoint = i;
+ }
+ if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
+ xhci_drop_ep_from_interval_table(xhci,
+ &virt_dev->eps[i].bw_info,
+ virt_dev->bw_table,
+ udev,
+ &virt_dev->eps[i],
+ virt_dev->tt_info);
+ xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
}
+ /* If necessary, update the number of active TTs on this root port */
+ xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
+
xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
ret = 0;
@@ -2304,10 +3596,30 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
unsigned long flags;
u32 state;
int i, ret;
+ struct xhci_command *command;
+
+ command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ if (!command)
+ return;
+
+#ifndef CONFIG_USB_DEFAULT_PERSIST
+ /*
+ * We called pm_runtime_get_noresume when the device was attached.
+	 * Decrement the counter here to allow the controller to runtime suspend
+ * if no devices remain.
+ */
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ pm_runtime_put_noidle(hcd->self.controller);
+#endif
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
- if (ret <= 0)
+ /* If the host is halted due to driver unload, we still need to free the
+ * device.
+ */
+ if (ret <= 0 && ret != -ENODEV) {
+ kfree(command);
return;
+ }
virt_dev = xhci->devs[udev->slot_id];
@@ -2319,20 +3631,24 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
spin_lock_irqsave(&xhci->lock, flags);
/* Don't disable the slot if the host controller is dead. */
- state = xhci_readl(xhci, &xhci->op_regs->status);
- if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
+ state = readl(&xhci->op_regs->status);
+ if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_free_virt_device(xhci, udev->slot_id);
spin_unlock_irqrestore(&xhci->lock, flags);
+ kfree(command);
return;
}
- if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
+ if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
+ udev->slot_id)) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
return;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
+
/*
* Event command completion handler will free any data structures
* associated with the slot. XXX Can free sleep?
@@ -2340,6 +3656,29 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
}
/*
+ * Checks if we have enough host controller resources for the default control
+ * endpoint.
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
+{
+ if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Not enough ep ctxs: "
+ "%u active, need to add 1, limit is %u.",
+ xhci->num_active_eps, xhci->limit_active_eps);
+ return -ENOMEM;
+ }
+ xhci->num_active_eps += 1;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Adding 1 ep ctx, %u now active.",
+ xhci->num_active_eps);
+ return 0;
+}
+
+
+/*
* Returns 0 if the xHC ran out of device slots, the Enable Slot command
* timed out, or allocating memory failed. Returns 1 on success.
*/
@@ -2347,77 +3686,138 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
unsigned long flags;
- int timeleft;
int ret;
+ struct xhci_command *command;
+
+ command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ if (!command)
+ return 0;
spin_lock_irqsave(&xhci->lock, flags);
- ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+ command->completion = &xhci->addr_dev;
+ ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ kfree(command);
return 0;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
- /* XXX: how much time for xHC slot assignment? */
- timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
- USB_CTRL_SET_TIMEOUT);
- if (timeleft <= 0) {
- xhci_warn(xhci, "%s while waiting for a slot\n",
- timeleft == 0 ? "Timeout" : "Signal");
- /* FIXME cancel the enable slot request */
- return 0;
- }
+ wait_for_completion(command->completion);
- if (!xhci->slot_id) {
+ if (!xhci->slot_id || command->status != COMP_SUCCESS) {
xhci_err(xhci, "Error while assigning device slot ID\n");
+ xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
+ HCS_MAX_SLOTS(
+ readl(&xhci->cap_regs->hcs_params1)));
+ kfree(command);
return 0;
}
- /* xhci_alloc_virt_device() does not touch rings; no need to lock */
- if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
- /* Disable slot, if we can do it without mem alloc */
- xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
+
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
spin_lock_irqsave(&xhci->lock, flags);
- if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
- xhci_ring_cmd_db(xhci);
+ ret = xhci_reserve_host_control_ep_resources(xhci);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ goto disable_slot;
+ }
spin_unlock_irqrestore(&xhci->lock, flags);
- return 0;
+ }
+ /* Use GFP_NOIO, since this function can be called from
+ * xhci_discover_or_reset_device(), which may be called as part of
+ * mass storage driver error handling.
+ */
+ if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
+ xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
+ goto disable_slot;
}
udev->slot_id = xhci->slot_id;
+
+#ifndef CONFIG_USB_DEFAULT_PERSIST
+ /*
+ * If resetting upon resume, we can't put the controller into runtime
+ * suspend if there is a device attached.
+ */
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ pm_runtime_get_noresume(hcd->self.controller);
+#endif
+
+
+ kfree(command);
/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
return 1;
+
+disable_slot:
+ /* Disable slot, if we can do it without mem alloc */
+ spin_lock_irqsave(&xhci->lock, flags);
+ command->completion = NULL;
+ command->status = 0;
+ if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
+ udev->slot_id))
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return 0;
}
/*
- * Issue an Address Device command (which will issue a SetAddress request to
- * the device).
+ * Issue an Address Device command and optionally send a corresponding
+ * SetAddress request to the device.
* We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
* we should only issue and wait on one address command at the same time.
- *
- * We add one to the device address issued by the hardware because the USB core
- * uses address 1 for the root hubs (even though they're not really devices).
*/
-int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ enum xhci_setup_dev setup)
{
+ const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
unsigned long flags;
- int timeleft;
struct xhci_virt_device *virt_dev;
int ret = 0;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_slot_ctx *slot_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
u64 temp_64;
+ struct xhci_command *command;
if (!udev->slot_id) {
- xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "Bad Slot ID %d", udev->slot_id);
return -EINVAL;
}
virt_dev = xhci->devs[udev->slot_id];
+ if (WARN_ON(!virt_dev)) {
+ /*
+	 * In a plug/unplug torture test with an NEC controller,
+	 * a NULL pointer dereference was observed once because virt_dev
+	 * was 0. Print useful debug info rather than crash if it happens again!
+ */
+ xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
+ udev->slot_id);
+ return -EINVAL;
+ }
+
+ command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ if (!command)
+ return -ENOMEM;
+
+ command->in_ctx = virt_dev->in_ctx;
+ command->completion = &xhci->addr_dev;
+
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ kfree(command);
+ return -EINVAL;
+ }
/*
* If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
@@ -2428,70 +3828,89 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
/* Otherwise, update the control endpoint ring enqueue pointer. */
else
xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
+ ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
+ ctrl_ctx->drop_flags = 0;
+
xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
+ trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
+ le32_to_cpu(slot_ctx->dev_info) >> 27);
spin_lock_irqsave(&xhci->lock, flags);
- ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
- udev->slot_id);
+ ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
+ udev->slot_id, setup);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "FIXME: allocate a command ring segment");
+ kfree(command);
return ret;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
- timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
- USB_CTRL_SET_TIMEOUT);
+ wait_for_completion(command->completion);
+
/* FIXME: From section 4.3.4: "Software shall be responsible for timing
* the SetAddress() "recovery interval" required by USB and aborting the
* command on a timeout.
*/
- if (timeleft <= 0) {
- xhci_warn(xhci, "%s while waiting for a slot\n",
- timeleft == 0 ? "Timeout" : "Signal");
- /* FIXME cancel the address device command */
- return -ETIME;
- }
-
- switch (virt_dev->cmd_status) {
+ switch (command->status) {
+ case COMP_CMD_ABORT:
+ case COMP_CMD_STOP:
+ xhci_warn(xhci, "Timeout while waiting for setup device command\n");
+ ret = -ETIME;
+ break;
case COMP_CTX_STATE:
case COMP_EBADSLT:
- xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
- udev->slot_id);
+ xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
+ act, udev->slot_id);
ret = -EINVAL;
break;
case COMP_TX_ERR:
- dev_warn(&udev->dev, "Device not responding to set address.\n");
+ dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
ret = -EPROTO;
break;
+ case COMP_DEV_ERR:
+ dev_warn(&udev->dev,
+ "ERROR: Incompatible device for setup %s command\n", act);
+ ret = -ENODEV;
+ break;
case COMP_SUCCESS:
- xhci_dbg(xhci, "Successful Address Device command\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "Successful setup %s command", act);
break;
default:
- xhci_err(xhci, "ERROR: unexpected command completion "
- "code 0x%x.\n", virt_dev->cmd_status);
+ xhci_err(xhci,
+ "ERROR: unexpected setup %s command completion code 0x%x.\n",
+ act, command->status);
xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
+ trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
ret = -EINVAL;
break;
}
if (ret) {
+ kfree(command);
return ret;
}
temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
- xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
- xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
- udev->slot_id,
- &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
- (unsigned long long)
- xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
- xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "Op regs DCBAA ptr = %#016llx", temp_64);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "Slot ID %d dcbaa entry @%p = %#016llx",
+ udev->slot_id,
+ &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
+ (unsigned long long)
+ le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "Output Context DMA address = %#08llx",
(unsigned long long)virt_dev->out_ctx->dma);
xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
+ trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
+ le32_to_cpu(slot_ctx->dev_info) >> 27);
xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
/*
@@ -2499,19 +3918,757 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
* address given back to us by the HC.
*/
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
- /* Use kernel assigned address for devices; store xHC assigned
- * address locally. */
- virt_dev->address = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
+ trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
+ le32_to_cpu(slot_ctx->dev_info) >> 27);
/* Zero the input context control for later use */
- ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
ctrl_ctx->add_flags = 0;
ctrl_ctx->drop_flags = 0;
- xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "Internal device address = %d",
+ le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
+ kfree(command);
+ return 0;
+}
+
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
+}
+
+int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
+}
+
+/*
+ * Translate the port index into the real index in the HW port status
+ * registers. Calculate the offset between the port's PORTSC register
+ * and the port status base, then divide by the number of per-port
+ * registers to get the real index. The raw port number is 1-based.
+ */
+int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ __le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
+ __le32 __iomem *addr;
+ int raw_port;
+
+ if (hcd->speed != HCD_USB3)
+ addr = xhci->usb2_ports[port1 - 1];
+ else
+ addr = xhci->usb3_ports[port1 - 1];
+
+ raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
+ return raw_port;
+}
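To make the pointer arithmetic above concrete, here is a hedged user-space sketch (not part of the patch): the register array and port choice are invented, and only NUM_PORT_REGS == 4 is taken from xhci.h.

    #include <stdio.h>

    #define NUM_PORT_REGS 4	/* registers per port, as in xhci.h */

    int main(void)
    {
    	/* Stand-in for the port register block: 4 ports x 4 registers. */
    	unsigned int regs[4 * NUM_PORT_REGS];
    	unsigned int *base_addr = &regs[0];		/* port_status_base */
    	unsigned int *addr = &regs[2 * NUM_PORT_REGS];	/* PORTSC of port 3 */

    	/* Same formula as above: offset / registers-per-port + 1. */
    	printf("raw port = %td\n", (addr - base_addr) / NUM_PORT_REGS + 1);
    	return 0;
    }

The sketch prints 3, matching the 1-based numbering described in the comment.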
+
+/*
+ * Issue an Evaluate Context command to change the Maximum Exit Latency in the
+ * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
+ */
+static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
+ struct usb_device *udev, u16 max_exit_latency)
+{
+ struct xhci_virt_device *virt_dev;
+ struct xhci_command *command;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ struct xhci_slot_ctx *slot_ctx;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return 0;
+ }
+
+ /* Attempt to issue an Evaluate Context command to change the MEL. */
+ virt_dev = xhci->devs[udev->slot_id];
+ command = xhci->lpm_command;
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+ if (!ctrl_ctx) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+ slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
+ slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
+ slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Set up evaluate context for LPM MEL change.");
+ xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
+ xhci_dbg_ctx(xhci, command->in_ctx, 0);
+
+ /* Issue and wait for the evaluate context command. */
+ ret = xhci_configure_endpoint(xhci, udev, command,
+ true, true);
+ xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
+ xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
+
+ if (!ret) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ virt_dev->current_mel = max_exit_latency;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ return ret;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+
+/* BESL to HIRD Encoding array for USB2 LPM */
+static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
+ 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
+
+/* Calculate HIRD/BESL for USB2 PORTPMSC */
+static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
+ struct usb_device *udev)
+{
+ int u2del, besl, besl_host;
+ int besl_device = 0;
+ u32 field;
+
+ u2del = HCS_U2_LATENCY(xhci->hcs_params3);
+ field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
+
+ if (field & USB_BESL_SUPPORT) {
+ for (besl_host = 0; besl_host < 16; besl_host++) {
+ if (xhci_besl_encoding[besl_host] >= u2del)
+ break;
+ }
+ /* Use baseline BESL value as default */
+ if (field & USB_BESL_BASELINE_VALID)
+ besl_device = USB_GET_BESL_BASELINE(field);
+ else if (field & USB_BESL_DEEP_VALID)
+ besl_device = USB_GET_BESL_DEEP(field);
+ } else {
+ if (u2del <= 50)
+ besl_host = 0;
+ else
+ besl_host = (u2del - 51) / 75 + 1;
+ }
+
+ besl = besl_host + besl_device;
+ if (besl > 15)
+ besl = 15;
+
+ return besl;
+}
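A hedged numeric walk-through of the selection above, as a stand-alone sketch: the encoding table is copied from xhci_besl_encoding, while the U2DEL value is invented.

    #include <stdio.h>

    /* Copy of xhci_besl_encoding[], in microseconds. */
    static const int besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000,
    	2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};

    int main(void)
    {
    	int u2del = 400;	/* invented host U2 exit latency, in us */
    	int besl_host;

    	/* Smallest BESL index whose value covers u2del: index 4 (400 us). */
    	for (besl_host = 0; besl_host < 16; besl_host++)
    		if (besl_encoding[besl_host] >= u2del)
    			break;
    	printf("besl_host = %d\n", besl_host);

    	/* The non-BESL fallback formula: (400 - 51) / 75 + 1 = 5. */
    	printf("hird fallback = %d\n", (u2del - 51) / 75 + 1);
    	return 0;
    }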
+
+/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
+static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
+{
+ u32 field;
+ int l1;
+ int besld = 0;
+ int hirdm = 0;
+
+ field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
+
+ /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
+ l1 = udev->l1_params.timeout / 256;
+
+ /* device has preferred BESLD */
+ if (field & USB_BESL_DEEP_VALID) {
+ besld = USB_GET_BESL_DEEP(field);
+ hirdm = 1;
+ }
+
+ return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
+}
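The PORTHLPMC value built above packs three fields into one register. A hedged stand-alone sketch (the field macros mirror the PORTHLPMC definitions added to xhci.h later in this patch; the L1 timeout and BESLD values are invented):

    #include <stdio.h>

    /* Mirrors of the PORTHLPMC field macros from xhci.h. */
    #define PORT_HIRDM(p)		((p) & 3)
    #define PORT_L1_TIMEOUT(p)	(((p) & 0xff) << 2)
    #define PORT_BESLD(p)		(((p) & 0xf) << 10)

    int main(void)
    {
    	int l1 = 512 / 256;	/* a 512 us L1 timeout in 256 us steps -> 2 */
    	int besld = 5;		/* invented deep-BESL value from the device */
    	int hirdm = 1;		/* device provided a BESLD, so use BESL-deep mode */
    	unsigned int hlpm_val;

    	hlpm_val = PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
    	printf("PORTHLPMC value = 0x%x\n", hlpm_val);	/* prints 0x1409 */
    	return 0;
    }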
+
+int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+ struct usb_device *udev, int enable)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ __le32 __iomem **port_array;
+ __le32 __iomem *pm_addr, *hlpm_addr;
+ u32 pm_val, hlpm_val, field;
+ unsigned int port_num;
+ unsigned long flags;
+ int hird, exit_latency;
+ int ret;
+
+ if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
+ !udev->lpm_capable)
+ return -EPERM;
+
+ if (!udev->parent || udev->parent->parent ||
+ udev->descriptor.bDeviceClass == USB_CLASS_HUB)
+ return -EPERM;
+
+ if (udev->usb2_hw_lpm_capable != 1)
+ return -EPERM;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ port_array = xhci->usb2_ports;
+ port_num = udev->portnum - 1;
+ pm_addr = port_array[port_num] + PORTPMSC;
+ pm_val = readl(pm_addr);
+ hlpm_addr = port_array[port_num] + PORTHLPMC;
+ field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
+
+ xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
+ enable ? "enable" : "disable", port_num + 1);
+
+ if (enable) {
+ /* Host supports BESL timeout instead of HIRD */
+ if (udev->usb2_hw_lpm_besl_capable) {
+ /* if device doesn't have a preferred BESL value use a
+ * default one which works with mixed HIRD and BESL
+ * systems. See XHCI_DEFAULT_BESL definition in xhci.h
+ */
+ if ((field & USB_BESL_SUPPORT) &&
+ (field & USB_BESL_BASELINE_VALID))
+ hird = USB_GET_BESL_BASELINE(field);
+ else
+ hird = udev->l1_params.besl;
+
+ exit_latency = xhci_besl_encoding[hird];
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+			/* The USB 3.0 code dedicates one xhci->lpm_command->in_ctx
+			 * input context to link power management evaluate
+			 * context commands. It is protected by the hcd->bandwidth
+			 * mutex and is shared by all devices. We need to set
+			 * the max exit latency for USB 2 BESL LPM as well, so
+			 * use the same mutex and xhci_change_max_exit_latency().
+ */
+ mutex_lock(hcd->bandwidth_mutex);
+ ret = xhci_change_max_exit_latency(xhci, udev,
+ exit_latency);
+ mutex_unlock(hcd->bandwidth_mutex);
+
+ if (ret < 0)
+ return ret;
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
+ writel(hlpm_val, hlpm_addr);
+ /* flush write */
+ readl(hlpm_addr);
+ } else {
+ hird = xhci_calculate_hird_besl(xhci, udev);
+ }
+
+ pm_val &= ~PORT_HIRD_MASK;
+ pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
+ writel(pm_val, pm_addr);
+ pm_val = readl(pm_addr);
+ pm_val |= PORT_HLE;
+ writel(pm_val, pm_addr);
+ /* flush write */
+ readl(pm_addr);
+ } else {
+ pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
+ writel(pm_val, pm_addr);
+ /* flush write */
+ readl(pm_addr);
+ if (udev->usb2_hw_lpm_besl_capable) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ mutex_lock(hcd->bandwidth_mutex);
+ xhci_change_max_exit_latency(xhci, udev, 0);
+ mutex_unlock(hcd->bandwidth_mutex);
+ return 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
}
+/* Check if a USB2 port supports a given extended capability protocol.
+ * Only the extended protocol capability values of USB2 ports are cached.
+ * Return 1 if the capability is supported.
+ */
+static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
+ unsigned capability)
+{
+ u32 port_offset, port_count;
+ int i;
+
+ for (i = 0; i < xhci->num_ext_caps; i++) {
+ if (xhci->ext_caps[i] & capability) {
+			/* port offsets start at 1 */
+ port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
+ port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
+ if (port >= port_offset &&
+ port < port_offset + port_count)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int portnum = udev->portnum - 1;
+
+ if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
+ !udev->lpm_capable)
+ return 0;
+
+	/* For now, only support LPM for non-hub devices connected to the root hub */
+ if (!udev->parent || udev->parent->parent ||
+ udev->descriptor.bDeviceClass == USB_CLASS_HUB)
+ return 0;
+
+ if (xhci->hw_lpm_support == 1 &&
+ xhci_check_usb2_port_capability(
+ xhci, portnum, XHCI_HLC)) {
+ udev->usb2_hw_lpm_capable = 1;
+ udev->l1_params.timeout = XHCI_L1_TIMEOUT;
+ udev->l1_params.besl = XHCI_DEFAULT_BESL;
+ if (xhci_check_usb2_port_capability(xhci, portnum,
+ XHCI_BLC))
+ udev->usb2_hw_lpm_besl_capable = 1;
+ }
+
+ return 0;
+}
+
+#else
+
+int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+ struct usb_device *udev, int enable)
+{
+ return 0;
+}
+
+int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+/*---------------------- USB 3.0 Link PM functions ------------------------*/
+
+#ifdef CONFIG_PM
+/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
+static unsigned long long xhci_service_interval_to_ns(
+ struct usb_endpoint_descriptor *desc)
+{
+ return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
+}
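For reference, a hedged worked example of the service-interval formula above; the bInterval value is invented.

    #include <stdio.h>

    int main(void)
    {
    	int bInterval = 4;	/* invented: a 1 ms periodic endpoint */
    	unsigned long long ns = (1ULL << (bInterval - 1)) * 125 * 1000;

    	/* 2^(4 - 1) * 125 us = 1000 us = 1000000 ns */
    	printf("service interval = %llu ns\n", ns);
    	return 0;
    }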
+
+static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
+ enum usb3_link_state state)
+{
+ unsigned long long sel;
+ unsigned long long pel;
+ unsigned int max_sel_pel;
+ char *state_name;
+
+ switch (state) {
+ case USB3_LPM_U1:
+ /* Convert SEL and PEL stored in nanoseconds to microseconds */
+ sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
+ pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
+ max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
+ state_name = "U1";
+ break;
+ case USB3_LPM_U2:
+ sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
+ pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
+ max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
+ state_name = "U2";
+ break;
+ default:
+ dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
+ __func__);
+ return USB3_LPM_DISABLED;
+ }
+
+ if (sel <= max_sel_pel && pel <= max_sel_pel)
+ return USB3_LPM_DEVICE_INITIATED;
+
+ if (sel > max_sel_pel)
+ dev_dbg(&udev->dev, "Device-initiated %s disabled "
+ "due to long SEL %llu ms\n",
+ state_name, sel);
+ else
+ dev_dbg(&udev->dev, "Device-initiated %s disabled "
+ "due to long PEL %llu ms\n",
+ state_name, pel);
+ return USB3_LPM_DISABLED;
+}
+
+/* Returns the hub-encoded U1 timeout value.
+ * The U1 timeout should be the maximum of the following values:
+ * - For control endpoints, U1 system exit latency (SEL) * 3
+ * - For bulk endpoints, U1 SEL * 5
+ * - For interrupt endpoints:
+ * - Notification EPs, U1 SEL * 3
+ * - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
+ * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
+ */
+static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev,
+ struct usb_endpoint_descriptor *desc)
+{
+ unsigned long long timeout_ns;
+ int ep_type;
+ int intr_type;
+
+ ep_type = usb_endpoint_type(desc);
+ switch (ep_type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ timeout_ns = udev->u1_params.sel * 3;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ timeout_ns = udev->u1_params.sel * 5;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ intr_type = usb_endpoint_interrupt_type(desc);
+ if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
+ timeout_ns = udev->u1_params.sel * 3;
+ break;
+ }
+ /* Otherwise the calculation is the same as isoc eps */
+ case USB_ENDPOINT_XFER_ISOC:
+ timeout_ns = xhci_service_interval_to_ns(desc);
+ timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
+ if (timeout_ns < udev->u1_params.sel * 2)
+ timeout_ns = udev->u1_params.sel * 2;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The U1 timeout is encoded in 1us intervals. */
+ timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
+ /* Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */
+ if (timeout_ns == USB3_LPM_DISABLED)
+ timeout_ns++;
+
+ /* If the necessary timeout value is bigger than what we can set in the
+ * USB 3.0 hub, we have to disable hub-initiated U1.
+ */
+ if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
+ return timeout_ns;
+ dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
+ "due to long timeout %llu ms\n", timeout_ns);
+ return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
+}
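A hedged example of the bulk-endpoint branch above; the 120 us U1 SEL is invented.

    #include <stdio.h>

    #define DIV_ROUND_UP_ULL(n, d)	(((n) + (d) - 1) / (d))

    int main(void)
    {
    	unsigned long long u1_sel_ns = 120ULL * 1000;	/* invented: 120 us SEL */
    	unsigned long long timeout_ns = u1_sel_ns * 5;	/* bulk rule: U1 SEL * 5 */

    	/* Encoded in 1 us units: 600, well below the hub maximum. */
    	printf("U1 timeout field = %llu\n", DIV_ROUND_UP_ULL(timeout_ns, 1000));
    	return 0;
    }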
+
+/* Returns the hub-encoded U2 timeout value.
+ * The U2 timeout should be the maximum of:
+ * - 10 ms (to avoid the bandwidth impact on the scheduler)
+ * - largest bInterval of any active periodic endpoint (to avoid going
+ * into lower power link states between intervals).
+ * - the U2 Exit Latency of the device
+ */
+static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,
+ struct usb_endpoint_descriptor *desc)
+{
+ unsigned long long timeout_ns;
+ unsigned long long u2_del_ns;
+
+ timeout_ns = 10 * 1000 * 1000;
+
+ if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
+ (xhci_service_interval_to_ns(desc) > timeout_ns))
+ timeout_ns = xhci_service_interval_to_ns(desc);
+
+ u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
+ if (u2_del_ns > timeout_ns)
+ timeout_ns = u2_del_ns;
+
+ /* The U2 timeout is encoded in 256us intervals */
+ timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
+ /* If the necessary timeout value is bigger than what we can set in the
+ * USB 3.0 hub, we have to disable hub-initiated U2.
+ */
+ if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
+ return timeout_ns;
+ dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
+ "due to long timeout %llu ms\n", timeout_ns);
+ return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
+}
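And a hedged example of the U2 calculation above; the 2 ms device U2 exit latency is invented, so the 10 ms floor dominates.

    #include <stdio.h>

    #define DIV_ROUND_UP_ULL(n, d)	(((n) + (d) - 1) / (d))

    int main(void)
    {
    	unsigned long long timeout_ns = 10ULL * 1000 * 1000;	/* 10 ms floor */
    	unsigned long long u2_del_ns = 2000ULL * 1000;		/* invented U2DEL */

    	if (u2_del_ns > timeout_ns)
    		timeout_ns = u2_del_ns;

    	/* Encoded in 256 us units: ceil(10000000 / 256000) = 40. */
    	printf("U2 timeout field = %llu\n",
    	       DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000));
    	return 0;
    }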
+
+static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct usb_endpoint_descriptor *desc,
+ enum usb3_link_state state,
+ u16 *timeout)
+{
+ if (state == USB3_LPM_U1) {
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ return xhci_calculate_intel_u1_timeout(udev, desc);
+ } else {
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ return xhci_calculate_intel_u2_timeout(udev, desc);
+ }
+
+ return USB3_LPM_DISABLED;
+}
+
+static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct usb_endpoint_descriptor *desc,
+ enum usb3_link_state state,
+ u16 *timeout)
+{
+ u16 alt_timeout;
+
+ alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
+ desc, state, timeout);
+
+ /* If we found we can't enable hub-initiated LPM, or
+ * the U1 or U2 exit latency was too high to allow
+ * device-initiated LPM as well, just stop searching.
+ */
+ if (alt_timeout == USB3_LPM_DISABLED ||
+ alt_timeout == USB3_LPM_DEVICE_INITIATED) {
+ *timeout = alt_timeout;
+ return -E2BIG;
+ }
+ if (alt_timeout > *timeout)
+ *timeout = alt_timeout;
+ return 0;
+}
+
+static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct usb_host_interface *alt,
+ enum usb3_link_state state,
+ u16 *timeout)
+{
+ int j;
+
+ for (j = 0; j < alt->desc.bNumEndpoints; j++) {
+ if (xhci_update_timeout_for_endpoint(xhci, udev,
+ &alt->endpoint[j].desc, state, timeout))
+ return -E2BIG;
+ continue;
+ }
+ return 0;
+}
+
+static int xhci_check_intel_tier_policy(struct usb_device *udev,
+ enum usb3_link_state state)
+{
+ struct usb_device *parent;
+ unsigned int num_hubs;
+
+ if (state == USB3_LPM_U2)
+ return 0;
+
+ /* Don't enable U1 if the device is on a 2nd tier hub or lower. */
+ for (parent = udev->parent, num_hubs = 0; parent->parent;
+ parent = parent->parent)
+ num_hubs++;
+
+ if (num_hubs < 2)
+ return 0;
+
+ dev_dbg(&udev->dev, "Disabling U1 link state for device"
+ " below second-tier hub.\n");
+ dev_dbg(&udev->dev, "Plug device into first-tier hub "
+ "to decrease power consumption.\n");
+ return -E2BIG;
+}
+
+static int xhci_check_tier_policy(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ enum usb3_link_state state)
+{
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ return xhci_check_intel_tier_policy(udev, state);
+ return -EINVAL;
+}
+
+/* Returns the U1 or U2 timeout that should be enabled.
+ * If the tier check or timeout setting functions return with a non-zero exit
+ * code, that means the timeout value has been finalized and we shouldn't look
+ * at any more endpoints.
+ */
+static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
+ struct usb_device *udev, enum usb3_link_state state)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct usb_host_config *config;
+ char *state_name;
+ int i;
+ u16 timeout = USB3_LPM_DISABLED;
+
+ if (state == USB3_LPM_U1)
+ state_name = "U1";
+ else if (state == USB3_LPM_U2)
+ state_name = "U2";
+ else {
+ dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
+ state);
+ return timeout;
+ }
+
+ if (xhci_check_tier_policy(xhci, udev, state) < 0)
+ return timeout;
+
+ /* Gather some information about the currently installed configuration
+ * and alternate interface settings.
+ */
+ if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
+ state, &timeout))
+ return timeout;
+
+ config = udev->actconfig;
+ if (!config)
+ return timeout;
+
+ for (i = 0; i < config->desc.bNumInterfaces; i++) {
+ struct usb_driver *driver;
+ struct usb_interface *intf = config->interface[i];
+
+ if (!intf)
+ continue;
+
+ /* Check if any currently bound drivers want hub-initiated LPM
+ * disabled.
+ */
+ if (intf->dev.driver) {
+ driver = to_usb_driver(intf->dev.driver);
+ if (driver && driver->disable_hub_initiated_lpm) {
+ dev_dbg(&udev->dev, "Hub-initiated %s disabled "
+ "at request of driver %s\n",
+ state_name, driver->name);
+ return xhci_get_timeout_no_hub_lpm(udev, state);
+ }
+ }
+
+ /* Not sure how this could happen... */
+ if (!intf->cur_altsetting)
+ continue;
+
+ if (xhci_update_timeout_for_interface(xhci, udev,
+ intf->cur_altsetting,
+ state, &timeout))
+ return timeout;
+ }
+ return timeout;
+}
+
+static int calculate_max_exit_latency(struct usb_device *udev,
+ enum usb3_link_state state_changed,
+ u16 hub_encoded_timeout)
+{
+ unsigned long long u1_mel_us = 0;
+ unsigned long long u2_mel_us = 0;
+ unsigned long long mel_us = 0;
+ bool disabling_u1;
+ bool disabling_u2;
+ bool enabling_u1;
+ bool enabling_u2;
+
+ disabling_u1 = (state_changed == USB3_LPM_U1 &&
+ hub_encoded_timeout == USB3_LPM_DISABLED);
+ disabling_u2 = (state_changed == USB3_LPM_U2 &&
+ hub_encoded_timeout == USB3_LPM_DISABLED);
+
+ enabling_u1 = (state_changed == USB3_LPM_U1 &&
+ hub_encoded_timeout != USB3_LPM_DISABLED);
+ enabling_u2 = (state_changed == USB3_LPM_U2 &&
+ hub_encoded_timeout != USB3_LPM_DISABLED);
+
+ /* If U1 was already enabled and we're not disabling it,
+ * or we're going to enable U1, account for the U1 max exit latency.
+ */
+ if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
+ enabling_u1)
+ u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
+ if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
+ enabling_u2)
+ u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
+
+ if (u1_mel_us > u2_mel_us)
+ mel_us = u1_mel_us;
+ else
+ mel_us = u2_mel_us;
+ /* xHCI host controller max exit latency field is only 16 bits wide. */
+ if (mel_us > MAX_EXIT) {
+ dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
+ "is too big.\n", mel_us);
+ return -E2BIG;
+ }
+ return mel_us;
+}
+
+/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
+int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
+ struct usb_device *udev, enum usb3_link_state state)
+{
+ struct xhci_hcd *xhci;
+ u16 hub_encoded_timeout;
+ int mel;
+ int ret;
+
+ xhci = hcd_to_xhci(hcd);
+ /* The LPM timeout values are pretty host-controller specific, so don't
+ * enable hub-initiated timeouts unless the vendor has provided
+ * information about their timeout algorithm.
+ */
+ if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
+ !xhci->devs[udev->slot_id])
+ return USB3_LPM_DISABLED;
+
+ hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
+ mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
+ if (mel < 0) {
+ /* Max Exit Latency is too big, disable LPM. */
+ hub_encoded_timeout = USB3_LPM_DISABLED;
+ mel = 0;
+ }
+
+ ret = xhci_change_max_exit_latency(xhci, udev, mel);
+ if (ret)
+ return ret;
+ return hub_encoded_timeout;
+}
+
+int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
+ struct usb_device *udev, enum usb3_link_state state)
+{
+ struct xhci_hcd *xhci;
+ u16 mel;
+ int ret;
+
+ xhci = hcd_to_xhci(hcd);
+ if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
+ !xhci->devs[udev->slot_id])
+ return 0;
+
+ mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
+ ret = xhci_change_max_exit_latency(xhci, udev, mel);
+ if (ret)
+ return ret;
+ return 0;
+}
+#else /* CONFIG_PM */
+
+int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
+ struct usb_device *udev, enum usb3_link_state state)
+{
+ return USB3_LPM_DISABLED;
+}
+
+int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
+ struct usb_device *udev, enum usb3_link_state state)
+{
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+/*-------------------------------------------------------------------------*/
+
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
* internal data structures for the device.
*/
@@ -2541,28 +4698,47 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
return -ENOMEM;
}
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ xhci_free_command(xhci, config_cmd);
+ return -ENOMEM;
+ }
spin_lock_irqsave(&xhci->lock, flags);
+ if (hdev->speed == USB_SPEED_HIGH &&
+ xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
+ xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
+ xhci_free_command(xhci, config_cmd);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return -ENOMEM;
+ }
+
xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
- ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
- ctrl_ctx->add_flags |= SLOT_FLAG;
+ ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
- slot_ctx->dev_info |= DEV_HUB;
+ slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
if (tt->multi)
- slot_ctx->dev_info |= DEV_MTT;
+ slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
if (xhci->hci_version > 0x95) {
xhci_dbg(xhci, "xHCI version %x needs hub "
"TT think time and number of ports\n",
(unsigned int) xhci->hci_version);
- slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
+ slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
/* Set TT think time - convert from ns to FS bit times.
* 0 = 8 FS bit times, 1 = 16 FS bit times,
* 2 = 24 FS bit times, 3 = 32 FS bit times.
+ *
+ * xHCI 1.0: this field shall be 0 if the device is not a
+	 * High-speed hub.
*/
think_time = tt->think_time;
if (think_time != 0)
think_time = (think_time / 666) - 1;
- slot_ctx->tt_info |= TT_THINK_TIME(think_time);
+ if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
+ slot_ctx->tt_info |=
+ cpu_to_le32(TT_THINK_TIME(think_time));
} else {
xhci_dbg(xhci, "xHCI version %x doesn't need hub "
"TT think time or number of ports\n",
@@ -2598,7 +4774,103 @@ int xhci_get_frame(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
/* EHCI mods by the periodic size. Why? */
- return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
+ return readl(&xhci->run_regs->microframe_index) >> 3;
+}
+
+int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+{
+ struct xhci_hcd *xhci;
+ struct device *dev = hcd->self.controller;
+ int retval;
+
+ /* Accept arbitrarily long scatter-gather lists */
+ hcd->self.sg_tablesize = ~0;
+
+	/* support building packets from discontinuous buffers */
+ hcd->self.no_sg_constraint = 1;
+
+ /* XHCI controllers don't stop the ep queue on short packets :| */
+ hcd->self.no_stop_on_short = 1;
+
+ if (usb_hcd_is_primary_hcd(hcd)) {
+ xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
+ if (!xhci)
+ return -ENOMEM;
+ *((struct xhci_hcd **) hcd->hcd_priv) = xhci;
+ xhci->main_hcd = hcd;
+ /* Mark the first roothub as being USB 2.0.
+ * The xHCI driver will register the USB 3.0 roothub.
+ */
+ hcd->speed = HCD_USB2;
+ hcd->self.root_hub->speed = USB_SPEED_HIGH;
+ /*
+		 * The USB 2.0 roothub under xHCI has an integrated TT
+		 * (rate matching hub), as opposed to having an OHCI/UHCI
+		 * companion controller.
+ */
+ hcd->has_tt = 1;
+ } else {
+ /* xHCI private pointer was set in xhci_pci_probe for the second
+ * registered roothub.
+ */
+ return 0;
+ }
+
+ xhci->cap_regs = hcd->regs;
+ xhci->op_regs = hcd->regs +
+ HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
+ xhci->run_regs = hcd->regs +
+ (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
+ /* Cache read-only capability registers */
+ xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
+ xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
+ xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
+ xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
+ xhci->hci_version = HC_VERSION(xhci->hcc_params);
+ xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
+ xhci_print_registers(xhci);
+
+ xhci->quirks = quirks;
+
+ get_quirks(dev, xhci);
+
+	/* xHCI controllers that follow the xHCI 1.0 spec generate a spurious
+	 * success event after a short transfer. This quirk ignores such
+	 * spurious events.
+ */
+ if (xhci->hci_version > 0x96)
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+
+ /* Make sure the HC is halted. */
+ retval = xhci_halt(xhci);
+ if (retval)
+ goto error;
+
+ xhci_dbg(xhci, "Resetting HCD\n");
+ /* Reset the internal HC memory state and registers. */
+ retval = xhci_reset(xhci);
+ if (retval)
+ goto error;
+ xhci_dbg(xhci, "Reset complete\n");
+
+ /* Set dma_mask and coherent_dma_mask to 64-bits,
+ * if xHC supports 64-bit addressing */
+ if (HCC_64BIT_ADDR(xhci->hcc_params) &&
+ !dma_set_mask(dev, DMA_BIT_MASK(64))) {
+ xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+ }
+
+ xhci_dbg(xhci, "Calling HCD init\n");
+ /* Initialize HCD and host controller data structures. */
+ retval = xhci_init(hcd);
+ if (retval)
+ goto error;
+ xhci_dbg(xhci, "Called HCD init\n");
+ return 0;
+error:
+ kfree(xhci);
+ return retval;
}
MODULE_DESCRIPTION(DRIVER_DESC);
@@ -2607,16 +4879,18 @@ MODULE_LICENSE("GPL");
static int __init xhci_hcd_init(void)
{
-#ifdef CONFIG_PCI
- int retval = 0;
+ int retval;
retval = xhci_register_pci();
-
if (retval < 0) {
- printk(KERN_DEBUG "Problem registering PCI driver.");
+ pr_debug("Problem registering PCI driver.\n");
return retval;
}
-#endif
+ retval = xhci_register_plat();
+ if (retval < 0) {
+ pr_debug("Problem registering platform driver.\n");
+ goto unreg_pci;
+ }
/*
* Check the compiler generated sizes of structures that must be laid
* out in specific ways for hardware access.
@@ -2634,15 +4908,16 @@ static int __init xhci_hcd_init(void)
BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
- BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
return 0;
+unreg_pci:
+ xhci_unregister_pci();
+ return retval;
}
module_init(xhci_hcd_init);
static void __exit xhci_hcd_cleanup(void)
{
-#ifdef CONFIG_PCI
xhci_unregister_pci();
-#endif
+ xhci_unregister_plat();
}
module_exit(xhci_hcd_cleanup);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 93d3bf4d213..9ffecd56600 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -30,6 +30,7 @@
/* Code sharing between pci-quirks and xhci hcd */
#include "xhci-ext-caps.h"
+#include "pci-quirks.h"
/* xHCI PCI Configuration Registers */
#define XHCI_SBRN_OFFSET (0x60)
@@ -56,13 +57,13 @@
* @run_regs_off: RTSOFF - Runtime register space offset
*/
struct xhci_cap_regs {
- u32 hc_capbase;
- u32 hcs_params1;
- u32 hcs_params2;
- u32 hcs_params3;
- u32 hcc_params;
- u32 db_off;
- u32 run_regs_off;
+ __le32 hc_capbase;
+ __le32 hcs_params1;
+ __le32 hcs_params2;
+ __le32 hcs_params3;
+ __le32 hcc_params;
+ __le32 db_off;
+ __le32 run_regs_off;
/* Reserved up to (CAPLENGTH - 0x1C) */
};
@@ -131,6 +132,11 @@ struct xhci_cap_regs {
/* Number of registers per port */
#define NUM_PORT_REGS 4
+#define PORTSC 0
+#define PORTPMSC 1
+#define PORTLI 2
+#define PORTHLPMC 3
+
/**
* struct xhci_op_regs - xHCI Host Controller Operational Registers.
* @command: USBCMD - xHC command register
@@ -154,26 +160,26 @@ struct xhci_cap_regs {
* devices.
*/
struct xhci_op_regs {
- u32 command;
- u32 status;
- u32 page_size;
- u32 reserved1;
- u32 reserved2;
- u32 dev_notification;
- u64 cmd_ring;
+ __le32 command;
+ __le32 status;
+ __le32 page_size;
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 dev_notification;
+ __le64 cmd_ring;
/* rsvd: offset 0x20-2F */
- u32 reserved3[4];
- u64 dcbaa_ptr;
- u32 config_reg;
+ __le32 reserved3[4];
+ __le64 dcbaa_ptr;
+ __le32 config_reg;
/* rsvd: offset 0x3C-3FF */
- u32 reserved4[241];
+ __le32 reserved4[241];
/* port 1 registers, which serve as a base address for other ports */
- u32 port_status_base;
- u32 port_power_base;
- u32 port_link_base;
- u32 reserved5;
+ __le32 port_status_base;
+ __le32 port_power_base;
+ __le32 port_link_base;
+ __le32 reserved5;
/* registers for ports 2-255 */
- u32 reserved6[NUM_PORT_REGS*254];
+ __le32 reserved6[NUM_PORT_REGS*254];
};
/* USBCMD - USB command - command bitmasks */
@@ -204,6 +210,10 @@ struct xhci_op_regs {
#define CMD_PM_INDEX (1 << 11)
/* bits 12:31 are reserved (and should be preserved on writes). */
+/* IMAN - Interrupt Management Register */
+#define IMAN_IE (1 << 1)
+#define IMAN_IP (1 << 0)
+
/* USBSTS - USB status - status bitmasks */
/* HC not running - set to 1 when run/stop bit is cleared. */
#define STS_HALT XHCI_STS_HALT
@@ -232,7 +242,7 @@ struct xhci_op_regs {
* notification type that matches a bit set in this bit field.
*/
#define DEV_NOTE_MASK (0xffff)
-#define ENABLE_DEV_NOTE(x) (1 << x)
+#define ENABLE_DEV_NOTE(x) (1 << (x))
/* Most of the device notification types should only be used for debug.
* SW does need to pay attention to function wake notifications.
*/
@@ -271,6 +281,7 @@ struct xhci_op_regs {
*/
#define PORT_PLS_MASK (0xf << 5)
#define XDEV_U0 (0x0 << 5)
+#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
#define XDEV_RESUME (0xf << 5)
/* true: port has power (see HCC_PPC) */
@@ -335,7 +346,11 @@ struct xhci_op_regs {
#define PORT_PLC (1 << 22)
/* port configure error change - port failed to configure its link partner */
#define PORT_CEC (1 << 23)
-/* bit 24 reserved */
+/* Cold Attach Status - xHC can set this bit to report device attached during
+ * Sx state. A warm port reset should be performed to clear this bit and move
+ * the port to the connected state.
+ */
+#define PORT_CAS (1 << 24)
/* wake on connect (enable) */
#define PORT_WKCONN_E (1 << 25)
/* wake on disconnect (enable) */
@@ -348,17 +363,50 @@ struct xhci_op_regs {
/* Initiate a warm port reset - complete when PORT_WRC is '1' */
#define PORT_WR (1 << 31)
+/* We mark duplicate entries with -1 */
+#define DUPLICATE_ENTRY ((u8)(-1))
+
/* Port Power Management Status and Control - port_power_base bitmasks */
/* Inactivity timer value for transitions into U1, in microseconds.
* Timeout can be up to 127us. 0xFF means an infinite timeout.
*/
#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
+#define PORT_U1_TIMEOUT_MASK 0xff
/* Inactivity timer value for transitions into U2 */
#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
+#define PORT_U2_TIMEOUT_MASK (0xff << 8)
/* Bits 24:31 for port testing */
/* USB2 Protocol PORTSPMSC */
-#define PORT_RWE (1 << 0x3)
+#define PORT_L1S_MASK 7
+#define PORT_L1S_SUCCESS 1
+#define PORT_RWE (1 << 3)
+#define PORT_HIRD(p) (((p) & 0xf) << 4)
+#define PORT_HIRD_MASK (0xf << 4)
+#define PORT_L1DS_MASK (0xff << 8)
+#define PORT_L1DS(p) (((p) & 0xff) << 8)
+#define PORT_HLE (1 << 16)
+
+
+/* USB2 Protocol PORTHLPMC */
+#define PORT_HIRDM(p)		((p) & 3)
+#define PORT_L1_TIMEOUT(p)	(((p) & 0xff) << 2)
+#define PORT_BESLD(p)		(((p) & 0xf) << 10)
+
+/* use 512 microseconds as USB2 LPM L1 default timeout. */
+#define XHCI_L1_TIMEOUT 512
+
+/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
+ * Safe to use with mixed HIRD and BESL systems (host and device) and is used
+ * by other operating systems.
+ *
+ * XHCI 1.0 errata 8/14/12 Table 13 notes:
+ * "Software should choose xHC BESL/BESLD field values that do not violate a
+ * device's resume latency requirements,
+ * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
+ * or not program values < '4' if BLC = '0' and a BESL device is attached.
+ */
+#define XHCI_DEFAULT_BESL 4
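A rough sketch (not the driver's exact sequence, just an illustration under the assumption that usb2_ports[] holds the mapped PORTSC addresses) of programming PORTPMSC for USB2 hardware LPM with the default BESL:

static void example_enable_usb2_hw_lpm(struct xhci_hcd *xhci,
					int port_index, int slot_id)
{
	__le32 __iomem *pm_addr = xhci->usb2_ports[port_index] + PORTPMSC;
	u32 pm_val = readl(pm_addr);

	pm_val &= ~(PORT_HIRD_MASK | PORT_L1DS_MASK);
	pm_val |= PORT_HIRD(XHCI_DEFAULT_BESL) | PORT_L1DS(slot_id) | PORT_HLE;
	writel(pm_val, pm_addr);
}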
/**
* struct xhci_intr_reg - Interrupt Register Set
@@ -378,12 +426,12 @@ struct xhci_op_regs {
* updates the dequeue pointer.
*/
struct xhci_intr_reg {
- u32 irq_pending;
- u32 irq_control;
- u32 erst_size;
- u32 rsvd;
- u64 erst_base;
- u64 erst_dequeue;
+ __le32 irq_pending;
+ __le32 irq_control;
+ __le32 erst_size;
+ __le32 rsvd;
+ __le64 erst_base;
+ __le64 erst_dequeue;
};
/* irq_pending bitmasks */
@@ -428,30 +476,44 @@ struct xhci_intr_reg {
* or larger accesses"
*/
struct xhci_run_regs {
- u32 microframe_index;
- u32 rsvd[7];
+ __le32 microframe_index;
+ __le32 rsvd[7];
struct xhci_intr_reg ir_set[128];
};
/**
* struct doorbell_array
*
+ * Bits 0 - 7: Endpoint target
+ * Bits 8 - 15: RsvdZ
+ * Bits 16 - 31: Stream ID
+ *
* Section 5.6
*/
struct xhci_doorbell_array {
- u32 doorbell[256];
+ __le32 doorbell[256];
};
-#define DB_TARGET_MASK 0xFFFFFF00
-#define DB_STREAM_ID_MASK 0x0000FFFF
-#define DB_TARGET_HOST 0x0
-#define DB_STREAM_ID_HOST 0x0
-#define DB_MASK (0xff << 8)
+#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16))
+#define DB_VALUE_HOST 0x00000000
-/* Endpoint Target - bits 0:7 */
-#define EPI_TO_DB(p) (((p) + 1) & 0xff)
-#define STREAM_ID_TO_DB(p) (((p) & 0xffff) << 16)
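A minimal sketch (not part of the patch) of the DB_VALUE() encoding in use; it assumes the doorbell array is mapped at xhci->dba, as elsewhere in the driver.

static void example_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	/* Doorbell 0 is the host controller's; device slots start at 1. */
	writel(DB_VALUE(ep_index, stream_id), &xhci->dba->doorbell[slot_id]);
}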
+/**
+ * struct xhci_protocol_caps
+ * @revision: major revision, minor revision, capability ID,
+ * and next capability pointer.
+ * @name_string: Four ASCII characters to say which spec this xHC
+ * follows, typically "USB ".
+ * @port_info: Port offset, count, and protocol-defined information.
+ */
+struct xhci_protocol_caps {
+ u32 revision;
+ u32 name_string;
+ u32 port_info;
+};
+#define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff)
+#define XHCI_EXT_PORT_OFF(x) ((x) & 0xff)
+#define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff)
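An illustrative sketch (not part of the patch) of decoding one extended protocol capability with the macros above; caps is assumed to point at an ioremapped xhci_protocol_caps entry.

static void example_decode_protocol_caps(struct xhci_protocol_caps __iomem *caps)
{
	u32 revision = readl(&caps->revision);
	u32 port_info = readl(&caps->port_info);

	pr_info("USB %u.x root ports %u..%u\n",
		XHCI_EXT_PORT_MAJOR(revision),
		XHCI_EXT_PORT_OFF(port_info),
		XHCI_EXT_PORT_OFF(port_info) +
			XHCI_EXT_PORT_COUNT(port_info) - 1);
}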
/**
* struct xhci_container_ctx
@@ -486,12 +548,12 @@ struct xhci_container_ctx {
* reserved at the end of the slot context for HC internal use.
*/
struct xhci_slot_ctx {
- u32 dev_info;
- u32 dev_info2;
- u32 tt_info;
- u32 dev_state;
+ __le32 dev_info;
+ __le32 dev_info2;
+ __le32 tt_info;
+ __le32 dev_state;
/* offset 0x10 to 0x1f reserved for HC internal use */
- u32 reserved[4];
+ __le32 reserved[4];
};
/* dev_info bitmasks */
@@ -542,6 +604,11 @@ struct xhci_slot_ctx {
#define SLOT_STATE (0x1f << 27)
#define GET_SLOT_STATE(p) (((p) & (0x1f << 27)) >> 27)
+#define SLOT_STATE_DISABLED 0
+#define SLOT_STATE_ENABLED SLOT_STATE_DISABLED
+#define SLOT_STATE_DEFAULT 1
+#define SLOT_STATE_ADDRESSED 2
+#define SLOT_STATE_CONFIGURED 3
/**
* struct xhci_ep_ctx
@@ -562,12 +629,12 @@ struct xhci_slot_ctx {
* reserved at the end of the endpoint context for HC internal use.
*/
struct xhci_ep_ctx {
- u32 ep_info;
- u32 ep_info2;
- u64 deq;
- u32 tx_info;
+ __le32 ep_info;
+ __le32 ep_info2;
+ __le64 deq;
+ __le32 tx_info;
/* offset 0x14 - 0x1f reserved for HC internal use */
- u32 reserved[3];
+ __le32 reserved[3];
};
/* ep_info bitmasks */
@@ -587,12 +654,14 @@ struct xhci_ep_ctx {
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4
/* Mult - Max number of bursts within an interval, in EP companion desc. */
-#define EP_MULT(p) ((p & 0x3) << 8)
+#define EP_MULT(p) (((p) & 0x3) << 8)
+#define CTX_TO_EP_MULT(p) (((p) >> 8) & 0x3)
/* bits 10:14 are Max Primary Streams */
/* bit 15 is Linear Stream Array */
/* Interval - period between requests to an endpoint - 125u increments. */
-#define EP_INTERVAL(p) ((p & 0xff) << 16)
+#define EP_INTERVAL(p) (((p) & 0xff) << 16)
#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
+#define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff)
#define EP_MAXPSTREAMS_MASK (0x1f << 10)
#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
@@ -617,13 +686,24 @@ struct xhci_ep_ctx {
/* bit 6 reserved */
/* bit 7 is Host Initiate Disable - for disabling stream selection */
#define MAX_BURST(p) (((p)&0xff) << 8)
+#define CTX_TO_MAX_BURST(p) (((p) >> 8) & 0xff)
#define MAX_PACKET(p) (((p)&0xffff) << 16)
#define MAX_PACKET_MASK (0xffff << 16)
#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
+/* Get max packet size from ep desc. Bits 10..0 specify the max packet size.
+ * USB2.0 spec 9.6.6.
+ */
+#define GET_MAX_PACKET(p) ((p) & 0x7ff)
+
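A one-line sketch (not part of the patch) of GET_MAX_PACKET() applied to a descriptor's wMaxPacketSize, dropping the high-bandwidth multiplier bits:

static u16 example_base_max_packet(const struct usb_endpoint_descriptor *desc)
{
	return GET_MAX_PACKET(le16_to_cpu(desc->wMaxPacketSize));
}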
/* tx_info bitmasks */
#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff)
#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16)
+#define CTX_TO_MAX_ESIT_PAYLOAD(p) (((p) >> 16) & 0xffff)
+
+/* deq bitmasks */
+#define EP_CTX_CYCLE_MASK (1 << 0)
+#define SCTX_DEQ_MASK (~0xfL)
/**
@@ -634,11 +714,16 @@ struct xhci_ep_ctx {
* @add_context: set the bit of the endpoint context you want to enable
*/
struct xhci_input_control_ctx {
- u32 drop_flags;
- u32 add_flags;
- u32 rsvd2[6];
+ __le32 drop_flags;
+ __le32 add_flags;
+ __le32 rsvd2[6];
};
+#define EP_IS_ADDED(ctrl_ctx, i) \
+ (le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))
+#define EP_IS_DROPPED(ctrl_ctx, i) \
+ (le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1)))
+
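An illustrative sketch (not part of the patch) of walking the 31 endpoint indexes of an input control context with the helpers above:

static void example_dump_ctrl_ctx(struct xhci_input_control_ctx *ctrl_ctx)
{
	int i;

	for (i = 0; i < 31; i++) {
		if (EP_IS_ADDED(ctrl_ctx, i))
			pr_debug("ep index %d to be added\n", i);
		if (EP_IS_DROPPED(ctrl_ctx, i))
			pr_debug("ep index %d to be dropped\n", i);
	}
}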
/* Represents everything that is needed to issue a command on the command ring.
* It's useful to pre-allocate these for commands that cannot fail due to
* out-of-memory errors, like freeing streams.
@@ -662,13 +747,13 @@ struct xhci_command {
struct xhci_stream_ctx {
/* 64-bit stream ring address, cycle state, and stream type */
- u64 stream_ring;
+ __le64 stream_ring;
/* offset 0x14 - 0x1f reserved for HC internal use */
- u32 reserved[2];
+ __le32 reserved[2];
};
/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
-#define SCT_FOR_CTX(p) (((p) << 1) & 0x7)
+#define SCT_FOR_CTX(p) (((p) & 0x7) << 1)
/* Secondary stream array type, dequeue pointer is to a transfer ring */
#define SCT_SEC_TR 0
/* Primary stream array type, dequeue pointer is to a transfer ring */
@@ -700,6 +785,67 @@ struct xhci_stream_info {
#define SMALL_STREAM_ARRAY_SIZE 256
#define MEDIUM_STREAM_ARRAY_SIZE 1024
+/* Some Intel xHCI host controllers need software to keep track of the bus
+ * bandwidth. Keep track of endpoint info here. Each root port is allocated
+ * the full bus bandwidth. We must also treat TTs (including each port under a
+ * multi-TT hub) as a separate bandwidth domain. The direct memory interface
+ * (DMI) also limits the total bandwidth (across all domains) that can be used.
+ */
+struct xhci_bw_info {
+ /* ep_interval is zero-based */
+ unsigned int ep_interval;
+ /* mult and num_packets are one-based */
+ unsigned int mult;
+ unsigned int num_packets;
+ unsigned int max_packet_size;
+ unsigned int max_esit_payload;
+ unsigned int type;
+};
+
+/* "Block" sizes in bytes the hardware uses for different device speeds.
+ * The scheduling logic in the hardware is limited in the number of bits it
+ * can use, so software must represent bandwidth in this less precise manner
+ * to mimic what the scheduler hardware computes.
+ */
+#define FS_BLOCK 1
+#define HS_BLOCK 4
+#define SS_BLOCK 16
+#define DMI_BLOCK 32
+
+/* Each device speed has a protocol overhead (CRC, bit stuffing, etc) associated
+ * with each byte transferred. SuperSpeed devices have an initial overhead to
+ * set up bursts. These are in blocks, see above. LS overhead has already been
+ * translated into FS blocks.
+ */
+#define DMI_OVERHEAD 8
+#define DMI_OVERHEAD_BURST 4
+#define SS_OVERHEAD 8
+#define SS_OVERHEAD_BURST 32
+#define HS_OVERHEAD 26
+#define FS_OVERHEAD 20
+#define LS_OVERHEAD 128
+/* The TTs need to claim roughly twice as much bandwidth (94 bytes per
+ * microframe ~= 6Mbps) of the HS bus as the devices can actually use because
+ * of overhead associated with split transfers crossing microframe boundaries.
+ * 31 blocks is pure protocol overhead.
+ */
+#define TT_HS_OVERHEAD (31 + 94)
+#define TT_DMI_OVERHEAD (25 + 12)
+
+/* Bandwidth limits in blocks */
+#define FS_BW_LIMIT 1285
+#define TT_BW_LIMIT 1320
+#define HS_BW_LIMIT 1607
+#define SS_BW_LIMIT_IN 3906
+#define DMI_BW_LIMIT_IN 3906
+#define SS_BW_LIMIT_OUT 3906
+#define DMI_BW_LIMIT_OUT 3906
+
+/* Percentage of bus bandwidth reserved for non-periodic transfers */
+#define FS_BW_RESERVED 10
+#define HS_BW_RESERVED 20
+#define SS_BW_RESERVED 10
+
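To give a rough feel for the block/overhead accounting above, here is a sketch (not the driver's actual scheduling check) estimating one high-speed periodic endpoint's cost against the HS limit minus the async reservation:

static bool example_hs_ep_fits(unsigned int num_packets,
			       unsigned int max_packet_size,
			       unsigned int blocks_in_use)
{
	unsigned int cost = num_packets *
		(HS_OVERHEAD + DIV_ROUND_UP(max_packet_size, HS_BLOCK));
	unsigned int limit = HS_BW_LIMIT * (100 - HS_BW_RESERVED) / 100;

	return blocks_in_use + cost <= limit;
}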
struct xhci_virt_ep {
struct xhci_ring *ring;
/* Related to endpoints that are configured to use stream IDs only */
@@ -719,14 +865,18 @@ struct xhci_virt_ep {
#define EP_GETTING_NO_STREAMS (1 << 5)
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
- /* The TRB that was last reported in a stopped endpoint ring */
- union xhci_trb *stopped_trb;
struct xhci_td *stopped_td;
unsigned int stopped_stream;
/* Watchdog timer for stop endpoint command to cancel URBs */
struct timer_list stop_cmd_timer;
int stop_cmds_pending;
struct xhci_hcd *xhci;
+ /* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue
+ * command. We'll need to update the ring's dequeue segment and dequeue
+ * pointer after the command completes.
+ */
+ struct xhci_segment *queued_deq_seg;
+ union xhci_trb *queued_deq_ptr;
/*
* Sometimes the xHC can not process isochronous endpoint ring quickly
* enough, and it will miss some isoc tds on the ring and generate
@@ -735,8 +885,39 @@ struct xhci_virt_ep {
* process the missed tds on the endpoint ring.
*/
bool skip;
+ /* Bandwidth checking storage */
+ struct xhci_bw_info bw_info;
+ struct list_head bw_endpoint_list;
};
+enum xhci_overhead_type {
+ LS_OVERHEAD_TYPE = 0,
+ FS_OVERHEAD_TYPE,
+ HS_OVERHEAD_TYPE,
+};
+
+struct xhci_interval_bw {
+ unsigned int num_packets;
+ /* Sorted by max packet size.
+ * Head of the list is the greatest max packet size.
+ */
+ struct list_head endpoints;
+ /* How many endpoints of each speed are present. */
+ unsigned int overhead[3];
+};
+
+#define XHCI_MAX_INTERVAL 16
+
+struct xhci_interval_bw_table {
+ unsigned int interval0_esit_payload;
+ struct xhci_interval_bw interval_bw[XHCI_MAX_INTERVAL];
+ /* Includes reserved bandwidth for async endpoints */
+ unsigned int bw_used;
+ unsigned int ss_bw_in;
+ unsigned int ss_bw_out;
+};
+
+
struct xhci_virt_device {
struct usb_device *udev;
/*
@@ -753,15 +934,37 @@ struct xhci_virt_device {
/* Rings saved to ensure old alt settings can be re-instated */
struct xhci_ring **ring_cache;
int num_rings_cached;
- /* Store xHC assigned device address */
- int address;
#define XHCI_MAX_RINGS_CACHED 31
struct xhci_virt_ep eps[31];
struct completion cmd_completion;
- /* Status of the last command issued for this device */
- u32 cmd_status;
- struct list_head cmd_list;
- u8 port;
+ u8 fake_port;
+ u8 real_port;
+ struct xhci_interval_bw_table *bw_table;
+ struct xhci_tt_bw_info *tt_info;
+ /* The current max exit latency for the enabled USB3 link states. */
+ u16 current_mel;
+};
+
+/*
+ * For each roothub, keep track of the bandwidth information for each periodic
+ * interval.
+ *
+ * If a high speed hub is attached to the roothub, each TT associated with that
+ * hub is a separate bandwidth domain. The interval information for the
+ * endpoints on the devices under that TT will appear in the TT structure.
+ */
+struct xhci_root_port_bw_info {
+ struct list_head tts;
+ unsigned int num_active_tts;
+ struct xhci_interval_bw_table bw_table;
+};
+
+struct xhci_tt_bw_info {
+ struct list_head tt_list;
+ int slot_id;
+ int ttport;
+ struct xhci_interval_bw_table bw_table;
+ int active_eps;
};
@@ -771,7 +974,7 @@ struct xhci_virt_device {
*/
struct xhci_device_context_array {
/* 64-bit device addresses; we only write 32-bit addresses */
- u64 dev_context_ptrs[MAX_HC_SLOTS];
+ __le64 dev_context_ptrs[MAX_HC_SLOTS];
/* private xHCD pointers */
dma_addr_t dma;
};
@@ -784,12 +987,16 @@ struct xhci_device_context_array {
struct xhci_transfer_event {
/* 64-bit buffer address, or immediate data */
- u64 buffer;
- u32 transfer_len;
+ __le64 buffer;
+ __le32 transfer_len;
/* This field is interpreted differently based on the type of TRB */
- u32 flags;
+ __le32 flags;
};
+/* Transfer event TRB length bit mask */
+/* bits 0:23 */
+#define EVENT_TRB_LEN(p) ((p) & 0xffffff)
+
/** Transfer Event bit fields **/
#define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f)
@@ -837,6 +1044,8 @@ struct xhci_transfer_event {
#define COMP_PING_ERR 20
/* Event Ring is full */
#define COMP_ER_FULL 21
+/* Incompatible Device Error */
+#define COMP_DEV_ERR 22
/* Missed Service Error - HC couldn't service an isoc ep within interval */
#define COMP_MISSED_INT 23
/* Successfully stopped command ring */
@@ -845,11 +1054,13 @@ struct xhci_transfer_event {
#define COMP_CMD_ABORT 25
/* Stopped - transfer was terminated by a stop endpoint command */
#define COMP_STOP 26
-/* Same as COMP_EP_STOPPED, but the transfered length in the event is invalid */
+/* Same as COMP_EP_STOPPED, but the transferred length in the event is invalid */
#define COMP_STOP_INVAL 27
/* Control Abort Error - Debug Capability - control pipe aborted */
#define COMP_DBG_ABORT 28
-/* TRB type 29 and 30 reserved */
+/* Max Exit Latency Too Large Error */
+#define COMP_MEL_ERR 29
+/* TRB type 30 reserved */
/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */
#define COMP_BUFF_OVER 31
/* Event Lost Error - xHC has an "internal event overrun condition" */
@@ -859,16 +1070,15 @@ struct xhci_transfer_event {
/* Invalid Stream ID Error */
#define COMP_STRID_ERR 34
/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
-/* FIXME - check for this */
#define COMP_2ND_BW_ERR 35
/* Split Transaction Error */
#define COMP_SPLIT_ERR 36
struct xhci_link_trb {
/* 64-bit segment pointer*/
- u64 segment_ptr;
- u32 intr_target;
- u32 control;
+ __le64 segment_ptr;
+ __le32 intr_target;
+ __le32 control;
};
/* control bitfields */
@@ -877,12 +1087,20 @@ struct xhci_link_trb {
/* Command completion event TRB */
struct xhci_event_cmd {
/* Pointer to command TRB, or the value passed by the event data trb */
- u64 cmd_trb;
- u32 status;
- u32 flags;
+ __le64 cmd_trb;
+ __le32 status;
+ __le32 flags;
};
/* flags bitmasks */
+
+/* Address device - disable SetAddress */
+#define TRB_BSR (1<<9)
+enum xhci_setup_dev {
+ SETUP_CONTEXT_ONLY,
+ SETUP_CONTEXT_ADDRESS,
+};
+
/* bits 16:23 are the virtual function ID */
/* bits 24:31 are the slot ID */
#define TRB_TO_SLOT_ID(p) (((p) & (0xff<<24)) >> 24)
@@ -896,9 +1114,10 @@ struct xhci_event_cmd {
#define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23)
#define LAST_EP_INDEX 30
-/* Set TR Dequeue Pointer command TRB fields */
+/* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
+#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
/* Port Status Change Event TRB fields */
@@ -911,6 +1130,8 @@ struct xhci_event_cmd {
/* Interrupter Target - which MSI-X vector to target the completion event at */
#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
+#define TRB_TBC(p) (((p) & 0x3) << 7)
+#define TRB_TLBPC(p) (((p) & 0xf) << 16)
/* Cycle bit - indicates TRB ownership by HC or HCD */
#define TRB_CYCLE (1<<0)
@@ -930,15 +1151,20 @@ struct xhci_event_cmd {
/* The buffer pointer contains immediate data */
#define TRB_IDT (1<<6)
+/* Block Event Interrupt */
+#define TRB_BEI (1<<9)
/* Control transfer TRB specific fields */
#define TRB_DIR_IN (1<<16)
+#define TRB_TX_TYPE(p) ((p) << 16)
+#define TRB_DATA_OUT 2
+#define TRB_DATA_IN 3
/* Isochronous TRB specific fields */
#define TRB_SIA (1<<31)
struct xhci_generic_trb {
- u32 field[4];
+ __le32 field[4];
};
union xhci_trb {
@@ -1024,6 +1250,13 @@ union xhci_trb {
/* Get NEC firmware revision. */
#define TRB_NEC_GET_FW 49
+#define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
+/* Above, but for __le32 types -- can avoid work by swapping constants: */
+#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+ cpu_to_le32(TRB_TYPE(TRB_LINK)))
+#define TRB_TYPE_NOOP_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+ cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))
+
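A minimal sketch (not part of the patch) of why the _LE32 variants exist: the type test can run on the raw little-endian control word without a per-TRB swap.

static bool example_is_link_trb(union xhci_trb *trb)
{
	/* Compares against a constant already byte-swapped at compile time. */
	return TRB_TYPE_LINK_LE32(trb->link.control);
}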
#define NEC_FW_MINOR(p) (((p) >> 0) & 0xff)
#define NEC_FW_MAJOR(p) (((p) >> 8) & 0xff)
@@ -1035,11 +1268,8 @@ union xhci_trb {
#define TRBS_PER_SEGMENT 64
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
-#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
-/* SEGMENT_SHIFT should be log2(SEGMENT_SIZE).
- * Change this if you change TRBS_PER_SEGMENT!
- */
-#define SEGMENT_SHIFT 10
+#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
+#define TRB_SEGMENT_SHIFT (ilog2(TRB_SEGMENT_SIZE))
/* TRB buffer pointers can't cross 64KB boundaries */
#define TRB_MAX_BUFF_SHIFT 16
#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT)
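Since a single TRB's buffer may not cross a 64KB boundary, the TRB count for a contiguous buffer depends on its starting offset; a minimal estimate (illustrative only, not the driver's helper):

static unsigned int example_count_trbs(u64 addr, unsigned int len)
{
	unsigned int offset = addr & (TRB_MAX_BUFF_SIZE - 1);

	if (len == 0)
		return 1;	/* a zero-length TD still needs one TRB */
	return DIV_ROUND_UP(offset + len, TRB_MAX_BUFF_SIZE);
}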
@@ -1060,14 +1290,34 @@ struct xhci_td {
union xhci_trb *last_trb;
};
+/* xHCI command default timeout value */
+#define XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ)
+
+/* command descriptor */
+struct xhci_cd {
+ struct xhci_command *command;
+ union xhci_trb *cmd_trb;
+};
+
struct xhci_dequeue_state {
struct xhci_segment *new_deq_seg;
union xhci_trb *new_deq_ptr;
int new_cycle_state;
};
+enum xhci_ring_type {
+ TYPE_CTRL = 0,
+ TYPE_ISOC,
+ TYPE_BULK,
+ TYPE_INTR,
+ TYPE_STREAM,
+ TYPE_COMMAND,
+ TYPE_EVENT,
+};
+
struct xhci_ring {
struct xhci_segment *first_seg;
+ struct xhci_segment *last_seg;
union xhci_trb *enqueue;
struct xhci_segment *enq_seg;
unsigned int enq_updates;
@@ -1082,14 +1332,20 @@ struct xhci_ring {
*/
u32 cycle_state;
unsigned int stream_id;
+ unsigned int num_segs;
+ unsigned int num_trbs_free;
+ unsigned int num_trbs_free_temp;
+ enum xhci_ring_type type;
+ bool last_td_was_short;
+ struct radix_tree_root *trb_address_map;
};
struct xhci_erst_entry {
/* 64-bit event ring segment address */
- u64 seg_addr;
- u32 seg_size;
+ __le64 seg_addr;
+ __le32 seg_size;
/* Set to zero */
- u32 rsvd;
+ __le32 rsvd;
};
struct xhci_erst {
@@ -1142,8 +1398,48 @@ struct s3_save {
u64 erst_dequeue;
};
-/* There is one ehci_hci structure per controller */
+/* Use for lpm */
+struct dev_info {
+ u32 dev_id;
+ struct list_head list;
+};
+
+struct xhci_bus_state {
+ unsigned long bus_suspended;
+ unsigned long next_statechange;
+
+ /* Port suspend arrays are indexed by the portnum of the fake roothub */
+ /* ports suspend status arrays - max 31 ports for USB2, 15 for USB3 */
+ u32 port_c_suspend;
+ u32 suspended_ports;
+ u32 port_remote_wakeup;
+ unsigned long resume_done[USB_MAXCHILDREN];
+ /* which ports have started to resume */
+ unsigned long resuming_ports;
+ /* Which ports are waiting on RExit to U0 transition. */
+ unsigned long rexit_ports;
+ struct completion rexit_done[USB_MAXCHILDREN];
+};
+
+
+/*
+ * It can take up to 20 ms to transition from RExit to U0 on the
+ * Intel Lynx Point LP xHCI host.
+ */
+#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000)
+
+static inline unsigned int hcd_index(struct usb_hcd *hcd)
+{
+ if (hcd->speed == HCD_USB3)
+ return 0;
+ else
+ return 1;
+}
+
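A short sketch (not part of the patch): hcd_index() pairs with the two-element bus_state[] array added to struct xhci_hcd below, so selecting the right roothub's suspend bookkeeping looks like this.

static struct xhci_bus_state *example_bus_state(struct xhci_hcd *xhci,
						struct usb_hcd *hcd)
{
	/* Index 0 is the USB 3.0 roothub, index 1 the USB 2.0 roothub. */
	return &xhci->bus_state[hcd_index(hcd)];
}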
+/* There is one xhci_hcd structure per controller */
struct xhci_hcd {
+ struct usb_hcd *main_hcd;
+ struct usb_hcd *shared_hcd;
/* glue to PCI and HCD framework */
struct xhci_cap_regs __iomem *cap_regs;
struct xhci_op_regs __iomem *op_regs;
@@ -1176,20 +1472,35 @@ struct xhci_hcd {
/* msi-x vectors */
int msix_count;
struct msix_entry *msix_entries;
+ /* optional clock */
+ struct clk *clk;
/* data structures */
struct xhci_device_context_array *dcbaa;
struct xhci_ring *cmd_ring;
+ unsigned int cmd_ring_state;
+#define CMD_RING_STATE_RUNNING (1 << 0)
+#define CMD_RING_STATE_ABORTED (1 << 1)
+#define CMD_RING_STATE_STOPPED (1 << 2)
+ struct list_head cmd_list;
unsigned int cmd_ring_reserved_trbs;
+ struct timer_list cmd_timer;
+ struct xhci_command *current_cmd;
struct xhci_ring *event_ring;
struct xhci_erst erst;
/* Scratchpad */
struct xhci_scratchpad *scratchpad;
+ /* Store LPM test failed devices' information */
+ struct list_head lpm_failed_devs;
/* slot enabling and address device helpers */
struct completion addr_dev;
int slot_id;
+ /* For USB 3.0 LPM enable/disable. */
+ struct xhci_command *lpm_command;
/* Internal mirror of the HW's dcbaa */
struct xhci_virt_device *devs[MAX_HC_SLOTS];
+ /* For keeping track of bandwidth domains per roothub. */
+ struct xhci_root_port_bw_info *rh_bw;
/* DMA pools */
struct dma_pool *device_pool;
@@ -1197,17 +1508,9 @@ struct xhci_hcd {
struct dma_pool *small_streams_pool;
struct dma_pool *medium_streams_pool;
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
- /* Poll the rings - for debugging */
- struct timer_list event_ring_timer;
- int zombie;
-#endif
/* Host controller watchdog timer structures */
unsigned int xhc_state;
- unsigned long bus_suspended;
- unsigned long next_statechange;
-
u32 command;
struct s3_save s3;
/* Host controller is dying - not responding to commands. "I'm not dead yet!"
@@ -1223,64 +1526,83 @@ struct xhci_hcd {
* There are no reports of xHCI host controllers that display this issue.
*/
#define XHCI_STATE_DYING (1 << 0)
+#define XHCI_STATE_HALTED (1 << 1)
/* Statistics */
- int noops_submitted;
- int noops_handled;
int error_bitmask;
unsigned int quirks;
#define XHCI_LINK_TRB_QUIRK (1 << 0)
#define XHCI_RESET_EP_QUIRK (1 << 1)
#define XHCI_NEC_HOST (1 << 2)
- u32 port_c_suspend[8]; /* port suspend change*/
- u32 suspended_ports[8]; /* which ports are
- suspended */
- unsigned long resume_done[MAX_HC_PORTS];
+#define XHCI_AMD_PLL_FIX (1 << 3)
+#define XHCI_SPURIOUS_SUCCESS (1 << 4)
+/*
+ * Certain Intel host controllers have a limit to the number of endpoint
+ * contexts they can handle. Ideally, they would signal that they can't handle
+ * anymore endpoint contexts by returning a Resource Error for the Configure
+ * Endpoint command, but they don't. Instead they expect software to keep track
+ * of the number of active endpoints for them, across configure endpoint
+ * commands, reset device commands, disable slot commands, and address device
+ * commands.
+ */
+#define XHCI_EP_LIMIT_QUIRK (1 << 5)
+#define XHCI_BROKEN_MSI (1 << 6)
+#define XHCI_RESET_ON_RESUME (1 << 7)
+#define XHCI_SW_BW_CHECKING (1 << 8)
+#define XHCI_AMD_0x96_HOST (1 << 9)
+#define XHCI_TRUST_TX_LENGTH (1 << 10)
+#define XHCI_LPM_SUPPORT (1 << 11)
+#define XHCI_INTEL_HOST (1 << 12)
+#define XHCI_SPURIOUS_REBOOT (1 << 13)
+#define XHCI_COMP_MODE_QUIRK (1 << 14)
+#define XHCI_AVOID_BEI (1 << 15)
+#define XHCI_PLAT (1 << 16)
+#define XHCI_SLOW_SUSPEND (1 << 17)
+#define XHCI_SPURIOUS_WAKEUP (1 << 18)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+ struct xhci_bus_state bus_state[2];
+ /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
+ u8 *port_array;
+ /* Array of pointers to USB 3.0 PORTSC registers */
+ __le32 __iomem **usb3_ports;
+ unsigned int num_usb3_ports;
+ /* Array of pointers to USB 2.0 PORTSC registers */
+ __le32 __iomem **usb2_ports;
+ unsigned int num_usb2_ports;
+ /* support xHCI 0.96 spec USB2 software LPM */
+ unsigned sw_lpm_support:1;
+ /* support xHCI 1.0 spec USB2 hardware LPM */
+ unsigned hw_lpm_support:1;
+ /* cached usb2 extended protocol capabilities */
+ u32 *ext_caps;
+ unsigned int num_ext_caps;
+ /* Compliance Mode Recovery Data */
+ struct timer_list comp_mode_recovery_timer;
+ u32 port_status_u0;
+/* Compliance Mode Timer Triggered every 2 seconds */
+#define COMP_MODE_RCVRY_MSECS 2000
};
-/* For testing purposes */
-#define NUM_TEST_NOOPS 0
-
/* convert between an HCD pointer and the corresponding xhci_hcd */
static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
{
- return (struct xhci_hcd *) (hcd->hcd_priv);
+ return *((struct xhci_hcd **) (hcd->hcd_priv));
}
static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
{
- return container_of((void *) xhci, struct usb_hcd, hcd_priv);
+ return xhci->main_hcd;
}
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-#define XHCI_DEBUG 1
-#else
-#define XHCI_DEBUG 0
-#endif
-
#define xhci_dbg(xhci, fmt, args...) \
- do { if (XHCI_DEBUG) dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
-#define xhci_info(xhci, fmt, args...) \
- do { if (XHCI_DEBUG) dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
+ dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
#define xhci_err(xhci, fmt, args...) \
dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
#define xhci_warn(xhci, fmt, args...) \
dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
-
-/* TODO: copied from ehci.h - can be refactored? */
-/* xHCI spec says all registers are little endian */
-static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
- __u32 __iomem *regs)
-{
- return readl(regs);
-}
-static inline void xhci_writel(struct xhci_hcd *xhci,
- const unsigned int val, __u32 __iomem *regs)
-{
- xhci_dbg(xhci,
- "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
- regs, val);
- writel(val, regs);
-}
+#define xhci_warn_ratelimited(xhci, fmt, args...) \
+ dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
/*
* Registers should always be accessed with double word or quad word accesses.
@@ -1292,7 +1614,7 @@ static inline void xhci_writel(struct xhci_hcd *xhci,
* the high dword, and write order is irrelevant.
*/
static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
- __u64 __iomem *regs)
+ __le64 __iomem *regs)
{
__u32 __iomem *ptr = (__u32 __iomem *) regs;
u64 val_lo = readl(ptr);
@@ -1300,28 +1622,23 @@ static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
return val_lo + (val_hi << 32);
}
static inline void xhci_write_64(struct xhci_hcd *xhci,
- const u64 val, __u64 __iomem *regs)
+ const u64 val, __le64 __iomem *regs)
{
__u32 __iomem *ptr = (__u32 __iomem *) regs;
u32 val_lo = lower_32_bits(val);
u32 val_hi = upper_32_bits(val);
- xhci_dbg(xhci,
- "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
- regs, (long unsigned int) val);
writel(val_lo, ptr);
writel(val_hi, ptr + 1);
}
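A typical use of the 64-bit helper above (an illustrative sketch, not part of the patch): pointing the controller at the device context base address array, whose DMA address is stored in dcbaa->dma.

static void example_set_dcbaa(struct xhci_hcd *xhci)
{
	xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);
}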
static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
{
- u32 temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
- return ((HC_VERSION(temp) == 0x95) &&
- (xhci->quirks & XHCI_LINK_TRB_QUIRK));
+ return xhci->quirks & XHCI_LINK_TRB_QUIRK;
}
/* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
void xhci_dbg_regs(struct xhci_hcd *xhci);
void xhci_print_run_regs(struct xhci_hcd *xhci);
@@ -1338,6 +1655,8 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_virt_ep *ep);
+void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
+ const char *fmt, ...);
/* xHCI memory management */
void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1348,10 +1667,25 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
struct usb_device *udev);
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
+unsigned int xhci_get_endpoint_address(unsigned int ep_index);
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
+void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
+ struct xhci_bw_info *ep_bw,
+ struct xhci_interval_bw_table *bw_table,
+ struct usb_device *udev,
+ struct xhci_virt_ep *virt_ep,
+ struct xhci_tt_bw_info *tt_info);
+void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int old_active_eps);
+void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info);
+void xhci_update_bw_info(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_input_control_ctx *ctrl_ctx,
+ struct xhci_virt_device *virt_dev);
void xhci_endpoint_copy(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
@@ -1363,6 +1697,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
struct usb_device *udev, struct usb_host_endpoint *ep,
gfp_t mem_flags);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ unsigned int num_trbs, gfp_t flags);
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index);
@@ -1377,6 +1713,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
struct xhci_virt_ep *ep);
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, bool drop_control_ep);
struct xhci_ring *xhci_dma_to_transfer_ring(
struct xhci_virt_ep *ep,
u64 address);
@@ -1395,9 +1733,25 @@ void xhci_free_command(struct xhci_hcd *xhci,
/* xHCI PCI glue */
int xhci_register_pci(void);
void xhci_unregister_pci(void);
+#else
+static inline int xhci_register_pci(void) { return 0; }
+static inline void xhci_unregister_pci(void) {}
+#endif
+
+#if IS_ENABLED(CONFIG_USB_XHCI_PLATFORM)
+int xhci_register_plat(void);
+void xhci_unregister_plat(void);
+#else
+static inline int xhci_register_plat(void)
+{ return 0; }
+static inline void xhci_unregister_plat(void)
+{ }
#endif
/* xHCI host controller glue */
+typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
+int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+ u32 mask, u32 done, int usec);
void xhci_quiesce(struct xhci_hcd *xhci);
int xhci_halt(struct xhci_hcd *xhci);
int xhci_reset(struct xhci_hcd *xhci);
@@ -1405,6 +1759,7 @@ int xhci_init(struct usb_hcd *hcd);
int xhci_run(struct usb_hcd *hcd);
void xhci_stop(struct usb_hcd *hcd);
void xhci_shutdown(struct usb_hcd *hcd);
+int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
#ifdef CONFIG_PM
int xhci_suspend(struct xhci_hcd *xhci);
@@ -1416,9 +1771,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
int xhci_get_frame(struct usb_hcd *hcd);
irqreturn_t xhci_irq(struct usb_hcd *hcd);
-irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd);
+irqreturn_t xhci_msi_irq(int irq, void *hcd);
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_alloc_tt_info(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
unsigned int num_streams, gfp_t mem_flags);
@@ -1426,6 +1785,10 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
gfp_t mem_flags);
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+ struct usb_device *udev, int enable);
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
@@ -1444,14 +1807,14 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
dma_addr_t suspect_dma);
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
-void *xhci_setup_one_noop(struct xhci_hcd *xhci);
-int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
-int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
- u32 slot_id);
-int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 trb_type, u32 slot_id);
+int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev);
+int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 field1, u32 field2, u32 field3, u32 field4);
-int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, int suspend);
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int slot_id, unsigned int ep_index, int suspend);
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
@@ -1460,18 +1823,21 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index);
-int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
- u32 slot_id, bool command_must_succeed);
-int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id,
+ bool command_must_succeed);
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed);
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int slot_id, unsigned int ep_index);
+int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 slot_id);
-int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index);
-int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id, struct xhci_td *cur_td,
struct xhci_dequeue_state *state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+ struct xhci_command *cmd,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id,
struct xhci_dequeue_state *deq_state);
@@ -1481,13 +1847,25 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state);
void xhci_stop_endpoint_command_watchdog(unsigned long arg);
+void xhci_handle_command_timeout(unsigned long data);
+
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id);
+void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
/* xHCI roothub code */
+void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
+ int port_id, u32 link_state);
+int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
+ struct usb_device *udev, enum usb3_link_state state);
+int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
+ struct usb_device *udev, enum usb3_link_state state);
+void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
+ int port_id, u32 port_bit);
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
+int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
#ifdef CONFIG_PM
int xhci_bus_suspend(struct usb_hcd *hcd);
@@ -1498,7 +1876,8 @@ int xhci_bus_resume(struct usb_hcd *hcd);
#endif /* CONFIG_PM */
u32 xhci_port_state_to_neutral(u32 state);
-int xhci_find_slot_id_by_port(struct xhci_hcd *xhci, u16 port);
+int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ u16 port);
void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);
/* xHCI contexts */
@@ -1506,4 +1885,7 @@ struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
+/* xHCI quirks */
+bool xhci_compliance_mode_recovery_timer_quirk_check(void);
+
#endif /* __LINUX_XHCI_HCD_H */